| column | dtype | lengths / classes |
|---|---|---|
| commit | string | 40 |
| old_file | string | 4-118 |
| new_file | string | 4-118 |
| old_contents | string | 0-2.94k |
| new_contents | string | 1-4.43k |
| subject | string | 15-444 |
| message | string | 16-3.45k |
| lang | string | 1 class |
| license | string | 13 classes |
| repos | string | 5-43.2k |
| prompt | string | 17-4.58k |
| response | string | 1-4.43k |
| prompt_tagged | string | 58-4.62k |
| response_tagged | string | 1-4.43k |
| text | string | 132-7.29k |
| text_tagged | string | 173-7.33k |
3811f7072e0d416a04c342ff6cfaec05deda3619
|
setup.py
|
setup.py
|
from setuptools import setup
setup(
name="livejson",
version="0.1",
description="Bind Python objects to JSON files",
long_description=("An interface to transparantly bind Python objects to"
"JSON files so that all changes made to the object are"
"reflected in the JSON file"),
author="Luke Taylor",
author_email="luke@deentaylor.com",
url="http://luke.deentaylor.com/",
)
|
from setuptools import setup
setup(
name="livejson",
version="0.1",
description="Bind Python objects to JSON files",
long_description=("An interface to transparantly bind Python objects to"
"JSON files so that all changes made to the object are"
"reflected in the JSON file"),
author="Luke Taylor",
author_email="luke@deentaylor.com",
url="https://github.com/The-Penultimate-Defenestrator/livejson/",
)
|
Change URL to point to GitHub
|
Change URL to point to GitHub
|
Python
|
mit
|
controversial/livejson
|
673dea4b1415b32adb4eb9cc38c6cfa0f5076f93
|
setup.py
|
setup.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
from setuptools import setup
from setuptools.command.test import test as TestCommand
import io
import os
import sys
import pambox
here = os.path.abspath(os.path.dirname(__file__))
def read(*filenames, **kwargs):
encoding = kwargs.get('encoding', 'utf-8')
sep = kwargs.get('sep', '\n')
buf = []
for filename in filenames:
with io.open(filename, encoding=encoding) as f:
buf.append(f.read())
return sep.join(buf)
long_description = read('README.md', 'CHANGES.txt')
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest
errcode = pytest.main(self.test_args)
sys.exit(errcode)
setup(
name='pambox',
version=pambox.__version__,
url='https://bitbucket.org/achabotl/pambox',
license='Modified BSD License',
author='Alexandre Chabot-Leclerc',
tests_require=['pytest'],
install_requires=[],
cmdclass={'test': PyTest},
author_email='pambox@alex.alexchabot.net',
description='A Python auditory modeling toolbox',
long_description=long_description,
packages=['pambox'],
include_package_data=True,
platforms='any',
test_suite='pambox.test.test_pambox',
classifiers = [
'Programming Language :: Python',
'Development Status :: 1 - Alpha',
'Natural Language :: English',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent'
],
extras_require={
'testing': ['pytest'],
}
)
|
Add basic, and probably incomplete setyp.py.
|
Add basic, and probably incomplete setyp.py.
|
Python
|
bsd-3-clause
|
achabotl/pambox
|
4a43bc36f0f3a413b222f2cb1fac316240168aa2
|
LPC.py
|
LPC.py
|
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 27 22:01:37 2016
@author: ORCHISAMA
"""
#calculate LPC coefficients from sound file
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
def autocorr(x):
n = len(x)
variance = np.var(x)
x = x - np.mean(x)
r = np.correlate(x, x, mode = 'full')[-n:] #n numbers from last index (l-n to l)
result = r/(variance*(np.arange(n, 0, -1)))
return result
def createSymmetricMatrix(acf,p):
R = np.empty((p,p))
for i in range(p):
for j in range(p):
R[i,j] = acf[np.abs(i-j)]
return R
def lpc(s,fs,p):
#divide into segments of 25 ms with overlap of 10ms
nSamples = np.int32(0.025*fs)
overlap = np.int32(0.01*fs)
nFrames = np.int32(np.ceil(len(s)/(nSamples-overlap)))
#zero padding to make signal length long enough to have nFrames
padding = ((nSamples-overlap)*nFrames) - len(s)
if padding > 0:
signal = np.append(s, np.zeros(padding))
else:
signal = s
segment = np.empty((nSamples, nFrames))
start = 0
for i in range(nFrames):
segment[:,i] = signal[start:start+nSamples]
start = (nSamples-overlap)*i
#calculate LPC with Yule-Walker
lpc_coeffs = np.empty((p, nFrames))
for i in range(nFrames):
acf = autocorr(segment[:,i])
# plt.figure(1)
# plt.plot(acf)
# plt.xlabel('lags')
# plt.ylabel('Autocorrelation coefficients')
# plt.axis([0, np.size(acf), -1, 1])
# plt.title('Autocorrelation function')
# break
r = -acf[1:p+1].T
R = createSymmetricMatrix(acf,p)
lpc_coeffs[:,i] = np.dot(np.linalg.inv(R),r)
lpc_coeffs[:,i] = lpc_coeffs[:,i]/np.max(np.abs(lpc_coeffs[:,i]))
return lpc_coeffs
|
Add program to calculate Linear Prediction Coefficients
|
Add program to calculate Linear Prediction Coefficients
|
Python
|
mit
|
orchidas/Speaker-Recognition
|
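The `lpc()` helper in the LPC.py row above takes a raw signal, a sample rate, and a model order. A minimal usage sketch follows; the sample rate, model order, and test signal are illustrative assumptions, not values from the original repository.

```python
# Sketch of calling lpc() from LPC.py above; fs, p and the test signal are
# illustrative assumptions, not values taken from the original commit.
import numpy as np
from LPC import lpc  # the module added in this commit

fs = 16000                                  # assumed sample rate (Hz)
t = np.arange(fs) / fs                      # 1 second of samples
rng = np.random.default_rng(0)
s = np.sin(2 * np.pi * 440 * t) + 0.1 * rng.standard_normal(fs)  # noisy sine

coeffs = lpc(s, fs, 12)                     # order-12 model
print(coeffs.shape)                         # -> (12, number_of_frames)
```

Each column of the returned array holds the order-p coefficients for one 25 ms frame.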
512abbbda1fe249ef59b8a416c7c92b195940f27
|
analysis/__init__.py
|
analysis/__init__.py
|
import os
import retrying
import sys
from .. import BaseAction
from .. import experiments
from ..helpers import get_first_existing_path, get_multiprint, save_pickle_gz
class Analysis(BaseAction):
DEFAULT_RESULTS_ROOT = os.path.join('output', 'analysis')
def _setup(self, config, status):
config.define('name', default=os.path.splitext(os.path.basename(sys.argv[0]))[0])
config.define('title', default='')
self._init_results_directory(config)
def _init_results_directory(self, config):
relative_results_path = config.name
if config.title:
relative_results_path = os.path.join(relative_results_path, config.title)
config.define('path.result.analysis.base', default=Analysis.DEFAULT_RESULTS_ROOT)
config.define('path.result.analysis.relative', default=relative_results_path)
self.results_path = os.path.join(config.path.result.analysis.base, config.path.result.analysis.relative)
if not os.path.exists(self.results_path):
os.makedirs(self.results_path)
@retrying.retry
def save_metadata(self):
self.log('Saving metadata... ', end='')
config_dict = self.config.to_dict()
save_pickle_gz(sys.argv, self.get_results_path('__args'))
save_pickle_gz(config_dict, self.get_results_path('__env'))
with open(self.get_results_path('__args.txt'), 'w') as f:
f.write(' '.join(sys.argv))
with open(self.get_results_path('__env.txt'), 'w') as f:
longest_key_length = max(len(key) for key in config_dict.keys())
f.write('\n'.join(['{: <{width}} = {}'.format(key.upper(), config_dict[key], width=longest_key_length) for key in sorted(config_dict.keys())]))
self.log('Done.')
def get_results_path(self, filename=None):
return os.path.join(self.results_path, filename) if filename is not None else self.results_path
class InPlaceAnalysis(Analysis):
def _setup(self, config, status):
config.require('experiment')
self.experiment_path = get_first_existing_path(
config.experiment,
experiments.get_results_path(config, config.experiment))
if not self.experiment_path:
raise FileNotFoundError('Cannot find a valid experiment path for: ' + config.experiment)
config.define('path.result.analysis.base', default=os.path.join(self.experiment_path, 'analysis'))
super()._setup(config, status)
|
Add Analysis and InPlaceAnalysis base classes for analyzing experiment results.
|
Add Analysis and InPlaceAnalysis base classes for analyzing experiment results.
|
Python
|
mit
|
tsoontornwutikul/mlxm
|
53be0ba69200c907c8172e255aab84beb19506fb
|
pitchfork/template_filters.py
|
pitchfork/template_filters.py
|
import re
import json
def nl2br(value):
if value:
_newline_re = re.compile(r'(?:\r\n|\r|\n)')
return _newline_re.sub('<br>', value)
def tab2spaces(value):
if value:
text = re.sub('\t', ' ' * 4, value)
return text
def unslug(value):
text = re.sub('_', ' ', value)
return text
def slug(value):
text = re.sub('\s+', '_', value)
return text
def check_regex(value):
if re.match('variable', value):
return True
else:
return False
def pretty_print_json(string):
return json.dumps(
string,
sort_keys=False,
indent=4,
separators=(',', ':')
)
def remove_slash(string):
if string:
return re.sub('\/', '', string)
|
Move template filters out of init file
|
Move template filters out of init file
|
Python
|
apache-2.0
|
oldarmyc/pitchfork,rackerlabs/pitchfork,oldarmyc/pitchfork,oldarmyc/pitchfork,rackerlabs/pitchfork,rackerlabs/pitchfork
|
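The pitchfork/template_filters.py row above collects plain filter functions. A hedged sketch of attaching them to a Flask app's Jinja environment is shown below; the bare `template_filters` import path and the `app` object are assumptions for illustration, not pitchfork's actual wiring.

```python
# Illustrative registration of the filter functions above with a Flask app.
# The import path and app object are assumptions, not pitchfork's real setup.
from flask import Flask

import template_filters  # assumed importable name for the module above

app = Flask(__name__)
for name in ('nl2br', 'tab2spaces', 'unslug', 'slug',
             'check_regex', 'pretty_print_json', 'remove_slash'):
    app.jinja_env.filters[name] = getattr(template_filters, name)
```

Once registered, a template could apply them as usual, e.g. `{{ value|nl2br }}`.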
2e97821b1d1a7f30a73010843e19ad66780a0522
|
sword/submitOnZenodo.py
|
sword/submitOnZenodo.py
|
import json
import requests
from os.path import basename
from dissemin.settings import ZENODO_KEY
#THIS IS PRIVATE
#url = "https://zenodo.org/api/deposit/depositions/1234/files?access_token=2SsQE9VkkgDQG1WDjrvrZqTJtkmsGHICEaccBY6iAEuBlSTdMC6QvcTR2HRv"
#TODO error handling
def submitPubli(paper,filePdf):
r = requests.get("https://zenodo.org/api/deposit/depositions?access_token=" + ZENODO_KEY)
print(r.status_code)
headers = {"Content-Type": "application/json"}
r = requests.post("https://zenodo.org/api/deposit/depositions?access_token=" + ZENODO_KEY , data="{}", headers=headers)
print(r.status_code)
deposition_id = r.json()['id']
data = {'filename':basename(filePdf)}
files = {'file': open(filePdf, 'rb')}
r = requests.post("https://zenodo.org/api/deposit/depositions/%s/files?access_token=%s" % (deposition_id,ZENODO_KEY), data=data, files=files)
print(r.status_code)
abstract = "No abstract"
for record in paper.sorted_oai_records:
if record.description:
abstract = record.description
break
data = {"metadata": {"title": paper.title,
"upload_type": "publication",
"publication_type": "conferencepaper",
"description": abstract,
"creators": map(lambda x:{"name": x.name.last +", " + x.name.first , "affiliation" : "ENS" } ,paper.sorted_authors)}}
for publi in paper.publication_set.all():
if publi.pubdate:
# TODO output more precise date if available
data['metadata']['publication_date'] = str(publi.pubdate.year)+"-01-01"
break
for publi in paper.publication_set.all():
if publi.doi:
data['metadata']['doi']= publi.doi
break
r = requests.put("https://zenodo.org/api/deposit/depositions/%s?access_token=%s" % ( deposition_id, ZENODO_KEY), data=json.dumps(data), headers=headers)
print(r.status_code)
r = requests.delete("https://zenodo.org/api/deposit/depositions/%s?access_token=%s" % ( deposition_id, ZENODO_KEY) )
# r = requests.post("https://zenodo.org/api/deposit/depositions/%s/actions/publish?access_token=2SsQE9VkkgDQG1WDjrvrZqTJtkmsGHICEaccBY6iAEuBlSTdMC6QvcTR2HRv" % deposition_id)
# print(r.status_code)
return r
|
Add tracking of zenodo file, put the secret key in the secret place.
|
Add tracking of zenodo file, put the secret key in the secret place.
|
Python
|
agpl-3.0
|
dissemin/dissemin,Lysxia/dissemin,Lysxia/dissemin,dissemin/dissemin,Lysxia/dissemin,wetneb/dissemin,dissemin/dissemin,Lysxia/dissemin,wetneb/dissemin,dissemin/dissemin,wetneb/dissemin,wetneb/dissemin,dissemin/dissemin
|
dd129aa3bb7d2115294a091a347648c423480b6e
|
Python/check_n_double_n.py
|
Python/check_n_double_n.py
|
# https://leetcode.com/problems/check-if-n-and-its-double-exist/
# Given an array arr of integers, check if there exists two integers N and M such that
# N is the double of M ( i.e. N = 2 * M).
# More formally check if there exists two indices i and j such that :
# i != j
# 0 <= i, j < arr.length
# arr[i] == 2 * arr[j]
import pytest
class Solution:
def checkIfExist(self, arr: list[int]) -> bool:
array_double = {}
for i in arr:
try:
array_double[i] += 1
except KeyError:
array_double[i] = 1
for i in arr:
if i == 0:
if array_double[i] == 2:
return True
elif (i + i) in array_double:
return True
return False
@pytest.mark.parametrize(
("arr", "result"),
[
([10, 2, 5, 3], True),
([-2, 0, 10, -19, 4, 6, -8], False),
([-2, 4, 0, 1], False),
([-2, 4, 0, 1, 0], True),
([0, 0], True),
],
)
def test_basic(arr: list[int], result: bool) -> None:
assert result == Solution().checkIfExist(arr)
|
Check if n and double of n exists
|
Check if n and double of n exists
|
Python
|
mit
|
anu-ka/coding-problems,anu-ka/coding-problems,anu-ka/coding-problems
|
3b2b72436459dcf58ab07466d5a2ed0425b17962
|
algorithms/diagonal_difference/kevin.py
|
algorithms/diagonal_difference/kevin.py
|
#!/usr/bin/env python
from typing import List
def get_matrix_row_from_input() -> List[int]:
return [int(index) for index in input().strip().split(' ')]
n = int(input().strip())
primary_diag_sum = 0
secondary_diag_sum = 0
for row_count in range(n):
row = get_matrix_row_from_input()
primary_diag_sum += row[row_count]
secondary_diag_sum += row[-1 - row_count]
print(abs(primary_diag_sum - secondary_diag_sum))
|
Add Diagonal Difference HackerRank Problem
|
Add Diagonal Difference HackerRank Problem
* https://www.hackerrank.com/challenges/diagonal-difference
|
Python
|
mit
|
PlattsSEC/HackerRank,PlattsSEC/HackerRank,PlattsSEC/HackerRank,PlattsSEC/HackerRank,PlattsSEC/HackerRank,PlattsSEC/HackerRank
|
19c652d9b5740e08219d1bc5688f663c7c7ab848
|
test/test_emo_cls.py
|
test/test_emo_cls.py
|
#!/usr/bin/env python
# -*- encoding: utf-8
import unittest
from src.emo_cls import EmoClassifier
from src.ec_settings import POS, NEG, NO_CLASS
ec = EmoClassifier(terms_fn=None,
bigrams_fn=None,
trigrams_fn=None,
terms_by_root_form_fn=None,
verbose=False,
is_dump_cls=False,
is_load_cached_cls=False)
class TestEmoClassifier(unittest.TestCase):
def test_classify_v1(self):
self.assertEqual(ec.classify(':)'), (POS, 1.0))
def test_classify_v2(self):
self.assertEqual(ec.classify('zzzzz'), (NO_CLASS, 1.0))
if __name__ == '__main__':
unittest.main()
|
Add tests for classify() method
|
Add tests for classify() method
|
Python
|
mit
|
wojtekwalczak/EmoClassifier
|
f4fcb99cc9dec1cf68697520b959569e937dcd33
|
tests/test_region.py
|
tests/test_region.py
|
from bioframe.region import parse_region
import pytest
def test_parse_region():
# UCSC-style names
assert parse_region("chr21") == ("chr21", 0, None)
assert parse_region("chr21:1000-2000") == ("chr21", 1000, 2000)
assert parse_region("chr21:1,000-2,000") == ("chr21", 1000, 2000)
# Ensembl style names
assert parse_region("6") == ("6", 0, None)
assert parse_region("6:1000-2000") == ("6", 1000, 2000)
assert parse_region("6:1,000-2,000") == ("6", 1000, 2000)
# FASTA style names
assert parse_region("gb|accession|locus") == ("gb|accession|locus", 0, None,)
assert parse_region("gb|accession|locus:1000-2000") == (
"gb|accession|locus",
1000,
2000,
)
assert parse_region("gb|accession|locus:1,000-2,000") == (
"gb|accession|locus",
1000,
2000,
)
# Punctuation in names (aside from :)
assert parse_region("name-with-hyphens-") == ("name-with-hyphens-", 0, None,)
assert parse_region("GL000207.1") == ("GL000207.1", 0, None)
assert parse_region("GL000207.1:1000-2000") == ("GL000207.1", 1000, 2000)
# Trailing dash
assert parse_region("chr21:1000-") == ("chr21", 1000, None)
# Humanized units
assert parse_region("6:1kb-2kb") == ("6", 1000, 2000)
assert parse_region("6:1k-2000") == ("6", 1000, 2000)
assert parse_region("6:1kb-2M") == ("6", 1000, 2000000)
assert parse_region("6:1Gb-") == ("6", 1000000000, None)
    with pytest.raises(ValueError):
        parse_region("chr1:2,000-1,000")  # reverse selection
    with pytest.raises(ValueError):
        parse_region("chr1::1000-2000")  # more than one colon
|
Add tests to make CI happy
|
Add tests to make CI happy
|
Python
|
mit
|
open2c/bioframe
|
Add tests to make CI happy
|
from bioframe.region import parse_region
import pytest
def test_parse_region():
# UCSC-style names
assert parse_region("chr21") == ("chr21", 0, None)
assert parse_region("chr21:1000-2000") == ("chr21", 1000, 2000)
assert parse_region("chr21:1,000-2,000") == ("chr21", 1000, 2000)
# Ensembl style names
assert parse_region("6") == ("6", 0, None)
assert parse_region("6:1000-2000") == ("6", 1000, 2000)
assert parse_region("6:1,000-2,000") == ("6", 1000, 2000)
# FASTA style names
assert parse_region("gb|accession|locus") == ("gb|accession|locus", 0, None,)
assert parse_region("gb|accession|locus:1000-2000") == (
"gb|accession|locus",
1000,
2000,
)
assert parse_region("gb|accession|locus:1,000-2,000") == (
"gb|accession|locus",
1000,
2000,
)
# Punctuation in names (aside from :)
assert parse_region("name-with-hyphens-") == ("name-with-hyphens-", 0, None,)
assert parse_region("GL000207.1") == ("GL000207.1", 0, None)
assert parse_region("GL000207.1:1000-2000") == ("GL000207.1", 1000, 2000)
# Trailing dash
assert parse_region("chr21:1000-") == ("chr21", 1000, None)
# Humanized units
assert parse_region("6:1kb-2kb") == ("6", 1000, 2000)
assert parse_region("6:1k-2000") == ("6", 1000, 2000)
assert parse_region("6:1kb-2M") == ("6", 1000, 2000000)
assert parse_region("6:1Gb-") == ("6", 1000000000, None)
    with pytest.raises(ValueError):
        parse_region("chr1:2,000-1,000")  # reverse selection
    with pytest.raises(ValueError):
        parse_region("chr1::1000-2000")  # more than one colon
|
<commit_before><commit_msg>Add tests to make CI happy<commit_after>
|
from bioframe.region import parse_region
import pytest
def test_parse_region():
# UCSC-style names
assert parse_region("chr21") == ("chr21", 0, None)
assert parse_region("chr21:1000-2000") == ("chr21", 1000, 2000)
assert parse_region("chr21:1,000-2,000") == ("chr21", 1000, 2000)
# Ensembl style names
assert parse_region("6") == ("6", 0, None)
assert parse_region("6:1000-2000") == ("6", 1000, 2000)
assert parse_region("6:1,000-2,000") == ("6", 1000, 2000)
# FASTA style names
assert parse_region("gb|accession|locus") == ("gb|accession|locus", 0, None,)
assert parse_region("gb|accession|locus:1000-2000") == (
"gb|accession|locus",
1000,
2000,
)
assert parse_region("gb|accession|locus:1,000-2,000") == (
"gb|accession|locus",
1000,
2000,
)
# Punctuation in names (aside from :)
assert parse_region("name-with-hyphens-") == ("name-with-hyphens-", 0, None,)
assert parse_region("GL000207.1") == ("GL000207.1", 0, None)
assert parse_region("GL000207.1:1000-2000") == ("GL000207.1", 1000, 2000)
# Trailing dash
assert parse_region("chr21:1000-") == ("chr21", 1000, None)
# Humanized units
assert parse_region("6:1kb-2kb") == ("6", 1000, 2000)
assert parse_region("6:1k-2000") == ("6", 1000, 2000)
assert parse_region("6:1kb-2M") == ("6", 1000, 2000000)
assert parse_region("6:1Gb-") == ("6", 1000000000, None)
    with pytest.raises(ValueError):
        parse_region("chr1:2,000-1,000")  # reverse selection
    with pytest.raises(ValueError):
        parse_region("chr1::1000-2000")  # more than one colon
|
Add tests to make CI happyfrom bioframe.region import parse_region
import pytest
def test_parse_region():
# UCSC-style names
assert parse_region("chr21") == ("chr21", 0, None)
assert parse_region("chr21:1000-2000") == ("chr21", 1000, 2000)
assert parse_region("chr21:1,000-2,000") == ("chr21", 1000, 2000)
# Ensembl style names
assert parse_region("6") == ("6", 0, None)
assert parse_region("6:1000-2000") == ("6", 1000, 2000)
assert parse_region("6:1,000-2,000") == ("6", 1000, 2000)
# FASTA style names
assert parse_region("gb|accession|locus") == ("gb|accession|locus", 0, None,)
assert parse_region("gb|accession|locus:1000-2000") == (
"gb|accession|locus",
1000,
2000,
)
assert parse_region("gb|accession|locus:1,000-2,000") == (
"gb|accession|locus",
1000,
2000,
)
# Punctuation in names (aside from :)
assert parse_region("name-with-hyphens-") == ("name-with-hyphens-", 0, None,)
assert parse_region("GL000207.1") == ("GL000207.1", 0, None)
assert parse_region("GL000207.1:1000-2000") == ("GL000207.1", 1000, 2000)
# Trailing dash
assert parse_region("chr21:1000-") == ("chr21", 1000, None)
# Humanized units
assert parse_region("6:1kb-2kb") == ("6", 1000, 2000)
assert parse_region("6:1k-2000") == ("6", 1000, 2000)
assert parse_region("6:1kb-2M") == ("6", 1000, 2000000)
assert parse_region("6:1Gb-") == ("6", 1000000000, None)
    with pytest.raises(ValueError):
        parse_region("chr1:2,000-1,000")  # reverse selection
    with pytest.raises(ValueError):
        parse_region("chr1::1000-2000")  # more than one colon
|
<commit_before><commit_msg>Add tests to make CI happy<commit_after>from bioframe.region import parse_region
import pytest
def test_parse_region():
# UCSC-style names
assert parse_region("chr21") == ("chr21", 0, None)
assert parse_region("chr21:1000-2000") == ("chr21", 1000, 2000)
assert parse_region("chr21:1,000-2,000") == ("chr21", 1000, 2000)
# Ensembl style names
assert parse_region("6") == ("6", 0, None)
assert parse_region("6:1000-2000") == ("6", 1000, 2000)
assert parse_region("6:1,000-2,000") == ("6", 1000, 2000)
# FASTA style names
assert parse_region("gb|accession|locus") == ("gb|accession|locus", 0, None,)
assert parse_region("gb|accession|locus:1000-2000") == (
"gb|accession|locus",
1000,
2000,
)
assert parse_region("gb|accession|locus:1,000-2,000") == (
"gb|accession|locus",
1000,
2000,
)
# Punctuation in names (aside from :)
assert parse_region("name-with-hyphens-") == ("name-with-hyphens-", 0, None,)
assert parse_region("GL000207.1") == ("GL000207.1", 0, None)
assert parse_region("GL000207.1:1000-2000") == ("GL000207.1", 1000, 2000)
# Trailing dash
assert parse_region("chr21:1000-") == ("chr21", 1000, None)
# Humanized units
assert parse_region("6:1kb-2kb") == ("6", 1000, 2000)
assert parse_region("6:1k-2000") == ("6", 1000, 2000)
assert parse_region("6:1kb-2M") == ("6", 1000, 2000000)
assert parse_region("6:1Gb-") == ("6", 1000000000, None)
    with pytest.raises(ValueError):
        parse_region("chr1:2,000-1,000")  # reverse selection
    with pytest.raises(ValueError):
        parse_region("chr1::1000-2000")  # more than one colon
|
|
7c7d0701dddf54006ba87e3e785caab6e53bd68a
|
config.py
|
config.py
|
import os
class Config(object):
DEBUG = False
ASSETS_DEBUG = False
cache = False
manifest = True
SQLALCHEMY_COMMIT_ON_TEARDOWN = False
SQLALCHEMY_RECORD_QUERIES = True
SQLALCHEMY_DATABASE_URI = 'postgresql://localhost/notifications_admin'
MAX_FAILED_LOGIN_COUNT = 10
PASS_SECRET_KEY = 'secret-key-unique-changeme'
SESSION_COOKIE_NAME = 'notify_admin_session'
SESSION_COOKIE_PATH = '/admin'
SESSION_COOKIE_HTTPONLY = True
SESSION_COOKIE_SECURE = True
NOTIFY_DATA_API_URL = os.getenv('NOTIFY_API_URL', "http://localhost:6001")
NOTIFY_DATA_API_AUTH_TOKEN = os.getenv('NOTIFY_API_TOKEN', "pLuj5kat5auC9Ve")
WTF_CSRF_ENABLED = True
SECRET_KEY = 'secret-key'
HTTP_PROTOCOL = 'http'
DANGEROUS_SALT = 'itsdangeroussalt'
class Development(Config):
DEBUG = True
class Test(Config):
DEBUG = False
SQLALCHEMY_DATABASE_URI = 'postgresql://localhost/test_notifications_admin'
WTF_CSRF_ENABLED = False
class Live(Config):
DEBUG = False
HTTP_PROTOCOL = 'https'
configs = {
'development': Development,
'test': Test
}
|
import os
class Config(object):
DEBUG = False
ASSETS_DEBUG = False
cache = False
manifest = True
SQLALCHEMY_COMMIT_ON_TEARDOWN = False
SQLALCHEMY_RECORD_QUERIES = True
SQLALCHEMY_DATABASE_URI = 'postgresql://localhost/notifications_admin'
MAX_FAILED_LOGIN_COUNT = 10
PASS_SECRET_KEY = 'secret-key-unique-changeme'
SESSION_COOKIE_NAME = 'notify_admin_session'
SESSION_COOKIE_PATH = '/admin'
SESSION_COOKIE_HTTPONLY = True
SESSION_COOKIE_SECURE = True
NOTIFY_DATA_API_URL = os.getenv('NOTIFY_API_URL', "http://localhost:6001")
NOTIFY_DATA_API_AUTH_TOKEN = os.getenv('NOTIFY_API_TOKEN', "dev-token")
WTF_CSRF_ENABLED = True
SECRET_KEY = 'secret-key'
HTTP_PROTOCOL = 'http'
DANGEROUS_SALT = 'itsdangeroussalt'
class Development(Config):
DEBUG = True
class Test(Config):
DEBUG = False
SQLALCHEMY_DATABASE_URI = 'postgresql://localhost/test_notifications_admin'
WTF_CSRF_ENABLED = False
class Live(Config):
DEBUG = False
HTTP_PROTOCOL = 'https'
configs = {
'development': Development,
'test': Test
}
|
Use a dev api token
|
Use a dev api token
|
Python
|
mit
|
alphagov/notifications-admin,alphagov/notifications-admin,alphagov/notifications-admin,gov-cjwaszczuk/notifications-admin,gov-cjwaszczuk/notifications-admin,gov-cjwaszczuk/notifications-admin,alphagov/notifications-admin,gov-cjwaszczuk/notifications-admin
|
import os
class Config(object):
DEBUG = False
ASSETS_DEBUG = False
cache = False
manifest = True
SQLALCHEMY_COMMIT_ON_TEARDOWN = False
SQLALCHEMY_RECORD_QUERIES = True
SQLALCHEMY_DATABASE_URI = 'postgresql://localhost/notifications_admin'
MAX_FAILED_LOGIN_COUNT = 10
PASS_SECRET_KEY = 'secret-key-unique-changeme'
SESSION_COOKIE_NAME = 'notify_admin_session'
SESSION_COOKIE_PATH = '/admin'
SESSION_COOKIE_HTTPONLY = True
SESSION_COOKIE_SECURE = True
NOTIFY_DATA_API_URL = os.getenv('NOTIFY_API_URL', "http://localhost:6001")
NOTIFY_DATA_API_AUTH_TOKEN = os.getenv('NOTIFY_API_TOKEN', "pLuj5kat5auC9Ve")
WTF_CSRF_ENABLED = True
SECRET_KEY = 'secret-key'
HTTP_PROTOCOL = 'http'
DANGEROUS_SALT = 'itsdangeroussalt'
class Development(Config):
DEBUG = True
class Test(Config):
DEBUG = False
SQLALCHEMY_DATABASE_URI = 'postgresql://localhost/test_notifications_admin'
WTF_CSRF_ENABLED = False
class Live(Config):
DEBUG = False
HTTP_PROTOCOL = 'https'
configs = {
'development': Development,
'test': Test
}
Use a dev api token
|
import os
class Config(object):
DEBUG = False
ASSETS_DEBUG = False
cache = False
manifest = True
SQLALCHEMY_COMMIT_ON_TEARDOWN = False
SQLALCHEMY_RECORD_QUERIES = True
SQLALCHEMY_DATABASE_URI = 'postgresql://localhost/notifications_admin'
MAX_FAILED_LOGIN_COUNT = 10
PASS_SECRET_KEY = 'secret-key-unique-changeme'
SESSION_COOKIE_NAME = 'notify_admin_session'
SESSION_COOKIE_PATH = '/admin'
SESSION_COOKIE_HTTPONLY = True
SESSION_COOKIE_SECURE = True
NOTIFY_DATA_API_URL = os.getenv('NOTIFY_API_URL', "http://localhost:6001")
NOTIFY_DATA_API_AUTH_TOKEN = os.getenv('NOTIFY_API_TOKEN', "dev-token")
WTF_CSRF_ENABLED = True
SECRET_KEY = 'secret-key'
HTTP_PROTOCOL = 'http'
DANGEROUS_SALT = 'itsdangeroussalt'
class Development(Config):
DEBUG = True
class Test(Config):
DEBUG = False
SQLALCHEMY_DATABASE_URI = 'postgresql://localhost/test_notifications_admin'
WTF_CSRF_ENABLED = False
class Live(Config):
DEBUG = False
HTTP_PROTOCOL = 'https'
configs = {
'development': Development,
'test': Test
}
|
<commit_before>import os
class Config(object):
DEBUG = False
ASSETS_DEBUG = False
cache = False
manifest = True
SQLALCHEMY_COMMIT_ON_TEARDOWN = False
SQLALCHEMY_RECORD_QUERIES = True
SQLALCHEMY_DATABASE_URI = 'postgresql://localhost/notifications_admin'
MAX_FAILED_LOGIN_COUNT = 10
PASS_SECRET_KEY = 'secret-key-unique-changeme'
SESSION_COOKIE_NAME = 'notify_admin_session'
SESSION_COOKIE_PATH = '/admin'
SESSION_COOKIE_HTTPONLY = True
SESSION_COOKIE_SECURE = True
NOTIFY_DATA_API_URL = os.getenv('NOTIFY_API_URL', "http://localhost:6001")
NOTIFY_DATA_API_AUTH_TOKEN = os.getenv('NOTIFY_API_TOKEN', "pLuj5kat5auC9Ve")
WTF_CSRF_ENABLED = True
SECRET_KEY = 'secret-key'
HTTP_PROTOCOL = 'http'
DANGEROUS_SALT = 'itsdangeroussalt'
class Development(Config):
DEBUG = True
class Test(Config):
DEBUG = False
SQLALCHEMY_DATABASE_URI = 'postgresql://localhost/test_notifications_admin'
WTF_CSRF_ENABLED = False
class Live(Config):
DEBUG = False
HTTP_PROTOCOL = 'https'
configs = {
'development': Development,
'test': Test
}
<commit_msg>Use a dev api token<commit_after>
|
import os
class Config(object):
DEBUG = False
ASSETS_DEBUG = False
cache = False
manifest = True
SQLALCHEMY_COMMIT_ON_TEARDOWN = False
SQLALCHEMY_RECORD_QUERIES = True
SQLALCHEMY_DATABASE_URI = 'postgresql://localhost/notifications_admin'
MAX_FAILED_LOGIN_COUNT = 10
PASS_SECRET_KEY = 'secret-key-unique-changeme'
SESSION_COOKIE_NAME = 'notify_admin_session'
SESSION_COOKIE_PATH = '/admin'
SESSION_COOKIE_HTTPONLY = True
SESSION_COOKIE_SECURE = True
NOTIFY_DATA_API_URL = os.getenv('NOTIFY_API_URL', "http://localhost:6001")
NOTIFY_DATA_API_AUTH_TOKEN = os.getenv('NOTIFY_API_TOKEN', "dev-token")
WTF_CSRF_ENABLED = True
SECRET_KEY = 'secret-key'
HTTP_PROTOCOL = 'http'
DANGEROUS_SALT = 'itsdangeroussalt'
class Development(Config):
DEBUG = True
class Test(Config):
DEBUG = False
SQLALCHEMY_DATABASE_URI = 'postgresql://localhost/test_notifications_admin'
WTF_CSRF_ENABLED = False
class Live(Config):
DEBUG = False
HTTP_PROTOCOL = 'https'
configs = {
'development': Development,
'test': Test
}
|
import os
class Config(object):
DEBUG = False
ASSETS_DEBUG = False
cache = False
manifest = True
SQLALCHEMY_COMMIT_ON_TEARDOWN = False
SQLALCHEMY_RECORD_QUERIES = True
SQLALCHEMY_DATABASE_URI = 'postgresql://localhost/notifications_admin'
MAX_FAILED_LOGIN_COUNT = 10
PASS_SECRET_KEY = 'secret-key-unique-changeme'
SESSION_COOKIE_NAME = 'notify_admin_session'
SESSION_COOKIE_PATH = '/admin'
SESSION_COOKIE_HTTPONLY = True
SESSION_COOKIE_SECURE = True
NOTIFY_DATA_API_URL = os.getenv('NOTIFY_API_URL', "http://localhost:6001")
NOTIFY_DATA_API_AUTH_TOKEN = os.getenv('NOTIFY_API_TOKEN', "pLuj5kat5auC9Ve")
WTF_CSRF_ENABLED = True
SECRET_KEY = 'secret-key'
HTTP_PROTOCOL = 'http'
DANGEROUS_SALT = 'itsdangeroussalt'
class Development(Config):
DEBUG = True
class Test(Config):
DEBUG = False
SQLALCHEMY_DATABASE_URI = 'postgresql://localhost/test_notifications_admin'
WTF_CSRF_ENABLED = False
class Live(Config):
DEBUG = False
HTTP_PROTOCOL = 'https'
configs = {
'development': Development,
'test': Test
}
Use a dev api tokenimport os
class Config(object):
DEBUG = False
ASSETS_DEBUG = False
cache = False
manifest = True
SQLALCHEMY_COMMIT_ON_TEARDOWN = False
SQLALCHEMY_RECORD_QUERIES = True
SQLALCHEMY_DATABASE_URI = 'postgresql://localhost/notifications_admin'
MAX_FAILED_LOGIN_COUNT = 10
PASS_SECRET_KEY = 'secret-key-unique-changeme'
SESSION_COOKIE_NAME = 'notify_admin_session'
SESSION_COOKIE_PATH = '/admin'
SESSION_COOKIE_HTTPONLY = True
SESSION_COOKIE_SECURE = True
NOTIFY_DATA_API_URL = os.getenv('NOTIFY_API_URL', "http://localhost:6001")
NOTIFY_DATA_API_AUTH_TOKEN = os.getenv('NOTIFY_API_TOKEN', "dev-token")
WTF_CSRF_ENABLED = True
SECRET_KEY = 'secret-key'
HTTP_PROTOCOL = 'http'
DANGEROUS_SALT = 'itsdangeroussalt'
class Development(Config):
DEBUG = True
class Test(Config):
DEBUG = False
SQLALCHEMY_DATABASE_URI = 'postgresql://localhost/test_notifications_admin'
WTF_CSRF_ENABLED = False
class Live(Config):
DEBUG = False
HTTP_PROTOCOL = 'https'
configs = {
'development': Development,
'test': Test
}
|
<commit_before>import os
class Config(object):
DEBUG = False
ASSETS_DEBUG = False
cache = False
manifest = True
SQLALCHEMY_COMMIT_ON_TEARDOWN = False
SQLALCHEMY_RECORD_QUERIES = True
SQLALCHEMY_DATABASE_URI = 'postgresql://localhost/notifications_admin'
MAX_FAILED_LOGIN_COUNT = 10
PASS_SECRET_KEY = 'secret-key-unique-changeme'
SESSION_COOKIE_NAME = 'notify_admin_session'
SESSION_COOKIE_PATH = '/admin'
SESSION_COOKIE_HTTPONLY = True
SESSION_COOKIE_SECURE = True
NOTIFY_DATA_API_URL = os.getenv('NOTIFY_API_URL', "http://localhost:6001")
NOTIFY_DATA_API_AUTH_TOKEN = os.getenv('NOTIFY_API_TOKEN', "pLuj5kat5auC9Ve")
WTF_CSRF_ENABLED = True
SECRET_KEY = 'secret-key'
HTTP_PROTOCOL = 'http'
DANGEROUS_SALT = 'itsdangeroussalt'
class Development(Config):
DEBUG = True
class Test(Config):
DEBUG = False
SQLALCHEMY_DATABASE_URI = 'postgresql://localhost/test_notifications_admin'
WTF_CSRF_ENABLED = False
class Live(Config):
DEBUG = False
HTTP_PROTOCOL = 'https'
configs = {
'development': Development,
'test': Test
}
<commit_msg>Use a dev api token<commit_after>import os
class Config(object):
DEBUG = False
ASSETS_DEBUG = False
cache = False
manifest = True
SQLALCHEMY_COMMIT_ON_TEARDOWN = False
SQLALCHEMY_RECORD_QUERIES = True
SQLALCHEMY_DATABASE_URI = 'postgresql://localhost/notifications_admin'
MAX_FAILED_LOGIN_COUNT = 10
PASS_SECRET_KEY = 'secret-key-unique-changeme'
SESSION_COOKIE_NAME = 'notify_admin_session'
SESSION_COOKIE_PATH = '/admin'
SESSION_COOKIE_HTTPONLY = True
SESSION_COOKIE_SECURE = True
NOTIFY_DATA_API_URL = os.getenv('NOTIFY_API_URL', "http://localhost:6001")
NOTIFY_DATA_API_AUTH_TOKEN = os.getenv('NOTIFY_API_TOKEN', "dev-token")
WTF_CSRF_ENABLED = True
SECRET_KEY = 'secret-key'
HTTP_PROTOCOL = 'http'
DANGEROUS_SALT = 'itsdangeroussalt'
class Development(Config):
DEBUG = True
class Test(Config):
DEBUG = False
SQLALCHEMY_DATABASE_URI = 'postgresql://localhost/test_notifications_admin'
WTF_CSRF_ENABLED = False
class Live(Config):
DEBUG = False
HTTP_PROTOCOL = 'https'
configs = {
'development': Development,
'test': Test
}
|
ed05b50ca560db868e760629de883695a787c0cd
|
tests/test_read_parsers.py
|
tests/test_read_parsers.py
|
# Tests for the ReadParser and Read classes.
import khmer
from khmer import ReadParser
import khmer_tst_utils as utils
def test_DEFAULT_ARGUMENTS( ):
read_names = [ ]
# Note: Using a data file where read names are just integers on [0,99).
rparser = ReadParser( utils.get_test_data( "random-20-a.fa" ) )
for read in rparser:
read_names.append( int( read.name ) )
# "Derandomize".
read_names.sort( )
# Each read number should match the corresponding name.
for m, n in enumerate( read_names ):
assert m == n
# TODO: Write more tests.
# vim: set ft=python ts=4 sts=4 sw=4 et tw=79:
|
Create test harness for 'Read' and 'ReadParser' classes. (Note: Need to finish writing tests.)
|
Create test harness for 'Read' and 'ReadParser' classes. (Note: Need to finish writing tests.)
|
Python
|
bsd-3-clause
|
kdmurray91/khmer,ged-lab/khmer,jas14/khmer,Winterflower/khmer,Winterflower/khmer,F1000Research/khmer,kdmurray91/khmer,F1000Research/khmer,souravsingh/khmer,souravsingh/khmer,ged-lab/khmer,souravsingh/khmer,ged-lab/khmer,Winterflower/khmer,F1000Research/khmer,kdmurray91/khmer,jas14/khmer,jas14/khmer
|
Create test harness for 'Read' and 'ReadParser' classes. (Note: Need to finish writing tests.)
|
# Tests for the ReadParser and Read classes.
import khmer
from khmer import ReadParser
import khmer_tst_utils as utils
def test_DEFAULT_ARGUMENTS( ):
read_names = [ ]
# Note: Using a data file where read names are just integers on [0,99).
rparser = ReadParser( utils.get_test_data( "random-20-a.fa" ) )
for read in rparser:
read_names.append( int( read.name ) )
# "Derandomize".
read_names.sort( )
# Each read number should match the corresponding name.
for m, n in enumerate( read_names ):
assert m == n
# TODO: Write more tests.
# vim: set ft=python ts=4 sts=4 sw=4 et tw=79:
|
<commit_before><commit_msg>Create test harness for 'Read' and 'ReadParser' classes. (Note: Need to finish writing tests.)<commit_after>
|
# Tests for the ReadParser and Read classes.
import khmer
from khmer import ReadParser
import khmer_tst_utils as utils
def test_DEFAULT_ARGUMENTS( ):
read_names = [ ]
# Note: Using a data file where read names are just integers on [0,99).
rparser = ReadParser( utils.get_test_data( "random-20-a.fa" ) )
for read in rparser:
read_names.append( int( read.name ) )
# "Derandomize".
read_names.sort( )
# Each read number should match the corresponding name.
for m, n in enumerate( read_names ):
assert m == n
# TODO: Write more tests.
# vim: set ft=python ts=4 sts=4 sw=4 et tw=79:
|
Create test harness for 'Read' and 'ReadParser' classes. (Note: Need to finish writing tests.)# Tests for the ReadParser and Read classes.
import khmer
from khmer import ReadParser
import khmer_tst_utils as utils
def test_DEFAULT_ARGUMENTS( ):
read_names = [ ]
# Note: Using a data file where read names are just integers on [0,99).
rparser = ReadParser( utils.get_test_data( "random-20-a.fa" ) )
for read in rparser:
read_names.append( int( read.name ) )
# "Derandomize".
read_names.sort( )
# Each read number should match the corresponding name.
for m, n in enumerate( read_names ):
assert m == n
# TODO: Write more tests.
# vim: set ft=python ts=4 sts=4 sw=4 et tw=79:
|
<commit_before><commit_msg>Create test harness for 'Read' and 'ReadParser' classes. (Note: Need to finish writing tests.)<commit_after># Tests for the ReadParser and Read classes.
import khmer
from khmer import ReadParser
import khmer_tst_utils as utils
def test_DEFAULT_ARGUMENTS( ):
read_names = [ ]
# Note: Using a data file where read names are just integers on [0,99).
rparser = ReadParser( utils.get_test_data( "random-20-a.fa" ) )
for read in rparser:
read_names.append( int( read.name ) )
# "Derandomize".
read_names.sort( )
# Each read number should match the corresponding name.
for m, n in enumerate( read_names ):
assert m == n
# TODO: Write more tests.
# vim: set ft=python ts=4 sts=4 sw=4 et tw=79:
|
|
511522f2e0d6399191d79e393ed6f14d3a843550
|
range_ghost_test.py
|
range_ghost_test.py
|
from dtest import Tester
from tools import *
from assertions import *
import os, sys, time
from ccmlib.cluster import Cluster
class TestRangeGhosts(Tester):
def ghosts_test(self):
""" Check range ghost are correctly removed by the system """
cluster = self.cluster
cluster.populate(1).start()
[node1] = cluster.nodelist()
time.sleep(.5)
cursor = self.cql_connection(node1).cursor()
self.create_ks(cursor, 'ks', 1)
self.create_cf(cursor, 'cf', gc_grace=0)
rows = 1000
for i in xrange(0, rows):
cursor.execute("UPDATE cf SET c = 'value' WHERE key = k%i" % i)
cursor.execute("SELECT * FROM cf LIMIT 10000")
res = cursor.fetchall()
assert len(res) == rows, res
node1.flush()
for i in xrange(0, rows/2):
cursor.execute("DELETE FROM cf WHERE key = k%i" % i)
cursor.execute("SELECT * FROM cf LIMIT 10000")
res = cursor.fetchall()
assert len(res) == rows, res
node1.flush()
time.sleep(1) # make sure tombstones are collected
node1.compact()
cursor.execute("SELECT * FROM cf LIMIT 10000")
res = cursor.fetchall()
assert len(res) == rows/2, res
|
Add test to check range ghosts are removed
|
Add test to check range ghosts are removed
|
Python
|
apache-2.0
|
thobbs/cassandra-dtest,snazy/cassandra-dtest,carlyeks/cassandra-dtest,krummas/cassandra-dtest,beobal/cassandra-dtest,thobbs/cassandra-dtest,tjake/cassandra-dtest,pcmanus/cassandra-dtest,spodkowinski/cassandra-dtest,aweisberg/cassandra-dtest,stef1927/cassandra-dtest,pauloricardomg/cassandra-dtest,snazy/cassandra-dtest,mambocab/cassandra-dtest,beobal/cassandra-dtest,spodkowinski/cassandra-dtest,yukim/cassandra-dtest,aweisberg/cassandra-dtest,riptano/cassandra-dtest,iamaleksey/cassandra-dtest,blerer/cassandra-dtest,bdeggleston/cassandra-dtest,mambocab/cassandra-dtest,blerer/cassandra-dtest,pauloricardomg/cassandra-dtest,bdeggleston/cassandra-dtest,riptano/cassandra-dtest,josh-mckenzie/cassandra-dtest,krummas/cassandra-dtest,stef1927/cassandra-dtest,iamaleksey/cassandra-dtest,carlyeks/cassandra-dtest
|
Add test to check range ghosts are removed
|
from dtest import Tester
from tools import *
from assertions import *
import os, sys, time
from ccmlib.cluster import Cluster
class TestRangeGhosts(Tester):
def ghosts_test(self):
""" Check range ghost are correctly removed by the system """
cluster = self.cluster
cluster.populate(1).start()
[node1] = cluster.nodelist()
time.sleep(.5)
cursor = self.cql_connection(node1).cursor()
self.create_ks(cursor, 'ks', 1)
self.create_cf(cursor, 'cf', gc_grace=0)
rows = 1000
for i in xrange(0, rows):
cursor.execute("UPDATE cf SET c = 'value' WHERE key = k%i" % i)
cursor.execute("SELECT * FROM cf LIMIT 10000")
res = cursor.fetchall()
assert len(res) == rows, res
node1.flush()
for i in xrange(0, rows/2):
cursor.execute("DELETE FROM cf WHERE key = k%i" % i)
cursor.execute("SELECT * FROM cf LIMIT 10000")
res = cursor.fetchall()
assert len(res) == rows, res
node1.flush()
time.sleep(1) # make sure tombstones are collected
node1.compact()
cursor.execute("SELECT * FROM cf LIMIT 10000")
res = cursor.fetchall()
assert len(res) == rows/2, res
|
<commit_before><commit_msg>Add test to check range ghosts are removed<commit_after>
|
from dtest import Tester
from tools import *
from assertions import *
import os, sys, time
from ccmlib.cluster import Cluster
class TestRangeGhosts(Tester):
def ghosts_test(self):
""" Check range ghost are correctly removed by the system """
cluster = self.cluster
cluster.populate(1).start()
[node1] = cluster.nodelist()
time.sleep(.5)
cursor = self.cql_connection(node1).cursor()
self.create_ks(cursor, 'ks', 1)
self.create_cf(cursor, 'cf', gc_grace=0)
rows = 1000
for i in xrange(0, rows):
cursor.execute("UPDATE cf SET c = 'value' WHERE key = k%i" % i)
cursor.execute("SELECT * FROM cf LIMIT 10000")
res = cursor.fetchall()
assert len(res) == rows, res
node1.flush()
for i in xrange(0, rows/2):
cursor.execute("DELETE FROM cf WHERE key = k%i" % i)
cursor.execute("SELECT * FROM cf LIMIT 10000")
res = cursor.fetchall()
assert len(res) == rows, res
node1.flush()
time.sleep(1) # make sure tombstones are collected
node1.compact()
cursor.execute("SELECT * FROM cf LIMIT 10000")
res = cursor.fetchall()
assert len(res) == rows/2, res
|
Add test to check range ghosts are removedfrom dtest import Tester
from tools import *
from assertions import *
import os, sys, time
from ccmlib.cluster import Cluster
class TestRangeGhosts(Tester):
def ghosts_test(self):
""" Check range ghost are correctly removed by the system """
cluster = self.cluster
cluster.populate(1).start()
[node1] = cluster.nodelist()
time.sleep(.5)
cursor = self.cql_connection(node1).cursor()
self.create_ks(cursor, 'ks', 1)
self.create_cf(cursor, 'cf', gc_grace=0)
rows = 1000
for i in xrange(0, rows):
cursor.execute("UPDATE cf SET c = 'value' WHERE key = k%i" % i)
cursor.execute("SELECT * FROM cf LIMIT 10000")
res = cursor.fetchall()
assert len(res) == rows, res
node1.flush()
for i in xrange(0, rows/2):
cursor.execute("DELETE FROM cf WHERE key = k%i" % i)
cursor.execute("SELECT * FROM cf LIMIT 10000")
res = cursor.fetchall()
assert len(res) == rows, res
node1.flush()
time.sleep(1) # make sure tombstones are collected
node1.compact()
cursor.execute("SELECT * FROM cf LIMIT 10000")
res = cursor.fetchall()
assert len(res) == rows/2, res
|
<commit_before><commit_msg>Add test to check range ghosts are removed<commit_after>from dtest import Tester
from tools import *
from assertions import *
import os, sys, time
from ccmlib.cluster import Cluster
class TestRangeGhosts(Tester):
def ghosts_test(self):
""" Check range ghost are correctly removed by the system """
cluster = self.cluster
cluster.populate(1).start()
[node1] = cluster.nodelist()
time.sleep(.5)
cursor = self.cql_connection(node1).cursor()
self.create_ks(cursor, 'ks', 1)
self.create_cf(cursor, 'cf', gc_grace=0)
rows = 1000
for i in xrange(0, rows):
cursor.execute("UPDATE cf SET c = 'value' WHERE key = k%i" % i)
cursor.execute("SELECT * FROM cf LIMIT 10000")
res = cursor.fetchall()
assert len(res) == rows, res
node1.flush()
for i in xrange(0, rows/2):
cursor.execute("DELETE FROM cf WHERE key = k%i" % i)
cursor.execute("SELECT * FROM cf LIMIT 10000")
res = cursor.fetchall()
assert len(res) == rows, res
node1.flush()
time.sleep(1) # make sure tombstones are collected
node1.compact()
cursor.execute("SELECT * FROM cf LIMIT 10000")
res = cursor.fetchall()
assert len(res) == rows/2, res
|
|
2ea737e3ef9d15e61504c8e35314a21b159fe830
|
pombola/hansard/management/commands/hansard_email_unmatched_speakers.py
|
pombola/hansard/management/commands/hansard_email_unmatched_speakers.py
|
from django.core.management.base import NoArgsCommand
from django.core.mail import send_mail
from pombola.hansard.models import Alias
class Command(NoArgsCommand):
help = 'Email a list of all the speaker names that have not been matched up to a real person'
def handle_noargs(self, **options):
unassigned = Alias.objects.unassigned()
count = unassigned.count()
        if count == 0:
return
body = []
body.append("There are {} Hansard speaker names that could not be matched to a person\n".format(count))
body.append("Please go to http://info.mzalendo.com/admin/hansard/alias/ to update list\n")
for alias in unassigned:
body.append(u"\t'{}'".format(alias.alias))
send_mail(
'[Hansard] Unmatched speaker names report',
'\n'.join(body),
'no-reply@info.mzalendo.com',
['mzalendo-managers@mysociety.org'],
)
|
Add command to email unassigned aliases
|
[KE] Add command to email unassigned aliases
This is currently done as part of the `bin/update_hansard.bash` script,
which runs the `hansard_list_unmatched_speakers` command. We want it to
be separate so that errors from running the Hansard update script go to
developers, and the unmatched aliases go to the Mzalendo managers.
|
Python
|
agpl-3.0
|
mysociety/pombola,mysociety/pombola,mysociety/pombola,mysociety/pombola,mysociety/pombola,mysociety/pombola
|
[KE] Add command to email unassigned aliases
This is currently done as part of the `bin/update_hansard.bash` script,
which runs the `hansard_list_unmatched_speakers` command. We want it to
be separate so that errors from running the Hansard update script go to
developers, and the unmatched aliases go to the Mzalendo managers.
|
from django.core.management.base import NoArgsCommand
from django.core.mail import send_mail
from pombola.hansard.models import Alias
class Command(NoArgsCommand):
help = 'Email a list of all the speaker names that have not been matched up to a real person'
def handle_noargs(self, **options):
unassigned = Alias.objects.unassigned()
count = unassigned.count()
        if count == 0:
return
body = []
body.append("There are {} Hansard speaker names that could not be matched to a person\n".format(count))
body.append("Please go to http://info.mzalendo.com/admin/hansard/alias/ to update list\n")
for alias in unassigned:
body.append(u"\t'{}'".format(alias.alias))
send_mail(
'[Hansard] Unmatched speaker names report',
'\n'.join(body),
'no-reply@info.mzalendo.com',
['mzalendo-managers@mysociety.org'],
)
|
<commit_before><commit_msg>[KE] Add command to email unassigned aliases
This is currently done as part of the `bin/update_hansard.bash` script,
which runs the `hansard_list_unmatched_speakers` command. We want it to
be separate so that errors from running the Hansard update script go to
developers, and the unmatched aliases go to the Mzalendo managers.<commit_after>
|
from django.core.management.base import NoArgsCommand
from django.core.mail import send_mail
from pombola.hansard.models import Alias
class Command(NoArgsCommand):
help = 'Email a list of all the speaker names that have not been matched up to a real person'
def handle_noargs(self, **options):
unassigned = Alias.objects.unassigned()
count = unassigned.count()
        if count == 0:
return
body = []
body.append("There are {} Hansard speaker names that could not be matched to a person\n".format(count))
body.append("Please go to http://info.mzalendo.com/admin/hansard/alias/ to update list\n")
for alias in unassigned:
body.append(u"\t'{}'".format(alias.alias))
send_mail(
'[Hansard] Unmatched speaker names report',
'\n'.join(body),
'no-reply@info.mzalendo.com',
['mzalendo-managers@mysociety.org'],
)
|
[KE] Add command to email unassigned aliases
This is currently done as part of the `bin/update_hansard.bash` script,
which runs the `hansard_list_unmatched_speakers` command. We want it to
be separate so that errors from running the Hansard update script go to
developers, and the unmatched aliases go to the Mzalendo managers.from django.core.management.base import NoArgsCommand
from django.core.mail import send_mail
from pombola.hansard.models import Alias
class Command(NoArgsCommand):
help = 'Email a list of all the speaker names that have not been matched up to a real person'
def handle_noargs(self, **options):
unassigned = Alias.objects.unassigned()
count = unassigned.count()
        if count == 0:
return
body = []
body.append("There are {} Hansard speaker names that could not be matched to a person\n".format(count))
body.append("Please go to http://info.mzalendo.com/admin/hansard/alias/ to update list\n")
for alias in unassigned:
body.append(u"\t'{}'".format(alias.alias))
send_mail(
'[Hansard] Unmatched speaker names report',
'\n'.join(body),
'no-reply@info.mzalendo.com',
['mzalendo-managers@mysociety.org'],
)
|
<commit_before><commit_msg>[KE] Add command to email unassigned aliases
This is currently done as part of the `bin/update_hansard.bash` script,
which runs the `hansard_list_unmatched_speakers` command. We want it to
be separate so that errors from running the Hansard update script go to
developers, and the unmatched aliases go to the Mzalendo managers.<commit_after>from django.core.management.base import NoArgsCommand
from django.core.mail import send_mail
from pombola.hansard.models import Alias
class Command(NoArgsCommand):
help = 'Email a list of all the speaker names that have not been matched up to a real person'
def handle_noargs(self, **options):
unassigned = Alias.objects.unassigned()
count = unassigned.count()
        if count == 0:
return
body = []
body.append("There are {} Hansard speaker names that could not be matched to a person\n".format(count))
body.append("Please go to http://info.mzalendo.com/admin/hansard/alias/ to update list\n")
for alias in unassigned:
body.append(u"\t'{}'".format(alias.alias))
send_mail(
'[Hansard] Unmatched speaker names report',
'\n'.join(body),
'no-reply@info.mzalendo.com',
['mzalendo-managers@mysociety.org'],
)
|
|
72c6c2a4ea2e3ddaacad4fb5376b6a48a12db200
|
logistic_regression.py
|
logistic_regression.py
|
import os
import numpy as np
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers.core import Dense, Activation
"""2-class logistic regression by keras"""
def plot_data(X, y):
positive = [i for i in range(len(y)) if y[i] == 1]
negative = [i for i in range(len(y)) if y[i] == 0]
plt.scatter(X[positive, 0], X[positive, 1], c='red', marker='o', label='positive')
plt.scatter(X[negative, 0], X[negative, 1], c='blue', marker='o', label='negative')
if __name__ == '__main__':
# fix random seed
seed = 1
np.random.seed(seed)
# load training data
data = np.genfromtxt(os.path.join('data', 'ex2data1.txt'), delimiter=',')
X = data[:, (0, 1)]
y = data[:, 2]
# plot training data
plt.figure(1)
plot_data(X, y)
# create the model
model = Sequential()
model.add(Dense(1, input_shape=(2, )))
model.add(Activation('sigmoid'))
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
# fit the model
model.fit(X, y, nb_epoch=1000, batch_size=1, verbose=1)
# get the learned weight
weights = model.layers[0].get_weights()
w1 = weights[0][0, 0]
w2 = weights[0][1, 0]
b = weights[1][0]
# draw decision boundary
plt.figure(1)
xmin, xmax = min(X[:, 1]), max(X[:, 1])
xs = np.linspace(xmin, xmax, 100)
ys = [- (w1 / w2) * x - (b / w2) for x in xs]
plt.plot(xs, ys, 'b-', label='decision boundary')
plt.xlabel('x1')
plt.ylabel('x2')
plt.xlim((30, 100))
plt.ylim((30, 100))
plt.legend()
plt.show()
|
Add a logistic regression example
|
Add a logistic regression example
|
Python
|
mit
|
aidiary/keras_examples,aidiary/keras_examples
|
Add a logistic regression example
|
import os
import numpy as np
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers.core import Dense, Activation
"""2-class logistic regression by keras"""
def plot_data(X, y):
positive = [i for i in range(len(y)) if y[i] == 1]
negative = [i for i in range(len(y)) if y[i] == 0]
plt.scatter(X[positive, 0], X[positive, 1], c='red', marker='o', label='positive')
plt.scatter(X[negative, 0], X[negative, 1], c='blue', marker='o', label='negative')
if __name__ == '__main__':
# fix random seed
seed = 1
np.random.seed(seed)
# load training data
data = np.genfromtxt(os.path.join('data', 'ex2data1.txt'), delimiter=',')
X = data[:, (0, 1)]
y = data[:, 2]
# plot training data
plt.figure(1)
plot_data(X, y)
# create the model
model = Sequential()
model.add(Dense(1, input_shape=(2, )))
model.add(Activation('sigmoid'))
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
# fit the model
model.fit(X, y, nb_epoch=1000, batch_size=1, verbose=1)
# get the learned weight
weights = model.layers[0].get_weights()
w1 = weights[0][0, 0]
w2 = weights[0][1, 0]
b = weights[1][0]
# draw decision boundary
plt.figure(1)
xmin, xmax = min(X[:, 1]), max(X[:, 1])
xs = np.linspace(xmin, xmax, 100)
ys = [- (w1 / w2) * x - (b / w2) for x in xs]
plt.plot(xs, ys, 'b-', label='decision boundary')
plt.xlabel('x1')
plt.ylabel('x2')
plt.xlim((30, 100))
plt.ylim((30, 100))
plt.legend()
plt.show()
|
<commit_before><commit_msg>Add a logistic regression example<commit_after>
|
import os
import numpy as np
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers.core import Dense, Activation
"""2-class logistic regression by keras"""
def plot_data(X, y):
positive = [i for i in range(len(y)) if y[i] == 1]
negative = [i for i in range(len(y)) if y[i] == 0]
plt.scatter(X[positive, 0], X[positive, 1], c='red', marker='o', label='positive')
plt.scatter(X[negative, 0], X[negative, 1], c='blue', marker='o', label='negative')
if __name__ == '__main__':
# fix random seed
seed = 1
np.random.seed(seed)
# load training data
data = np.genfromtxt(os.path.join('data', 'ex2data1.txt'), delimiter=',')
X = data[:, (0, 1)]
y = data[:, 2]
# plot training data
plt.figure(1)
plot_data(X, y)
# create the model
model = Sequential()
model.add(Dense(1, input_shape=(2, )))
model.add(Activation('sigmoid'))
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
# fit the model
model.fit(X, y, nb_epoch=1000, batch_size=1, verbose=1)
# get the learned weight
weights = model.layers[0].get_weights()
w1 = weights[0][0, 0]
w2 = weights[0][1, 0]
b = weights[1][0]
# draw decision boundary
plt.figure(1)
xmin, xmax = min(X[:, 1]), max(X[:, 1])
xs = np.linspace(xmin, xmax, 100)
ys = [- (w1 / w2) * x - (b / w2) for x in xs]
plt.plot(xs, ys, 'b-', label='decision boundary')
plt.xlabel('x1')
plt.ylabel('x2')
plt.xlim((30, 100))
plt.ylim((30, 100))
plt.legend()
plt.show()
|
Add a logistic regression exampleimport os
import numpy as np
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers.core import Dense, Activation
"""2-class logistic regression by keras"""
def plot_data(X, y):
positive = [i for i in range(len(y)) if y[i] == 1]
negative = [i for i in range(len(y)) if y[i] == 0]
plt.scatter(X[positive, 0], X[positive, 1], c='red', marker='o', label='positive')
plt.scatter(X[negative, 0], X[negative, 1], c='blue', marker='o', label='negative')
if __name__ == '__main__':
# fix random seed
seed = 1
np.random.seed(seed)
# load training data
data = np.genfromtxt(os.path.join('data', 'ex2data1.txt'), delimiter=',')
X = data[:, (0, 1)]
y = data[:, 2]
# plot training data
plt.figure(1)
plot_data(X, y)
# create the model
model = Sequential()
model.add(Dense(1, input_shape=(2, )))
model.add(Activation('sigmoid'))
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
# fit the model
model.fit(X, y, nb_epoch=1000, batch_size=1, verbose=1)
# get the learned weight
weights = model.layers[0].get_weights()
w1 = weights[0][0, 0]
w2 = weights[0][1, 0]
b = weights[1][0]
# draw decision boundary
plt.figure(1)
xmin, xmax = min(X[:, 1]), max(X[:, 1])
xs = np.linspace(xmin, xmax, 100)
ys = [- (w1 / w2) * x - (b / w2) for x in xs]
plt.plot(xs, ys, 'b-', label='decision boundary')
plt.xlabel('x1')
plt.ylabel('x2')
plt.xlim((30, 100))
plt.ylim((30, 100))
plt.legend()
plt.show()
|
<commit_before><commit_msg>Add a logistic regression example<commit_after>import os
import numpy as np
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers.core import Dense, Activation
"""2-class logistic regression by keras"""
def plot_data(X, y):
positive = [i for i in range(len(y)) if y[i] == 1]
negative = [i for i in range(len(y)) if y[i] == 0]
plt.scatter(X[positive, 0], X[positive, 1], c='red', marker='o', label='positive')
plt.scatter(X[negative, 0], X[negative, 1], c='blue', marker='o', label='negative')
if __name__ == '__main__':
# fix random seed
seed = 1
np.random.seed(seed)
# load training data
data = np.genfromtxt(os.path.join('data', 'ex2data1.txt'), delimiter=',')
X = data[:, (0, 1)]
y = data[:, 2]
# plot training data
plt.figure(1)
plot_data(X, y)
# create the model
model = Sequential()
model.add(Dense(1, input_shape=(2, )))
model.add(Activation('sigmoid'))
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
# fit the model
model.fit(X, y, nb_epoch=1000, batch_size=1, verbose=1)
# get the learned weight
weights = model.layers[0].get_weights()
w1 = weights[0][0, 0]
w2 = weights[0][1, 0]
b = weights[1][0]
# draw decision boundary
plt.figure(1)
xmin, xmax = min(X[:, 1]), max(X[:, 1])
xs = np.linspace(xmin, xmax, 100)
ys = [- (w1 / w2) * x - (b / w2) for x in xs]
plt.plot(xs, ys, 'b-', label='decision boundary')
plt.xlabel('x1')
plt.ylabel('x2')
plt.xlim((30, 100))
plt.ylim((30, 100))
plt.legend()
plt.show()
|
|
3c94874e1ad30325cb0464eec956d55c97130233
|
clean_table.py
|
clean_table.py
|
from lxml import etree
from urllib.request import urlopen
import pandas as pd
DATA_URL = "https://en.wikipedia.org/w/index.php?title=Spacecraft_propulsion&oldid=760799107"
parser = etree.HTMLParser()
with urlopen(DATA_URL) as fp:
all_html = etree.parse(fp, parser)
tables = [t for t in all_html.xpath(r"//table[@class='wikitable sortable']")]
table = tables[0]
# Clear sort values
for span in table.xpath(r"//span[@style='display:none']"):
span.text = ""
with open("table_stripped_final.html", "w") as fp:
print(etree.tostring(table, pretty_print=True).decode("utf-8"), file=fp)
# TODO: We could save an intermediate file here
data = pd.read_html("table_stripped_final.html")[0]
data.columns = data.iloc[0].values
# Remove top and bottom headers
data = data.iloc[1:-1]
# Use None values
data = data.replace(["?"], [None])
# Replace non-ASCII character
data = data.replace(["(.*) – (.*)"], [r"\1 ~ \2"], regex=True)
# Remove citation marks
data = data.replace([r"(.*)\[(full )?citation needed\]"], [r"\1"], regex=True)
data = data.replace(["([^\]]*)(\[.+\])+"], [r"\1"], regex=True)
# Final cleanup of corner cases
data.at[12, "Thrust (N)"] = "9/km2 ~ 230/km2"
data.at[12, "Technology readiness level"] = "5: Light-sail validated in lit vacuum\'"
data.at[25, "Technology readiness level"] = data.at[25, "Thrust (N)"]
data.at[25, "Thrust (N)"] = None
data.at[25, "Effective exhaust velocity (km/s)"] = None
data.at[31, "Technology readiness level"] = data.at[31, "Thrust (N)"]
data.at[31, "Thrust (N)"] = None
data.at[31, "Effective exhaust velocity (km/s)"] = None
# Export
data.to_csv("table.csv", index=False)
# Simplify LaTeX output
data["TRL"] = data.pop("Technology readiness level").replace(["(\d):.*"], [r"\1"], regex=True).astype(int)
# http://tex.stackexchange.com/a/63592/2488
with open("table_long.tex", "w") as fp:
data.fillna("?").to_latex(fp, index=False, longtable=True)
|
Add script to extract table of propulsion methods
|
Add script to extract table of propulsion methods
TODO: Organize code
|
Python
|
mit
|
Juanlu001/pfc-uc3m
|
Add script to extract table of propulsion methods
TODO: Organize code
|
from lxml import etree
from urllib.request import urlopen
import pandas as pd
DATA_URL = "https://en.wikipedia.org/w/index.php?title=Spacecraft_propulsion&oldid=760799107"
parser = etree.HTMLParser()
with urlopen(DATA_URL) as fp:
all_html = etree.parse(fp, parser)
tables = [t for t in all_html.xpath(r"//table[@class='wikitable sortable']")]
table = tables[0]
# Clear sort values
for span in table.xpath(r"//span[@style='display:none']"):
span.text = ""
with open("table_stripped_final.html", "w") as fp:
print(etree.tostring(table, pretty_print=True).decode("utf-8"), file=fp)
# TODO: We could save an intermediate file here
data = pd.read_html("table_stripped_final.html")[0]
data.columns = data.iloc[0].values
# Remove top and bottom headers
data = data.iloc[1:-1]
# Use None values
data = data.replace(["?"], [None])
# Replace non-ASCII character
data = data.replace(["(.*) – (.*)"], [r"\1 ~ \2"], regex=True)
# Remove citation marks
data = data.replace([r"(.*)\[(full )?citation needed\]"], [r"\1"], regex=True)
data = data.replace(["([^\]]*)(\[.+\])+"], [r"\1"], regex=True)
# Final cleanup of corner cases
data.at[12, "Thrust (N)"] = "9/km2 ~ 230/km2"
data.at[12, "Technology readiness level"] = "5: Light-sail validated in lit vacuum\'"
data.at[25, "Technology readiness level"] = data.at[25, "Thrust (N)"]
data.at[25, "Thrust (N)"] = None
data.at[25, "Effective exhaust velocity (km/s)"] = None
data.at[31, "Technology readiness level"] = data.at[31, "Thrust (N)"]
data.at[31, "Thrust (N)"] = None
data.at[31, "Effective exhaust velocity (km/s)"] = None
# Export
data.to_csv("table.csv", index=False)
# Simplify LaTeX output
data["TRL"] = data.pop("Technology readiness level").replace(["(\d):.*"], [r"\1"], regex=True).astype(int)
# http://tex.stackexchange.com/a/63592/2488
with open("table_long.tex", "w") as fp:
data.fillna("?").to_latex(fp, index=False, longtable=True)
|
<commit_before><commit_msg>Add script to extract table of propulsion methods
TODO: Organize code<commit_after>
|
from lxml import etree
from urllib.request import urlopen
import pandas as pd
DATA_URL = "https://en.wikipedia.org/w/index.php?title=Spacecraft_propulsion&oldid=760799107"
parser = etree.HTMLParser()
with urlopen(DATA_URL) as fp:
all_html = etree.parse(fp, parser)
tables = [t for t in all_html.xpath(r"//table[@class='wikitable sortable']")]
table = tables[0]
# Clear sort values
for span in table.xpath(r"//span[@style='display:none']"):
span.text = ""
with open("table_stripped_final.html", "w") as fp:
print(etree.tostring(table, pretty_print=True).decode("utf-8"), file=fp)
# TODO: We could save an intermediate file here
data = pd.read_html("table_stripped_final.html")[0]
data.columns = data.iloc[0].values
# Remove top and bottom headers
data = data.iloc[1:-1]
# Use None values
data = data.replace(["?"], [None])
# Replace non-ASCII character
data = data.replace(["(.*) – (.*)"], [r"\1 ~ \2"], regex=True)
# Remove citation marks
data = data.replace([r"(.*)\[(full )?citation needed\]"], [r"\1"], regex=True)
data = data.replace(["([^\]]*)(\[.+\])+"], [r"\1"], regex=True)
# Final cleanup of corner cases
data.at[12, "Thrust (N)"] = "9/km2 ~ 230/km2"
data.at[12, "Technology readiness level"] = "5: Light-sail validated in lit vacuum\'"
data.at[25, "Technology readiness level"] = data.at[25, "Thrust (N)"]
data.at[25, "Thrust (N)"] = None
data.at[25, "Effective exhaust velocity (km/s)"] = None
data.at[31, "Technology readiness level"] = data.at[31, "Thrust (N)"]
data.at[31, "Thrust (N)"] = None
data.at[31, "Effective exhaust velocity (km/s)"] = None
# Export
data.to_csv("table.csv", index=False)
# Simplify LaTeX output
data["TRL"] = data.pop("Technology readiness level").replace(["(\d):.*"], [r"\1"], regex=True).astype(int)
# http://tex.stackexchange.com/a/63592/2488
with open("table_long.tex", "w") as fp:
data.fillna("?").to_latex(fp, index=False, longtable=True)
|
Add script to extract table of propulsion methods
TODO: Organize codefrom lxml import etree
from urllib.request import urlopen
import pandas as pd
DATA_URL = "https://en.wikipedia.org/w/index.php?title=Spacecraft_propulsion&oldid=760799107"
parser = etree.HTMLParser()
with urlopen(DATA_URL) as fp:
all_html = etree.parse(fp, parser)
tables = [t for t in all_html.xpath(r"//table[@class='wikitable sortable']")]
table = tables[0]
# Clear sort values
for span in table.xpath(r"//span[@style='display:none']"):
span.text = ""
with open("table_stripped_final.html", "w") as fp:
print(etree.tostring(table, pretty_print=True).decode("utf-8"), file=fp)
# TODO: We could save an intermediate file here
data = pd.read_html("table_stripped_final.html")[0]
data.columns = data.iloc[0].values
# Remove top and bottom headers
data = data.iloc[1:-1]
# Use None values
data = data.replace(["?"], [None])
# Replace non-ASCII character
data = data.replace(["(.*) – (.*)"], [r"\1 ~ \2"], regex=True)
# Remove citation marks
data = data.replace([r"(.*)\[(full )?citation needed\]"], [r"\1"], regex=True)
data = data.replace(["([^\]]*)(\[.+\])+"], [r"\1"], regex=True)
# Final cleanup of corner cases
data.at[12, "Thrust (N)"] = "9/km2 ~ 230/km2"
data.at[12, "Technology readiness level"] = "5: Light-sail validated in lit vacuum\'"
data.at[25, "Technology readiness level"] = data.at[25, "Thrust (N)"]
data.at[25, "Thrust (N)"] = None
data.at[25, "Effective exhaust velocity (km/s)"] = None
data.at[31, "Technology readiness level"] = data.at[31, "Thrust (N)"]
data.at[31, "Thrust (N)"] = None
data.at[31, "Effective exhaust velocity (km/s)"] = None
# Export
data.to_csv("table.csv", index=False)
# Simplify LaTeX output
data["TRL"] = data.pop("Technology readiness level").replace(["(\d):.*"], [r"\1"], regex=True).astype(int)
# http://tex.stackexchange.com/a/63592/2488
with open("table_long.tex", "w") as fp:
data.fillna("?").to_latex(fp, index=False, longtable=True)
|
<commit_before><commit_msg>Add script to extract table of propulsion methods
TODO: Organize code<commit_after>from lxml import etree
from urllib.request import urlopen
import pandas as pd
DATA_URL = "https://en.wikipedia.org/w/index.php?title=Spacecraft_propulsion&oldid=760799107"
parser = etree.HTMLParser()
with urlopen(DATA_URL) as fp:
all_html = etree.parse(fp, parser)
tables = [t for t in all_html.xpath(r"//table[@class='wikitable sortable']")]
table = tables[0]
# Clear sort values
for span in table.xpath(r"//span[@style='display:none']"):
span.text = ""
with open("table_stripped_final.html", "w") as fp:
print(etree.tostring(table, pretty_print=True).decode("utf-8"), file=fp)
# TODO: We could save an intermediate file here
data = pd.read_html("table_stripped_final.html")[0]
data.columns = data.iloc[0].values
# Remove top and bottom headers
data = data.iloc[1:-1]
# Use None values
data = data.replace(["?"], [None])
# Replace non-ASCII character
data = data.replace(["(.*) – (.*)"], [r"\1 ~ \2"], regex=True)
# Remove citation marks
data = data.replace([r"(.*)\[(full )?citation needed\]"], [r"\1"], regex=True)
data = data.replace(["([^\]]*)(\[.+\])+"], [r"\1"], regex=True)
# Final cleanup of corner cases
data.at[12, "Thrust (N)"] = "9/km2 ~ 230/km2"
data.at[12, "Technology readiness level"] = "5: Light-sail validated in lit vacuum\'"
data.at[25, "Technology readiness level"] = data.at[25, "Thrust (N)"]
data.at[25, "Thrust (N)"] = None
data.at[25, "Effective exhaust velocity (km/s)"] = None
data.at[31, "Technology readiness level"] = data.at[31, "Thrust (N)"]
data.at[31, "Thrust (N)"] = None
data.at[31, "Effective exhaust velocity (km/s)"] = None
# Export
data.to_csv("table.csv", index=False)
# Simplify LaTeX output
data["TRL"] = data.pop("Technology readiness level").replace(["(\d):.*"], [r"\1"], regex=True).astype(int)
# http://tex.stackexchange.com/a/63592/2488
with open("table_long.tex", "w") as fp:
data.fillna("?").to_latex(fp, index=False, longtable=True)
|
|
901c484590367e5e6c98d6f30d355fd6b9707861
|
get_jamdb_users.py
|
get_jamdb_users.py
|
import requests
import json
JAMDB_AUTHORIZATION_TOKEN = 'JWT_SECRET_TOKEN'
# To set this make this request
# POST https://metadata.osf.io/v1/auth
#
# {
# "data": {
# "type": "users",
# "attributes": {
# "provider": "osf",
# "access_token": "PERSONAL_ACCESS_TOKEN"
# }
# }
# }
# Fill this in with a Personal Access Token from the osf
# with a user that has super magic access to JamDB for lookit accounts
# The JAMDB_AUTHORIZATION_TOKEN will be returned as data.attributes.token in the response.
# It's good for a limited time and the call will need to be repeated to rerun this.
def get_jamdb_users():
with open('./participants.json', mode='w') as f:
peeps = []
for x in range(1, 35):
try:
response = requests.get(
url="https://metadata.osf.io/v1/id/collections/lookit.accounts/_search",
params={
"page[size]": "100",
"page": str(x),
},
headers={
"Authorization": JAMDB_AUTHORIZATION_TOKEN,
},
)
print('Response HTTP Status Code: {status_code}'.format(
status_code=response.status_code))
            except requests.exceptions.RequestException:
                print('HTTP Request failed')
                continue
try:
peeps.extend(response.json()['data'])
except KeyError:
continue
print(len(peeps))
f.write(json.dumps(peeps, indent=4))
|
Include script to get users from jamdb lookit
|
Include script to get users from jamdb lookit
|
Python
|
apache-2.0
|
CenterForOpenScience/lookit-api,CenterForOpenScience/lookit-api,CenterForOpenScience/lookit-api
|
Include script to get users from jamdb lookit
|
import requests
import json
JAMDB_AUTHORIZATION_TOKEN = 'JWT_SECRET_TOKEN'
# To set this make this request
# POST https://metadata.osf.io/v1/auth
#
# {
# "data": {
# "type": "users",
# "attributes": {
# "provider": "osf",
# "access_token": "PERSONAL_ACCESS_TOKEN"
# }
# }
# }
# Fill this in with a Personal Access Token from the osf
# with a user that has super magic access to JamDB for lookit accounts
# The JAMDB_AUTHORIZATION_TOKEN will be returned as data.attributes.token in the response.
# It's good for a limited time and the call will need to be repeated to rerun this.
def get_jamdb_users():
with open('./participants.json', mode='w') as f:
peeps = []
for x in range(1, 35):
try:
response = requests.get(
url="https://metadata.osf.io/v1/id/collections/lookit.accounts/_search",
params={
"page[size]": "100",
"page": str(x),
},
headers={
"Authorization": JAMDB_AUTHORIZATION_TOKEN,
},
)
print('Response HTTP Status Code: {status_code}'.format(
status_code=response.status_code))
            except requests.exceptions.RequestException:
                print('HTTP Request failed')
                continue
try:
peeps.extend(response.json()['data'])
except KeyError:
continue
print(len(peeps))
f.write(json.dumps(peeps, indent=4))
|
<commit_before><commit_msg>Include script to get users from jamdb lookit<commit_after>
|
import requests
import json
JAMDB_AUTHORIZATION_TOKEN = 'JWT_SECRET_TOKEN'
# To set this make this request
# POST https://metadata.osf.io/v1/auth
#
# {
# "data": {
# "type": "users",
# "attributes": {
# "provider": "osf",
# "access_token": "PERSONAL_ACCESS_TOKEN"
# }
# }
# }
# Fill this in with a Personal Access Token from the osf
# with a user that has super magic access to JamDB for lookit accounts
# The JAMDB_AUTHORIZATION_TOKEN will be returned as data.attributes.token in the response.
# It's good for a limited time and the call will need to be repeated to rerun this.
def get_jamdb_users():
with open('./participants.json', mode='w') as f:
peeps = []
for x in range(1, 35):
try:
response = requests.get(
url="https://metadata.osf.io/v1/id/collections/lookit.accounts/_search",
params={
"page[size]": "100",
"page": str(x),
},
headers={
"Authorization": JAMDB_AUTHORIZATION_TOKEN,
},
)
print('Response HTTP Status Code: {status_code}'.format(
status_code=response.status_code))
except requests.exceptions.RequestException:
print('HTTP Request failed')
try:
peeps.extend(response.json()['data'])
except KeyError:
continue
print(len(peeps))
f.write(json.dumps(peeps, indent=4))
|
Include script to get users from jamdb lookitimport requests
import json
JAMDB_AUTHORIZATION_TOKEN = 'JWT_SECRET_TOKEN'
# To set this make this request
# POST https://metadata.osf.io/v1/auth
#
# {
# "data": {
# "type": "users",
# "attributes": {
# "provider": "osf",
# "access_token": "PERSONAL_ACCESS_TOKEN"
# }
# }
# }
# Fill this in with a Personal Access Token from the osf
# with a user that has super magic access to JamDB for lookit accounts
# The JAMDB_AUTHORIZATION_TOKEN will be returned as data.attributes.token in the response.
# It's good for a limited time and the call will need to be repeated to rerun this.
def get_jamdb_users():
with open('./participants.json', mode='w') as f:
peeps = []
for x in range(1, 35):
try:
response = requests.get(
url="https://metadata.osf.io/v1/id/collections/lookit.accounts/_search",
params={
"page[size]": "100",
"page": str(x),
},
headers={
"Authorization": JAMDB_AUTHORIZATION_TOKEN,
},
)
print('Response HTTP Status Code: {status_code}'.format(
status_code=response.status_code))
except requests.exceptions.RequestException:
print('HTTP Request failed')
try:
peeps.extend(response.json()['data'])
except KeyError:
continue
print(len(peeps))
f.write(json.dumps(peeps, indent=4))
|
<commit_before><commit_msg>Include script to get users from jamdb lookit<commit_after>import requests
import json
JAMDB_AUTHORIZATION_TOKEN = 'JWT_SECRET_TOKEN'
# To set this make this request
# POST https://metadata.osf.io/v1/auth
#
# {
# "data": {
# "type": "users",
# "attributes": {
# "provider": "osf",
# "access_token": "PERSONAL_ACCESS_TOKEN"
# }
# }
# }
# Fill this in with a Personal Access Token from the osf
# with a user that has super magic access to JamDB for lookit accounts
# The JAMDB_AUTHORIZATION_TOKEN will be returned as data.attributes.token in the response.
# It's good for a limited time and the call will need to be repeated to rerun this.
def get_jamdb_users():
with open('./participants.json', mode='w') as f:
peeps = []
for x in range(1, 35):
try:
response = requests.get(
url="https://metadata.osf.io/v1/id/collections/lookit.accounts/_search",
params={
"page[size]": "100",
"page": str(x),
},
headers={
"Authorization": JAMDB_AUTHORIZATION_TOKEN,
},
)
print('Response HTTP Status Code: {status_code}'.format(
status_code=response.status_code))
except requests.exceptions.RequestException:
print('HTTP Request failed')
try:
peeps.extend(response.json()['data'])
except KeyError:
continue
print(len(peeps))
f.write(json.dumps(peeps, indent=4))
|
|
d5ac116901d554b055386c71aa4a9c70b2291a33
|
myAssert.py
|
myAssert.py
|
from __future__ import print_function
def areEqual(expect, val, eps = 0.01):
print("Expected: ", expect, " actual: ", val)
try:
diff = abs(float(val) / float(expect) - 1.0)
assert diff < eps, "***** Values don't match, expected= {:.12f}, found= {:.12f}, diff= {:.12f}. *****".format(expect, val, diff)
assert expect * val >= 0.0, "***** Values don't have the same sign: expected= {:f}, found= {:f}. *****".format(expect, val)
except BaseException as be:
print(be)
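A brief usage sketch (illustrative only): the check passes silently when the relative difference is within the default 1% tolerance, and prints the mismatch message otherwise.
areEqual(3.14159, 3.14)   # relative difference ~0.05%, within eps=0.01
areEqual(100.0, 90.0)     # relative difference 10%, prints the "Values don't match" message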
|
Put my custom assert function into its own module.
|
Put my custom assert function into its own module.
|
Python
|
agpl-3.0
|
cielling/jupyternbs
|
Put my custom assert function into its own module.
|
from __future__ import print_function
def areEqual(expect, val, eps = 0.01):
print("Expected: ", expect, " actual: ", val)
try:
diff = abs(float(val) / float(expect) - 1.0)
assert diff < eps, "***** Values don't match, expected= {:.12f}, found= {:.12f}, diff= {:.12f}. *****".format(expect, val, diff)
assert expect * val >= 0.0, "***** Values don't have the same sign: expected= {:f}, found= {:f}. *****".format(expect, val)
except BaseException as be:
print(be)
|
<commit_before><commit_msg>Put my custom assert function into its own module.<commit_after>
|
from __future__ import print_function
def areEqual(expect, val, eps = 0.01):
print("Expected: ", expect, " actual: ", val)
try:
diff = abs(float(val) / float(expect) - 1.0)
assert diff < eps, "***** Values don't match, expected= {:.12f}, found= {:.12f}, diff= {:.12f}. *****".format(expect, val, diff)
assert expect * val >= 0.0, "***** Values don't have the same sign: expected= {:f}, found= {:f}. *****".format(expect, val)
except BaseException as be:
print(be)
|
Put my custom assert function into its own module.from __future__ import print_function
def areEqual(expect, val, eps = 0.01):
print("Expected: ", expect, " actual: ", val)
try:
diff = abs(float(val) / float(expect) - 1.0)
assert diff < eps, "***** Values don't match, expected= {:.12f}, found= {:.12f}, diff= {:.12f}. *****".format(expect, val, diff)
assert expect * val >= 0.0, "***** Values don't have the same sign: expected= {:f}, found= {:f}. *****".format(expect, val)
except BaseException as be:
print(be)
|
<commit_before><commit_msg>Put my custom assert function into its own module.<commit_after>from __future__ import print_function
def areEqual(expect, val, eps = 0.01):
print("Expected: ", expect, " actual: ", val)
try:
diff = abs(float(val) / float(expect) - 1.0)
assert diff < eps, "***** Values don't match, expected= {:.12f}, found= {:.12f}, diff= {:.12f}. *****".format(expect, val, diff)
assert expect * val >= 0.0, "***** Values don't have the same sign: expected= {:f}, found= {:f}. *****".format(expect, val)
except BaseException as be:
print(be)
|
|
4a8a2aa12134dab63632e594c900dbfd93129f97
|
openomni/nonce_test.py
|
openomni/nonce_test.py
|
import unittest
from nonce import *
class NonceTestCase(unittest.TestCase):
def test_nonces(self):
nonces = generate_nonces(42560, 661771, 4)
self.assertEqual(nonces[0], 0x8c61ee59)
self.assertEqual(nonces[1], 0xc0256620)
self.assertEqual(nonces[2], 0x15022c8a)
self.assertEqual(nonces[3], 0xacf076ca)
|
Add test for nonce generation
|
Add test for nonce generation
|
Python
|
mit
|
openaps/openomni,openaps/openomni,openaps/openomni
|
Add test for nonce generation
|
import unittest
from nonce import *
class NonceTestCase(unittest.TestCase):
def test_nonces(self):
nonces = generate_nonces(42560, 661771, 4)
self.assertEqual(nonces[0], 0x8c61ee59)
self.assertEqual(nonces[1], 0xc0256620)
self.assertEqual(nonces[2], 0x15022c8a)
self.assertEqual(nonces[3], 0xacf076ca)
|
<commit_before><commit_msg>Add test for nonce generation<commit_after>
|
import unittest
from nonce import *
class NonceTestCase(unittest.TestCase):
def test_nonces(self):
nonces = generate_nonces(42560, 661771, 4)
self.assertEqual(nonces[0], 0x8c61ee59)
self.assertEqual(nonces[1], 0xc0256620)
self.assertEqual(nonces[2], 0x15022c8a)
self.assertEqual(nonces[3], 0xacf076ca)
|
Add test for nonce generationimport unittest
from nonce import *
class NonceTestCase(unittest.TestCase):
def test_nonces(self):
nonces = generate_nonces(42560, 661771, 4)
self.assertEqual(nonces[0], 0x8c61ee59)
self.assertEqual(nonces[1], 0xc0256620)
self.assertEqual(nonces[2], 0x15022c8a)
self.assertEqual(nonces[3], 0xacf076ca)
|
<commit_before><commit_msg>Add test for nonce generation<commit_after>import unittest
from nonce import *
class NonceTestCase(unittest.TestCase):
def test_nonces(self):
nonces = generate_nonces(42560, 661771, 4)
self.assertEqual(nonces[0], 0x8c61ee59)
self.assertEqual(nonces[1], 0xc0256620)
self.assertEqual(nonces[2], 0x15022c8a)
self.assertEqual(nonces[3], 0xacf076ca)
|
|
4725a80e6a02a08ef6081eac9261cb420bdc1fee
|
django_countries/templatetags/countries.py
|
django_countries/templatetags/countries.py
|
import django
from django import template
from django_countries.fields import Country, countries
register = template.Library()
if django.VERSION < (1, 9):
# Support older versions without implicit assignment support in simple_tag.
simple_tag = register.assignment_tag
else:
simple_tag = register.simple_tag
@simple_tag
def get_country(code):
return Country(code=code)
@simple_tag
def get_countries():
return list(countries)
|
import django
from django import template
from django_countries.fields import Country, countries
register = template.Library()
@register.simple_tag
def get_country(code):
return Country(code=code)
@register.simple_tag
def get_countries():
return list(countries)
|
Remove Django 1.9 simple_tag reference
|
Remove Django 1.9 simple_tag reference
|
Python
|
mit
|
SmileyChris/django-countries
|
import django
from django import template
from django_countries.fields import Country, countries
register = template.Library()
if django.VERSION < (1, 9):
# Support older versions without implicit assignment support in simple_tag.
simple_tag = register.assignment_tag
else:
simple_tag = register.simple_tag
@simple_tag
def get_country(code):
return Country(code=code)
@simple_tag
def get_countries():
return list(countries)
Remove Django 1.9 simple_tag reference
|
import django
from django import template
from django_countries.fields import Country, countries
register = template.Library()
@register.simple_tag
def get_country(code):
return Country(code=code)
@register.simple_tag
def get_countries():
return list(countries)
|
<commit_before>import django
from django import template
from django_countries.fields import Country, countries
register = template.Library()
if django.VERSION < (1, 9):
# Support older versions without implicit assignment support in simple_tag.
simple_tag = register.assignment_tag
else:
simple_tag = register.simple_tag
@simple_tag
def get_country(code):
return Country(code=code)
@simple_tag
def get_countries():
return list(countries)
<commit_msg>Remove Django 1.9 simple_tag reference<commit_after>
|
import django
from django import template
from django_countries.fields import Country, countries
register = template.Library()
@register.simple_tag
def get_country(code):
return Country(code=code)
@register.simple_tag
def get_countries():
return list(countries)
|
import django
from django import template
from django_countries.fields import Country, countries
register = template.Library()
if django.VERSION < (1, 9):
# Support older versions without implicit assignment support in simple_tag.
simple_tag = register.assignment_tag
else:
simple_tag = register.simple_tag
@simple_tag
def get_country(code):
return Country(code=code)
@simple_tag
def get_countries():
return list(countries)
Remove Django 1.9 simple_tag referenceimport django
from django import template
from django_countries.fields import Country, countries
register = template.Library()
@register.simple_tag
def get_country(code):
return Country(code=code)
@register.simple_tag
def get_countries():
return list(countries)
|
<commit_before>import django
from django import template
from django_countries.fields import Country, countries
register = template.Library()
if django.VERSION < (1, 9):
# Support older versions without implicit assignment support in simple_tag.
simple_tag = register.assignment_tag
else:
simple_tag = register.simple_tag
@simple_tag
def get_country(code):
return Country(code=code)
@simple_tag
def get_countries():
return list(countries)
<commit_msg>Remove Django 1.9 simple_tag reference<commit_after>import django
from django import template
from django_countries.fields import Country, countries
register = template.Library()
@register.simple_tag
def get_country(code):
return Country(code=code)
@register.simple_tag
def get_countries():
return list(countries)
|
02b57a47f3ee117b6b32e248c698366469be1a5b
|
runtests.py
|
runtests.py
|
#!/usr/bin/env python
import sys
from os.path import dirname, abspath
from django.conf import settings
if not settings.configured:
settings.configure(
DATABASE_ENGINE = 'sqlite3',
INSTALLED_APPS = [
'django.contrib.contenttypes',
'genericm2m',
'genericm2m.genericm2m_tests',
],
)
from django.test.simple import run_tests
def runtests(*test_args):
if not test_args:
test_args = ['genericm2m_tests']
parent = dirname(abspath(__file__))
sys.path.insert(0, parent)
failures = run_tests(test_args, verbosity=1, interactive=True)
sys.exit(failures)
if __name__ == '__main__':
runtests(*sys.argv[1:])
|
#!/usr/bin/env python
import sys
from os.path import dirname, abspath
from django.conf import settings
if len(sys.argv) > 1 and 'postgres' in sys.argv:
sys.argv.remove('postgres')
db_engine = 'postgresql_psycopg2'
db_name = 'test_main'
else:
db_engine = 'sqlite3'
db_name = ''
if not settings.configured:
settings.configure(
DATABASE_ENGINE = db_engine,
DATABASE_NAME = db_name,
INSTALLED_APPS = [
'django.contrib.contenttypes',
'genericm2m',
'genericm2m.genericm2m_tests',
],
)
from django.test.simple import run_tests
def runtests(*test_args):
if not test_args:
test_args = ['genericm2m_tests']
parent = dirname(abspath(__file__))
sys.path.insert(0, parent)
failures = run_tests(test_args, verbosity=1, interactive=True)
sys.exit(failures)
if __name__ == '__main__':
runtests(*sys.argv[1:])
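Usage implied by the argument handling above (illustrative): passing `postgres` on the command line switches the test database engine, and any remaining arguments are forwarded as test labels.
# python runtests.py              -> runs genericm2m_tests against sqlite3
# python runtests.py postgres     -> runs the same tests against postgresql_psycopg2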
|
Allow running tests with postgres
|
Allow running tests with postgres
|
Python
|
mit
|
jayfk/django-generic-m2m,coleifer/django-generic-m2m,coleifer/django-generic-m2m,coleifer/django-generic-m2m,jayfk/django-generic-m2m
|
#!/usr/bin/env python
import sys
from os.path import dirname, abspath
from django.conf import settings
if not settings.configured:
settings.configure(
DATABASE_ENGINE = 'sqlite3',
INSTALLED_APPS = [
'django.contrib.contenttypes',
'genericm2m',
'genericm2m.genericm2m_tests',
],
)
from django.test.simple import run_tests
def runtests(*test_args):
if not test_args:
test_args = ['genericm2m_tests']
parent = dirname(abspath(__file__))
sys.path.insert(0, parent)
failures = run_tests(test_args, verbosity=1, interactive=True)
sys.exit(failures)
if __name__ == '__main__':
runtests(*sys.argv[1:])
Allow running tests with postgres
|
#!/usr/bin/env python
import sys
from os.path import dirname, abspath
from django.conf import settings
if len(sys.argv) > 1 and 'postgres' in sys.argv:
sys.argv.remove('postgres')
db_engine = 'postgresql_psycopg2'
db_name = 'test_main'
else:
db_engine = 'sqlite3'
db_name = ''
if not settings.configured:
settings.configure(
DATABASE_ENGINE = db_engine,
DATABASE_NAME = db_name,
INSTALLED_APPS = [
'django.contrib.contenttypes',
'genericm2m',
'genericm2m.genericm2m_tests',
],
)
from django.test.simple import run_tests
def runtests(*test_args):
if not test_args:
test_args = ['genericm2m_tests']
parent = dirname(abspath(__file__))
sys.path.insert(0, parent)
failures = run_tests(test_args, verbosity=1, interactive=True)
sys.exit(failures)
if __name__ == '__main__':
runtests(*sys.argv[1:])
|
<commit_before>#!/usr/bin/env python
import sys
from os.path import dirname, abspath
from django.conf import settings
if not settings.configured:
settings.configure(
DATABASE_ENGINE = 'sqlite3',
INSTALLED_APPS = [
'django.contrib.contenttypes',
'genericm2m',
'genericm2m.genericm2m_tests',
],
)
from django.test.simple import run_tests
def runtests(*test_args):
if not test_args:
test_args = ['genericm2m_tests']
parent = dirname(abspath(__file__))
sys.path.insert(0, parent)
failures = run_tests(test_args, verbosity=1, interactive=True)
sys.exit(failures)
if __name__ == '__main__':
runtests(*sys.argv[1:])
<commit_msg>Allow running tests with postgres<commit_after>
|
#!/usr/bin/env python
import sys
from os.path import dirname, abspath
from django.conf import settings
if len(sys.argv) > 1 and 'postgres' in sys.argv:
sys.argv.remove('postgres')
db_engine = 'postgresql_psycopg2'
db_name = 'test_main'
else:
db_engine = 'sqlite3'
db_name = ''
if not settings.configured:
settings.configure(
DATABASE_ENGINE = db_engine,
DATABASE_NAME = db_name,
INSTALLED_APPS = [
'django.contrib.contenttypes',
'genericm2m',
'genericm2m.genericm2m_tests',
],
)
from django.test.simple import run_tests
def runtests(*test_args):
if not test_args:
test_args = ['genericm2m_tests']
parent = dirname(abspath(__file__))
sys.path.insert(0, parent)
failures = run_tests(test_args, verbosity=1, interactive=True)
sys.exit(failures)
if __name__ == '__main__':
runtests(*sys.argv[1:])
|
#!/usr/bin/env python
import sys
from os.path import dirname, abspath
from django.conf import settings
if not settings.configured:
settings.configure(
DATABASE_ENGINE = 'sqlite3',
INSTALLED_APPS = [
'django.contrib.contenttypes',
'genericm2m',
'genericm2m.genericm2m_tests',
],
)
from django.test.simple import run_tests
def runtests(*test_args):
if not test_args:
test_args = ['genericm2m_tests']
parent = dirname(abspath(__file__))
sys.path.insert(0, parent)
failures = run_tests(test_args, verbosity=1, interactive=True)
sys.exit(failures)
if __name__ == '__main__':
runtests(*sys.argv[1:])
Allow running tests with postgres#!/usr/bin/env python
import sys
from os.path import dirname, abspath
from django.conf import settings
if len(sys.argv) > 1 and 'postgres' in sys.argv:
sys.argv.remove('postgres')
db_engine = 'postgresql_psycopg2'
db_name = 'test_main'
else:
db_engine = 'sqlite3'
db_name = ''
if not settings.configured:
settings.configure(
DATABASE_ENGINE = db_engine,
DATABASE_NAME = db_name,
INSTALLED_APPS = [
'django.contrib.contenttypes',
'genericm2m',
'genericm2m.genericm2m_tests',
],
)
from django.test.simple import run_tests
def runtests(*test_args):
if not test_args:
test_args = ['genericm2m_tests']
parent = dirname(abspath(__file__))
sys.path.insert(0, parent)
failures = run_tests(test_args, verbosity=1, interactive=True)
sys.exit(failures)
if __name__ == '__main__':
runtests(*sys.argv[1:])
|
<commit_before>#!/usr/bin/env python
import sys
from os.path import dirname, abspath
from django.conf import settings
if not settings.configured:
settings.configure(
DATABASE_ENGINE = 'sqlite3',
INSTALLED_APPS = [
'django.contrib.contenttypes',
'genericm2m',
'genericm2m.genericm2m_tests',
],
)
from django.test.simple import run_tests
def runtests(*test_args):
if not test_args:
test_args = ['genericm2m_tests']
parent = dirname(abspath(__file__))
sys.path.insert(0, parent)
failures = run_tests(test_args, verbosity=1, interactive=True)
sys.exit(failures)
if __name__ == '__main__':
runtests(*sys.argv[1:])
<commit_msg>Allow running tests with postgres<commit_after>#!/usr/bin/env python
import sys
from os.path import dirname, abspath
from django.conf import settings
if len(sys.argv) > 1 and 'postgres' in sys.argv:
sys.argv.remove('postgres')
db_engine = 'postgresql_psycopg2'
db_name = 'test_main'
else:
db_engine = 'sqlite3'
db_name = ''
if not settings.configured:
settings.configure(
DATABASE_ENGINE = db_engine,
DATABASE_NAME = db_name,
INSTALLED_APPS = [
'django.contrib.contenttypes',
'genericm2m',
'genericm2m.genericm2m_tests',
],
)
from django.test.simple import run_tests
def runtests(*test_args):
if not test_args:
test_args = ['genericm2m_tests']
parent = dirname(abspath(__file__))
sys.path.insert(0, parent)
failures = run_tests(test_args, verbosity=1, interactive=True)
sys.exit(failures)
if __name__ == '__main__':
runtests(*sys.argv[1:])
|
0afda70c3fa96bba8908c7b1c8d310389a74b694
|
zerver/migrations/0397_remove_custom_field_values_for_deleted_options.py
|
zerver/migrations/0397_remove_custom_field_values_for_deleted_options.py
|
# Generated by Django 3.2.13 on 2022-06-17 17:39
import orjson
from django.db import migrations
from django.db.backends.postgresql.schema import BaseDatabaseSchemaEditor
from django.db.migrations.state import StateApps
def remove_custom_field_values_for_deleted_options(
apps: StateApps, schema_editor: BaseDatabaseSchemaEditor
) -> None:
SELECT_TYPE = 3
CustomProfileField = apps.get_model("zerver", "CustomProfileField")
CustomProfileFieldValue = apps.get_model("zerver", "CustomProfileFieldValue")
select_type_fields = CustomProfileField.objects.filter(field_type=SELECT_TYPE)
for field in select_type_fields:
field_data = orjson.loads(field.field_data)
current_options = list(field_data.keys())
CustomProfileFieldValue.objects.filter(field=field).exclude(
value__in=current_options
).delete()
class Migration(migrations.Migration):
dependencies = [
("zerver", "0396_remove_subscription_role"),
]
operations = [
migrations.RunPython(
remove_custom_field_values_for_deleted_options,
elidable=True,
),
]
|
Add migration to remove user values for deleted options.
|
migration: Add migration to remove user values for deleted options.
This commit adds migration to delete CustomProfileFieldValue
objects for deleted options of SELECT type custom profile
fields.
|
Python
|
apache-2.0
|
andersk/zulip,rht/zulip,andersk/zulip,zulip/zulip,andersk/zulip,zulip/zulip,rht/zulip,rht/zulip,zulip/zulip,andersk/zulip,zulip/zulip,rht/zulip,rht/zulip,rht/zulip,zulip/zulip,andersk/zulip,zulip/zulip,rht/zulip,zulip/zulip,andersk/zulip,andersk/zulip
|
migration: Add migration to remove user values for deleted options.
This commit adds migration to delete CustomProfileFieldValue
objects for deleted options of SELECT type custom profile
fields.
|
# Generated by Django 3.2.13 on 2022-06-17 17:39
import orjson
from django.db import migrations
from django.db.backends.postgresql.schema import BaseDatabaseSchemaEditor
from django.db.migrations.state import StateApps
def remove_custom_field_values_for_deleted_options(
apps: StateApps, schema_editor: BaseDatabaseSchemaEditor
) -> None:
SELECT_TYPE = 3
CustomProfileField = apps.get_model("zerver", "CustomProfileField")
CustomProfileFieldValue = apps.get_model("zerver", "CustomProfileFieldValue")
select_type_fields = CustomProfileField.objects.filter(field_type=SELECT_TYPE)
for field in select_type_fields:
field_data = orjson.loads(field.field_data)
current_options = list(field_data.keys())
CustomProfileFieldValue.objects.filter(field=field).exclude(
value__in=current_options
).delete()
class Migration(migrations.Migration):
dependencies = [
("zerver", "0396_remove_subscription_role"),
]
operations = [
migrations.RunPython(
remove_custom_field_values_for_deleted_options,
elidable=True,
),
]
|
<commit_before><commit_msg>migration: Add migration to remove user values for deleted options.
This commit adds migration to delete CustomProfileFieldValue
objects for deleted options of SELECT type custom profile
fields.<commit_after>
|
# Generated by Django 3.2.13 on 2022-06-17 17:39
import orjson
from django.db import migrations
from django.db.backends.postgresql.schema import BaseDatabaseSchemaEditor
from django.db.migrations.state import StateApps
def remove_custom_field_values_for_deleted_options(
apps: StateApps, schema_editor: BaseDatabaseSchemaEditor
) -> None:
SELECT_TYPE = 3
CustomProfileField = apps.get_model("zerver", "CustomProfileField")
CustomProfileFieldValue = apps.get_model("zerver", "CustomProfileFieldValue")
select_type_fields = CustomProfileField.objects.filter(field_type=SELECT_TYPE)
for field in select_type_fields:
field_data = orjson.loads(field.field_data)
current_options = list(field_data.keys())
CustomProfileFieldValue.objects.filter(field=field).exclude(
value__in=current_options
).delete()
class Migration(migrations.Migration):
dependencies = [
("zerver", "0396_remove_subscription_role"),
]
operations = [
migrations.RunPython(
remove_custom_field_values_for_deleted_options,
elidable=True,
),
]
|
migration: Add migration to remove user values for deleted options.
This commit adds migration to delete CustomProfileFieldValue
objects for deleted options of SELECT type custom profile
fields.# Generated by Django 3.2.13 on 2022-06-17 17:39
import orjson
from django.db import migrations
from django.db.backends.postgresql.schema import BaseDatabaseSchemaEditor
from django.db.migrations.state import StateApps
def remove_custom_field_values_for_deleted_options(
apps: StateApps, schema_editor: BaseDatabaseSchemaEditor
) -> None:
SELECT_TYPE = 3
CustomProfileField = apps.get_model("zerver", "CustomProfileField")
CustomProfileFieldValue = apps.get_model("zerver", "CustomProfileFieldValue")
select_type_fields = CustomProfileField.objects.filter(field_type=SELECT_TYPE)
for field in select_type_fields:
field_data = orjson.loads(field.field_data)
current_options = list(field_data.keys())
CustomProfileFieldValue.objects.filter(field=field).exclude(
value__in=current_options
).delete()
class Migration(migrations.Migration):
dependencies = [
("zerver", "0396_remove_subscription_role"),
]
operations = [
migrations.RunPython(
remove_custom_field_values_for_deleted_options,
elidable=True,
),
]
|
<commit_before><commit_msg>migration: Add migration to remove user values for deleted options.
This commit adds migration to delete CustomProfileFieldValue
objects for deleted options of SELECT type custom profile
fields.<commit_after># Generated by Django 3.2.13 on 2022-06-17 17:39
import orjson
from django.db import migrations
from django.db.backends.postgresql.schema import BaseDatabaseSchemaEditor
from django.db.migrations.state import StateApps
def remove_custom_field_values_for_deleted_options(
apps: StateApps, schema_editor: BaseDatabaseSchemaEditor
) -> None:
SELECT_TYPE = 3
CustomProfileField = apps.get_model("zerver", "CustomProfileField")
CustomProfileFieldValue = apps.get_model("zerver", "CustomProfileFieldValue")
select_type_fields = CustomProfileField.objects.filter(field_type=SELECT_TYPE)
for field in select_type_fields:
field_data = orjson.loads(field.field_data)
current_options = list(field_data.keys())
CustomProfileFieldValue.objects.filter(field=field).exclude(
value__in=current_options
).delete()
class Migration(migrations.Migration):
dependencies = [
("zerver", "0396_remove_subscription_role"),
]
operations = [
migrations.RunPython(
remove_custom_field_values_for_deleted_options,
elidable=True,
),
]
|
|
56d444a1233b027718e0f7bfdf2c7d27b4de45d4
|
into/backends/spark.py
|
into/backends/spark.py
|
class Dummy(object):
sum = max = min = count = distinct = mean = variance = stdev = None
try:
from pyspark import SparkContext
import pyspark
from pyspark.rdd import RDD
RDD.min
except (AttributeError, ImportError):
SparkContext = Dummy
pyspark = Dummy()
pyspark.rdd = Dummy()
RDD = Dummy
from collections import Iterator
from datashape import var
from ..into import convert, append
from ..core import discover
@append.register(SparkContext, (list, tuple, Iterator))
def iterable_to_spark_context(sc, seq, **kwargs):
return sc.parallelize(seq)
@append.register(RDD, (list, tuple))
def sequence_to_rdd(rdd, seq, **kwargs):
# Hm this seems anti-pattern-y
return append(rdd.context, seq)
@convert.register(list, RDD)
def rdd_to_list(rdd, **kwargs):
return rdd.collect()
@discover.register(RDD)
def discover_rdd(rdd, n=50, **kwargs):
data = rdd.take(n)
return var * discover(data).subshape[0]
|
class Dummy(object):
pass
try:
from pyspark import SparkContext
import pyspark
from pyspark import RDD
from pyspark.rdd import PipelinedRDD
from pyspark.sql import SchemaRDD
RDD.min
except (AttributeError, ImportError):
SparkContext = Dummy
pyspark = Dummy()
RDD = Dummy
from collections import Iterator
from datashape import var
from ..into import convert, append
from ..core import discover
@append.register(SparkContext, (list, tuple, Iterator))
def iterable_to_spark_context(sc, seq, **kwargs):
return sc.parallelize(seq)
@append.register(RDD, (list, tuple))
def sequence_to_rdd(rdd, seq, **kwargs):
# Hm this seems anti-pattern-y
return append(rdd.context, seq)
@convert.register(list, (RDD, PipelinedRDD, SchemaRDD))
def rdd_to_list(rdd, **kwargs):
return rdd.collect()
@discover.register(RDD)
def discover_rdd(rdd, n=50, **kwargs):
data = rdd.take(n)
return var * discover(data).subshape[0]
|
Add convert for the various RDDs
|
Add convert for the various RDDs
|
Python
|
bsd-3-clause
|
cpcloud/odo,blaze/odo,ContinuumIO/odo,ContinuumIO/odo,blaze/odo,quantopian/odo,alexmojaki/odo,ywang007/odo,cpcloud/odo,ywang007/odo,cowlicks/odo,Dannnno/odo,alexmojaki/odo,Dannnno/odo,quantopian/odo,cowlicks/odo
|
class Dummy(object):
sum = max = min = count = distinct = mean = variance = stdev = None
try:
from pyspark import SparkContext
import pyspark
from pyspark.rdd import RDD
RDD.min
except (AttributeError, ImportError):
SparkContext = Dummy
pyspark = Dummy()
pyspark.rdd = Dummy()
RDD = Dummy
from collections import Iterator
from datashape import var
from ..into import convert, append
from ..core import discover
@append.register(SparkContext, (list, tuple, Iterator))
def iterable_to_spark_context(sc, seq, **kwargs):
return sc.parallelize(seq)
@append.register(RDD, (list, tuple))
def sequence_to_rdd(rdd, seq, **kwargs):
# Hm this seems anti-pattern-y
return append(rdd.context, seq)
@convert.register(list, RDD)
def rdd_to_list(rdd, **kwargs):
return rdd.collect()
@discover.register(RDD)
def discover_rdd(rdd, n=50, **kwargs):
data = rdd.take(n)
return var * discover(data).subshape[0]
Add convert for the various RDDs
|
class Dummy(object):
pass
try:
from pyspark import SparkContext
import pyspark
from pyspark import RDD
from pyspark.rdd import PipelinedRDD
from pyspark.sql import SchemaRDD
RDD.min
except (AttributeError, ImportError):
SparkContext = Dummy
pyspark = Dummy()
RDD = Dummy
from collections import Iterator
from datashape import var
from ..into import convert, append
from ..core import discover
@append.register(SparkContext, (list, tuple, Iterator))
def iterable_to_spark_context(sc, seq, **kwargs):
return sc.parallelize(seq)
@append.register(RDD, (list, tuple))
def sequence_to_rdd(rdd, seq, **kwargs):
# Hm this seems anti-pattern-y
return append(rdd.context, seq)
@convert.register(list, (RDD, PipelinedRDD, SchemaRDD))
def rdd_to_list(rdd, **kwargs):
return rdd.collect()
@discover.register(RDD)
def discover_rdd(rdd, n=50, **kwargs):
data = rdd.take(n)
return var * discover(data).subshape[0]
|
<commit_before>class Dummy(object):
sum = max = min = count = distinct = mean = variance = stdev = None
try:
from pyspark import SparkContext
import pyspark
from pyspark.rdd import RDD
RDD.min
except (AttributeError, ImportError):
SparkContext = Dummy
pyspark = Dummy()
pyspark.rdd = Dummy()
RDD = Dummy
from collections import Iterator
from datashape import var
from ..into import convert, append
from ..core import discover
@append.register(SparkContext, (list, tuple, Iterator))
def iterable_to_spark_context(sc, seq, **kwargs):
return sc.parallelize(seq)
@append.register(RDD, (list, tuple))
def sequence_to_rdd(rdd, seq, **kwargs):
# Hm this seems anti-pattern-y
return append(rdd.context, seq)
@convert.register(list, RDD)
def rdd_to_list(rdd, **kwargs):
return rdd.collect()
@discover.register(RDD)
def discover_rdd(rdd, n=50, **kwargs):
data = rdd.take(n)
return var * discover(data).subshape[0]
<commit_msg>Add convert for the various RDDs<commit_after>
|
class Dummy(object):
pass
try:
from pyspark import SparkContext
import pyspark
from pyspark import RDD
from pyspark.rdd import PipelinedRDD
from pyspark.sql import SchemaRDD
RDD.min
except (AttributeError, ImportError):
SparkContext = Dummy
pyspark = Dummy()
RDD = Dummy
from collections import Iterator
from datashape import var
from ..into import convert, append
from ..core import discover
@append.register(SparkContext, (list, tuple, Iterator))
def iterable_to_spark_context(sc, seq, **kwargs):
return sc.parallelize(seq)
@append.register(RDD, (list, tuple))
def sequence_to_rdd(rdd, seq, **kwargs):
# Hm this seems anti-pattern-y
return append(rdd.context, seq)
@convert.register(list, (RDD, PipelinedRDD, SchemaRDD))
def rdd_to_list(rdd, **kwargs):
return rdd.collect()
@discover.register(RDD)
def discover_rdd(rdd, n=50, **kwargs):
data = rdd.take(n)
return var * discover(data).subshape[0]
|
class Dummy(object):
sum = max = min = count = distinct = mean = variance = stdev = None
try:
from pyspark import SparkContext
import pyspark
from pyspark.rdd import RDD
RDD.min
except (AttributeError, ImportError):
SparkContext = Dummy
pyspark = Dummy()
pyspark.rdd = Dummy()
RDD = Dummy
from collections import Iterator
from datashape import var
from ..into import convert, append
from ..core import discover
@append.register(SparkContext, (list, tuple, Iterator))
def iterable_to_spark_context(sc, seq, **kwargs):
return sc.parallelize(seq)
@append.register(RDD, (list, tuple))
def sequence_to_rdd(rdd, seq, **kwargs):
# Hm this seems anti-pattern-y
return append(rdd.context, seq)
@convert.register(list, RDD)
def rdd_to_list(rdd, **kwargs):
return rdd.collect()
@discover.register(RDD)
def discover_rdd(rdd, n=50, **kwargs):
data = rdd.take(n)
return var * discover(data).subshape[0]
Add convert for the various RDDsclass Dummy(object):
pass
try:
from pyspark import SparkContext
import pyspark
from pyspark import RDD
from pyspark.rdd import PipelinedRDD
from pyspark.sql import SchemaRDD
RDD.min
except (AttributeError, ImportError):
SparkContext = Dummy
pyspark = Dummy()
RDD = Dummy
from collections import Iterator
from datashape import var
from ..into import convert, append
from ..core import discover
@append.register(SparkContext, (list, tuple, Iterator))
def iterable_to_spark_context(sc, seq, **kwargs):
return sc.parallelize(seq)
@append.register(RDD, (list, tuple))
def sequence_to_rdd(rdd, seq, **kwargs):
# Hm this seems anti-pattern-y
return append(rdd.context, seq)
@convert.register(list, (RDD, PipelinedRDD, SchemaRDD))
def rdd_to_list(rdd, **kwargs):
return rdd.collect()
@discover.register(RDD)
def discover_rdd(rdd, n=50, **kwargs):
data = rdd.take(n)
return var * discover(data).subshape[0]
|
<commit_before>class Dummy(object):
sum = max = min = count = distinct = mean = variance = stdev = None
try:
from pyspark import SparkContext
import pyspark
from pyspark.rdd import RDD
RDD.min
except (AttributeError, ImportError):
SparkContext = Dummy
pyspark = Dummy()
pyspark.rdd = Dummy()
RDD = Dummy
from collections import Iterator
from datashape import var
from ..into import convert, append
from ..core import discover
@append.register(SparkContext, (list, tuple, Iterator))
def iterable_to_spark_context(sc, seq, **kwargs):
return sc.parallelize(seq)
@append.register(RDD, (list, tuple))
def sequence_to_rdd(rdd, seq, **kwargs):
# Hm this seems anti-pattern-y
return append(rdd.context, seq)
@convert.register(list, RDD)
def rdd_to_list(rdd, **kwargs):
return rdd.collect()
@discover.register(RDD)
def discover_rdd(rdd, n=50, **kwargs):
data = rdd.take(n)
return var * discover(data).subshape[0]
<commit_msg>Add convert for the various RDDs<commit_after>class Dummy(object):
pass
try:
from pyspark import SparkContext
import pyspark
from pyspark import RDD
from pyspark.rdd import PipelinedRDD
from pyspark.sql import SchemaRDD
RDD.min
except (AttributeError, ImportError):
SparkContext = Dummy
pyspark = Dummy()
RDD = Dummy
from collections import Iterator
from datashape import var
from ..into import convert, append
from ..core import discover
@append.register(SparkContext, (list, tuple, Iterator))
def iterable_to_spark_context(sc, seq, **kwargs):
return sc.parallelize(seq)
@append.register(RDD, (list, tuple))
def sequence_to_rdd(rdd, seq, **kwargs):
# Hm this seems anti-pattern-y
return append(rdd.context, seq)
@convert.register(list, (RDD, PipelinedRDD, SchemaRDD))
def rdd_to_list(rdd, **kwargs):
return rdd.collect()
@discover.register(RDD)
def discover_rdd(rdd, n=50, **kwargs):
data = rdd.take(n)
return var * discover(data).subshape[0]
|
3620bafe1ce573d08fca7db357f4df40d6949cfb
|
flowz/channels/__init__.py
|
flowz/channels/__init__.py
|
from __future__ import absolute_import
from .core import (ChannelDone, Channel, ReadChannel, MapChannel, FlatMapChannel,
FilterChannel, FutureChannel, ReadyFutureChannel, TeeChannel,
ProducerChannel, IterChannel, ZipChannel, CoGroupChannel,
WindowChannel, GroupChannel)
|
from __future__ import absolute_import
from .core import (
Channel,
ChannelDone,
CoGroupChannel,
FilterChannel,
FlatMapChannel,
FutureChannel,
GroupChannel,
IterChannel,
MapChannel,
ProducerChannel,
ReadChannel,
ReadyFutureChannel,
TeeChannel,
WindowChannel,
ZipChannel)
|
Change to one package per line for channel import
|
Change to one package per line for channel import
Resolves #19.
While this doesn't switch to the ideal standard of one import, one
line, it makes the import from `flowz.channels.core` into
`flowz.channels` easier to read and less likely to invite conflicts.
Didn't go to "from .core import" per line primarily due to pragmatism,
though that would normally be my preference.
|
Python
|
mit
|
ethanrowe/flowz,PatrickDRusk/flowz
|
from __future__ import absolute_import
from .core import (ChannelDone, Channel, ReadChannel, MapChannel, FlatMapChannel,
FilterChannel, FutureChannel, ReadyFutureChannel, TeeChannel,
ProducerChannel, IterChannel, ZipChannel, CoGroupChannel,
WindowChannel, GroupChannel)
Change to one package per line for channel import
Resolves #19.
While this doesn't switch to the ideal standard of one import, one
line, it makes the import from `flowz.channels.core` into
`flowz.channels` easier to read and less likely to invite conflicts.
Didn't go to "from .core import" per line primarily due to pragmatism,
though that would normally be my preference.
|
from __future__ import absolute_import
from .core import (
Channel,
ChannelDone,
CoGroupChannel,
FilterChannel,
FlatMapChannel,
FutureChannel,
GroupChannel,
IterChannel,
MapChannel,
ProducerChannel,
ReadChannel,
ReadyFutureChannel,
TeeChannel,
WindowChannel,
ZipChannel)
|
<commit_before>from __future__ import absolute_import
from .core import (ChannelDone, Channel, ReadChannel, MapChannel, FlatMapChannel,
FilterChannel, FutureChannel, ReadyFutureChannel, TeeChannel,
ProducerChannel, IterChannel, ZipChannel, CoGroupChannel,
WindowChannel, GroupChannel)
<commit_msg>Change to one package per line for channel import
Resolves #19.
While this doesn't switch to the ideal standard of one import, one
line, it makes the import from `flowz.channels.core` into
`flowz.channels` easier to read and less likely to invite conflicts.
Didn't go to "from .core import" per line primarily due to pragmatism,
though that would normally be my preference.<commit_after>
|
from __future__ import absolute_import
from .core import (
Channel,
ChannelDone,
CoGroupChannel,
FilterChannel,
FlatMapChannel,
FutureChannel,
GroupChannel,
IterChannel,
MapChannel,
ProducerChannel,
ReadChannel,
ReadyFutureChannel,
TeeChannel,
WindowChannel,
ZipChannel)
|
from __future__ import absolute_import
from .core import (ChannelDone, Channel, ReadChannel, MapChannel, FlatMapChannel,
FilterChannel, FutureChannel, ReadyFutureChannel, TeeChannel,
ProducerChannel, IterChannel, ZipChannel, CoGroupChannel,
WindowChannel, GroupChannel)
Change to one package per line for channel import
Resolves #19.
While this doesn't switch to the ideal standard of one import, one
line, it makes the import from `flowz.channels.core` into
`flowz.channels` easier to read and less likely to invite conflicts.
Didn't go to "from .core import" per line primarily due to pragmatism,
though that would normally be my preference.from __future__ import absolute_import
from .core import (
Channel,
ChannelDone,
CoGroupChannel,
FilterChannel,
FlatMapChannel,
FutureChannel,
GroupChannel,
IterChannel,
MapChannel,
ProducerChannel,
ReadChannel,
ReadyFutureChannel,
TeeChannel,
WindowChannel,
ZipChannel)
|
<commit_before>from __future__ import absolute_import
from .core import (ChannelDone, Channel, ReadChannel, MapChannel, FlatMapChannel,
FilterChannel, FutureChannel, ReadyFutureChannel, TeeChannel,
ProducerChannel, IterChannel, ZipChannel, CoGroupChannel,
WindowChannel, GroupChannel)
<commit_msg>Change to one package per line for channel import
Resolves #19.
While this doesn't switch to the ideal standard of one import, one
line, it makes the import from `flowz.channels.core` into
`flowz.channels` easier to read and less likely to invite conflicts.
Didn't go to "from .core import" per line primarily due to pragmatism,
though that would normally be my preference.<commit_after>from __future__ import absolute_import
from .core import (
Channel,
ChannelDone,
CoGroupChannel,
FilterChannel,
FlatMapChannel,
FutureChannel,
GroupChannel,
IterChannel,
MapChannel,
ProducerChannel,
ReadChannel,
ReadyFutureChannel,
TeeChannel,
WindowChannel,
ZipChannel)
|
357a37d607b4b85d46e26fa4d2409f23d923d176
|
src/client/keyboard/win.py
|
src/client/keyboard/win.py
|
#!/usr/bin/env python
import re, sys, os
if len(sys.argv) != 2:
print >>sys.stderr, "Usage: win.py path/to/WinUser.h"
sys.exit(1)
OUT = 'win2.txt'
TMP = OUT + '.tmp'
f = open(TMP, 'w')
try:
print >>f, '; Automatically generated by "win.py"'
VK = re.compile('#define\s*VK_(\w+)\s+(\w+)')
for line in open(sys.argv[1], 'r'):
m = VK.match(line)
if m:
n, i = m.groups()
i = int(i, 0)
print >>f, '%d\t%s' % (i, n)
f.close()
os.rename(TMP, OUT)
except:
try:
os.unlink(TMP)
except OSError:
pass
raise
|
Add keycode table generator for Windows
|
Add keycode table generator for Windows
|
Python
|
bsd-2-clause
|
depp/sglib,depp/sglib
|
Add keycode table generator for Windows
|
#!/usr/bin/env python
import re, sys, os
if len(sys.argv) != 2:
print >>sys.stderr, "Usage: win.py path/to/WinUser.h"
sys.exit(1)
OUT = 'win2.txt'
TMP = OUT + '.tmp'
f = open(TMP, 'w')
try:
print >>f, '; Automatically generated by "win.py"'
VK = re.compile('#define\s*VK_(\w+)\s+(\w+)')
for line in open(sys.argv[1], 'r'):
m = VK.match(line)
if m:
n, i = m.groups()
i = int(i, 0)
print >>f, '%d\t%s' % (i, n)
f.close()
os.rename(TMP, OUT)
except:
try:
os.unlink(TMP)
except OSError:
pass
raise
|
<commit_before><commit_msg>Add keycode table generator for Windows<commit_after>
|
#!/usr/bin/env python
import re, sys, os
if len(sys.argv) != 2:
print >>sys.stderr, "Usage: win.py path/to/WinUser.h"
sys.exit(1)
OUT = 'win2.txt'
TMP = OUT + '.tmp'
f = open(TMP, 'w')
try:
print >>f, '; Automatically generated by "win.py"'
VK = re.compile('#define\s*VK_(\w+)\s+(\w+)')
for line in open(sys.argv[1], 'r'):
m = VK.match(line)
if m:
n, i = m.groups()
i = int(i, 0)
print >>f, '%d\t%s' % (i, n)
f.close()
os.rename(TMP, OUT)
except:
try:
os.unlink(TMP)
except OSError:
pass
raise
|
Add keycode table generator for Windows#!/usr/bin/env python
import re, sys, os
if len(sys.argv) != 2:
print >>sys.stderr, "Usage: win.py path/to/WinUser.h"
sys.exit(1)
OUT = 'win2.txt'
TMP = OUT + '.tmp'
f = open(TMP, 'w')
try:
print >>f, '; Automatically generated by "win.py"'
VK = re.compile('#define\s*VK_(\w+)\s+(\w+)')
for line in open(sys.argv[1], 'r'):
m = VK.match(line)
if m:
n, i = m.groups()
i = int(i, 0)
print >>f, '%d\t%s' % (i, n)
f.close()
os.rename(TMP, OUT)
except:
try:
os.unlink(TMP)
except OSError:
pass
raise
|
<commit_before><commit_msg>Add keycode table generator for Windows<commit_after>#!/usr/bin/env python
import re, sys, os
if len(sys.argv) != 2:
print >>sys.stderr, "Usage: win.py path/to/WinUser.h"
sys.exit(1)
OUT = 'win2.txt'
TMP = OUT + '.tmp'
f = open(TMP, 'w')
try:
print >>f, '; Automatically generated by "win.py"'
VK = re.compile('#define\s*VK_(\w+)\s+(\w+)')
for line in open(sys.argv[1], 'r'):
m = VK.match(line)
if m:
n, i = m.groups()
i = int(i, 0)
print >>f, '%d\t%s' % (i, n)
f.close()
os.rename(TMP, OUT)
except:
try:
os.unlink(TMP)
except OSError:
pass
raise
|
|
950cef23b570dc744357ab7a1dd7e8ad3d7c71d7
|
cdent/emitter/python3.py
|
cdent/emitter/python3.py
|
"""\
Python code emitter for C'Dent
"""
from cdent.emitter import Emitter as Base
class Emitter(Base):
LANGUAGE_ID = 'py3'
BLOCK_COMMENT_BEGIN = '"""\\\n'
BLOCK_COMMENT_PREFIX = ''
BLOCK_COMMENT_END = '"""\n'
def emit_includecdent(self, includecdent):
self.writeln('from cdent.run import *')
def emit_class(self, class_):
name = class_.name
self.writeln('class %s():' % name)
self.emit(class_.has, indent=True)
def emit_method(self, method):
name = method.name
self.writeln('def %s(self):' % name)
self.emit(method.has, indent=True)
def emit_println(self, println):
self.write('print(', indent=True)
self.emit(println.args)
self.write(')')
self.writeln()
def emit_return(self, return_):
self.writeln('return')
|
Add a Python 3000 emitter.
|
Add a Python 3000 emitter.
|
Python
|
bsd-2-clause
|
ingydotnet/cdent-py,ingydotnet/cdent-py,ingydotnet/cdent-py,ingydotnet/cdent-py,ingydotnet/cdent-py,ingydotnet/cdent-py,ingydotnet/cdent-py,ingydotnet/cdent-py
|
Add a Python 3000 emitter.
|
"""\
Python code emitter for C'Dent
"""
from cdent.emitter import Emitter as Base
class Emitter(Base):
LANGUAGE_ID = 'py3'
BLOCK_COMMENT_BEGIN = '"""\\\n'
BLOCK_COMMENT_PREFIX = ''
BLOCK_COMMENT_END = '"""\n'
def emit_includecdent(self, includecdent):
self.writeln('from cdent.run import *')
def emit_class(self, class_):
name = class_.name
self.writeln('class %s():' % name)
self.emit(class_.has, indent=True)
def emit_method(self, method):
name = method.name
self.writeln('def %s(self):' % name)
self.emit(method.has, indent=True)
def emit_println(self, println):
self.write('print(', indent=True)
self.emit(println.args)
self.write(')')
self.writeln()
def emit_return(self, return_):
self.writeln('return')
|
<commit_before><commit_msg>Add a Python 3000 emitter.<commit_after>
|
"""\
Python code emitter for C'Dent
"""
from cdent.emitter import Emitter as Base
class Emitter(Base):
LANGUAGE_ID = 'py3'
BLOCK_COMMENT_BEGIN = '"""\\\n'
BLOCK_COMMENT_PREFIX = ''
BLOCK_COMMENT_END = '"""\n'
def emit_includecdent(self, includecdent):
self.writeln('from cdent.run import *')
def emit_class(self, class_):
name = class_.name
self.writeln('class %s():' % name)
self.emit(class_.has, indent=True)
def emit_method(self, method):
name = method.name
self.writeln('def %s(self):' % name)
self.emit(method.has, indent=True)
def emit_println(self, println):
self.write('print(', indent=True)
self.emit(println.args)
self.write(')')
self.writeln()
def emit_return(self, return_):
self.writeln('return')
|
Add a Python 3000 emitter."""\
Python code emitter for C'Dent
"""
from cdent.emitter import Emitter as Base
class Emitter(Base):
LANGUAGE_ID = 'py3'
BLOCK_COMMENT_BEGIN = '"""\\\n'
BLOCK_COMMENT_PREFIX = ''
BLOCK_COMMENT_END = '"""\n'
def emit_includecdent(self, includecdent):
self.writeln('from cdent.run import *')
def emit_class(self, class_):
name = class_.name
self.writeln('class %s():' % name)
self.emit(class_.has, indent=True)
def emit_method(self, method):
name = method.name
self.writeln('def %s(self):' % name)
self.emit(method.has, indent=True)
def emit_println(self, println):
self.write('print(', indent=True)
self.emit(println.args)
self.write(')')
self.writeln()
def emit_return(self, return_):
self.writeln('return')
|
<commit_before><commit_msg>Add a Python 3000 emitter.<commit_after>"""\
Python code emitter for C'Dent
"""
from cdent.emitter import Emitter as Base
class Emitter(Base):
LANGUAGE_ID = 'py3'
BLOCK_COMMENT_BEGIN = '"""\\\n'
BLOCK_COMMENT_PREFIX = ''
BLOCK_COMMENT_END = '"""\n'
def emit_includecdent(self, includecdent):
self.writeln('from cdent.run import *')
def emit_class(self, class_):
name = class_.name
self.writeln('class %s():' % name)
self.emit(class_.has, indent=True)
def emit_method(self, method):
name = method.name
self.writeln('def %s(self):' % name)
self.emit(method.has, indent=True)
def emit_println(self, println):
self.write('print(', indent=True)
self.emit(println.args)
self.write(')')
self.writeln()
def emit_return(self, return_):
self.writeln('return')
|
|
54beba6ca344e6b0269e9500b3ec9f103348f217
|
scripts/line.rw.py
|
scripts/line.rw.py
|
from nxt import locator
from nxt.sensor.common import PORT_1, PORT_2, PORT_3, PORT_4
from nxt.motor import PORT_A, PORT_B, PORT_C
from scripts.robot import Robot, SERVO_NICE, ON, OFF
from scripts.utils import normalize
def main():
brick = locator.find_one_brick() # USB connection
robot = Robot(brick)
# Sensors
light = robot.init_light_sensor(PORT_2)
touch = robot.init_touch_sensor(PORT_3)
# Motors
servo = robot.init_servo(PORT_B)
syn = robot.init_synchronized_motors(PORT_A, PORT_C)
# Initial setup
robot.set_servo(SERVO_NICE) # It's called nice because the servo movement looks freaking awesome
robot.turn_light_sensor(ON)
# Light sensor calibration
black, white = robot.calibrate_light()
# Thresholds
lower = 4 * (10**-1)
upper = 9 * (10**-1)
light_level = 0
until = lambda ls=None, ts=None: normalize(ls.get_lightness(), black, white) < upper or ts.is_pressed()
robot.move_forward(until=until, ls=light, ts=touch)
while robot.running:
light_level = normalize(light.get_lightness(), black, white)
if light_level > lower:
robot.debug('Current light level: ', light_level)
robot.turn_right(8, 15) # Turn right 15° to see if we correct the course
light_level_tmp = normalize(light.get_lightness(), black, white)
if light_level_tmp > light_level:
robot.debug('Right turn didn\'t improve course. Turning left...')
robot.turn_left(8, 30)
robot.move_forward(until=until, ls=light, ts=touch)
# WARNING: From here, the code is even more experimental.
# I encourage you to consider this some sort of fancy pseudo-code.
if touch.is_pressed():
robot.debug('Found an obstacle.')
robot.turn_right(50, 45)
robot.move_forward(dist=10) # Dist is given in cm
if __name__ == '__main__':
main()
|
Add experimental version of line.py (fancier :fire:)
|
Add experimental version of line.py (fancier :fire:)
|
Python
|
mit
|
richin13/nxt-scripts
|
Add experimental version of line.py (fancier :fire:)
|
from nxt import locator
from nxt.sensor.common import PORT_1, PORT_2, PORT_3, PORT_4
from nxt.motor import PORT_A, PORT_B, PORT_C
from scripts.robot import Robot, SERVO_NICE, ON, OFF
from scripts.utils import normalize
def main():
brick = locator.find_one_brick() # USB connection
robot = Robot(brick)
# Sensors
light = robot.init_light_sensor(PORT_2)
touch = robot.init_touch_sensor(PORT_3)
# Motors
servo = robot.init_servo(PORT_B)
syn = robot.init_synchronized_motors(PORT_A, PORT_C)
# Initial setup
robot.set_servo(SERVO_NICE) # It's called nice because the servo movement looks freaking awesome
robot.turn_light_sensor(ON)
# Light sensor calibration
black, white = robot.calibrate_light()
# Thresholds
lower = 4 * (10**-1)
upper = 9 * (10**-1)
light_level = 0
until = lambda ls=None, ts=None: normalize(ls.get_lightness(), black, white) < upper or ts.is_pressed()
robot.move_forward(until=until, ls=light, ts=touch)
while robot.running:
light_level = normalize(light.get_lightness(), black, white)
if light_level > lower:
robot.debug('Current light level: ', light_level)
robot.turn_right(8, 15) # Turn right 15° to see if we correct the course
light_level_tmp = normalize(light.get_lightness(), black, white)
if light_level_tmp > light_level:
robot.debug('Right turn didn\'t improve course. Turning left...')
robot.turn_left(8, 30)
robot.move_forward(until=until, ls=light, ts=touch)
# WARNING: From here, the code is even more experimental.
# I encourage you to consider this some sort of fancy pseudo-code.
if touch.is_pressed():
robot.debug('Found an obstacle.')
robot.turn_right(50, 45)
robot.move_forward(dist=10) # Dist is given in cm
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add experimental version of line.py (fancier :fire:)<commit_after>
|
from nxt import locator
from nxt.sensor.common import PORT_1, PORT_2, PORT_3, PORT_4
from nxt.motor import PORT_A, PORT_B, PORT_C
from scripts.robot import Robot, SERVO_NICE, ON, OFF
from scripts.utils import normalize
def main():
brick = locator.find_one_brick() # USB connection
robot = Robot(brick)
# Sensors
light = robot.init_light_sensor(PORT_2)
touch = robot.init_touch_sensor(PORT_3)
# Motors
servo = robot.init_servo(PORT_B)
syn = robot.init_synchronized_motors(PORT_A, PORT_C)
# Initial setup
robot.set_servo(SERVO_NICE) # It's called nice because the servo movement looks freaking awesome
robot.turn_light_sensor(ON)
# Light sensor calibration
black, white = robot.calibrate_light()
# Thresholds
lower = 4 * (10**-1)
upper = 9 * (10**-1)
light_level = 0
until = lambda ls=None, ts=None: normalize(ls.get_lightness(), black, white) < upper or ts.is_pressed()
robot.move_forward(until=until, ls=light, ts=touch)
while robot.running:
light_level = normalize(light.get_lightness(), black, white)
if light_level > lower:
robot.debug('Current light level: ', light_level)
robot.turn_right(8, 15) # Turn right 15° to see if we correct the course
light_level_tmp = normalize(light.get_lightness(), black, white)
if light_level_tmp > light_level:
robot.debug('Right turn didn\'t improve course. Turning left...')
robot.turn_left(8, 30)
robot.move_forward(until=until, ls=light, ts=touch)
# WARNING: From here, the code is even more experimental.
# I encourage you to consider this some sort of fancy pseudo-code.
if touch.is_pressed():
robot.debug('Found an obstacle.')
robot.turn_right(50, 45)
robot.move_forward(dist=10) # Dist is given in cm
if __name__ == '__main__':
main()
|
Add experimental version of line.py (fancier :fire:)from nxt import locator
from nxt.sensor.common import PORT_1, PORT_2, PORT_3, PORT_4
from nxt.motor import PORT_A, PORT_B, PORT_C
from scripts.robot import Robot, SERVO_NICE, ON, OFF
from scripts.utils import normalize
def main():
brick = locator.find_one_brick() # USB connection
robot = Robot(brick)
# Sensors
light = robot.init_light_sensor(PORT_2)
touch = robot.init_touch_sensor(PORT_3)
# Motors
servo = robot.init_servo(PORT_B)
syn = robot.init_synchronized_motors(PORT_A, PORT_C)
# Initial setup
robot.set_servo(SERVO_NICE) # It's called nice because the servo movement looks freaking awesome
robot.turn_light_sensor(ON)
# Light sensor calibration
black, white = robot.calibrate_light()
# Thresholds
lower = 4 * (10**-1)
upper = 9 * (10**-1)
light_level = 0
until = lambda ls=None, ts=None: normalize(ls.get_lightness(), black, white) < upper or ts.is_pressed()
robot.move_forward(until=until, ls=light, ts=touch)
while robot.running:
light_level = normalize(light.get_lightness(), black, white)
if light_level > lower:
robot.debug('Current light level: ', light_level)
robot.turn_right(8, 15) # Turn right 15° to see if we correct the course
light_level_tmp = normalize(light.get_lightness(), black, white)
if light_level_tmp > light_level:
robot.debug('Right turn didn\'t improve course. Turning left...')
robot.turn_left(8, 30)
robot.move_forward(until=until, ls=light, ts=touch)
# WARNING: From here, the code is even more experimental.
# I encourage you to consider this some sort of fancy pseudo-code.
if touch.is_pressed():
robot.debug('Found an obstacle.')
robot.turn_right(50, 45)
robot.move_forward(dist=10) # Dist is given in cm
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add experimental version of line.py (fancier :fire:)<commit_after>from nxt import locator
from nxt.sensor.common import PORT_1, PORT_2, PORT_3, PORT_4
from nxt.motor import PORT_A, PORT_B, PORT_C
from scripts.robot import Robot, SERVO_NICE, ON, OFF
from scripts.utils import normalize
def main():
brick = locator.find_one_brick() # USB connection
robot = Robot(brick)
# Sensors
light = robot.init_light_sensor(PORT_2)
touch = robot.init_touch_sensor(PORT_3)
# Motors
servo = robot.init_servo(PORT_B)
syn = robot.init_synchronized_motors(PORT_A, PORT_C)
# Initial setup
robot.set_servo(SERVO_NICE) # It's called nice because the servo movement looks freaking awesome
robot.turn_light_sensor(ON)
# Light sensor calibration
black, white = robot.calibrate_light()
# Thresholds
lower = 4 * (10**-1)
upper = 9 * (10**-1)
light_level = 0
until = lambda ls=None, ts=None: normalize(ls.get_lightness(), black, white) < upper or ts.is_pressed()
robot.move_forward(until=until, ls=light, ts=touch)
while robot.running:
light_level = normalize(light.get_lightness(), black, white)
if light_level > lower:
robot.debug('Current light level: ', light_level)
robot.turn_right(8, 15) # Turn right 15° to see if we correct the course
light_level_tmp = normalize(light.get_lightness(), black, white)
if light_level_tmp > light_level:
robot.debug('Right turn didn\'t improve course. Turning left...')
robot.turn_left(8, 30)
robot.move_forward(until=until, ls=light, ts=touch)
# WARNING: From here, the code is even more experimental.
# I encourage you to consider this some sort of fancy pseudo-code.
if touch.is_pressed():
robot.debug('Found an obstacle.')
robot.turn_right(50, 45)
robot.move_forward(dist=10) # Dist is given in cm
if __name__ == '__main__':
main()
|
|
92726afbbf7e51ff21aba9e08ea0aad6c5c49dfc
|
tank_structure_test.py
|
tank_structure_test.py
|
import unittest
import tank_structure as ts
from units import inch2meter, psi2pascal
class TestStringMethods(unittest.TestCase):
def test_sample_8_3(self):
# Do sample problem 8-3 from Huzel and Huang.
stress = psi2pascal(38e3)
a = inch2meter(41.0)
l_c = inch2meter(46.9)
E = psi2pascal(10.4e6)
v = 0.36
weld_eff = 1.0
p_to = psi2pascal(180) # Oxidizer max pressure 180 psi
p_tf = psi2pascal(170) # fuel max pressure 170 psi
# Ox tank crown 0.135 inch thick.
self.assertAlmostEqual(inch2meter(0.135), ts.crown_thickness(
p_to, 1.395*a, stress, weld_eff), delta=inch2meter(0.005))
# Fuel tank cylinder 0.183 inch thick.
self.assertAlmostEqual(inch2meter(0.183), ts.cylinder_thickness(
p_tf, a, stress, weld_eff), delta=inch2meter(0.005))
# Critical external loading for fuel tank cylinder 10.8 psi
self.assertAlmostEqual(psi2pascal(10.8), ts.cr_ex_press_cylinder(
a, inch2meter(0.183), l_c, E, v), delta=1e3)
if __name__ == '__main__':
unittest.main()
|
Add limited unit test for tank structure.
|
Add limited unit test for tank structure.
|
Python
|
mit
|
mvernacc/proptools
|
Add limited unit test for tank structure.
|
import unittest
import tank_structure as ts
from units import inch2meter, psi2pascal
class TestStringMethods(unittest.TestCase):
def test_sample_8_3(self):
# Do sample problem 8-3 from Huzel and Huang.
stress = psi2pascal(38e3)
a = inch2meter(41.0)
l_c = inch2meter(46.9)
E = psi2pascal(10.4e6)
v = 0.36
weld_eff = 1.0
p_to = psi2pascal(180) # Oxidizer max pressure 180 psi
p_tf = psi2pascal(170) # fuel max pressure 170 psi
# Ox tank crown 0.135 inch thick.
self.assertAlmostEqual(inch2meter(0.135), ts.crown_thickness(
p_to, 1.395*a, stress, weld_eff), delta=inch2meter(0.005))
# Fuel tank cylinder 0.183 inch thick.
self.assertAlmostEqual(inch2meter(0.183), ts.cylinder_thickness(
p_tf, a, stress, weld_eff), delta=inch2meter(0.005))
# Critical external loading for fuel tank cylinder 10.8 psi
self.assertAlmostEqual(psi2pascal(10.8), ts.cr_ex_press_cylinder(
a, inch2meter(0.183), l_c, E, v), delta=1e3)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add limited unit test for tank structure.<commit_after>
|
import unittest
import tank_structure as ts
from units import inch2meter, psi2pascal
class TestStringMethods(unittest.TestCase):
def test_sample_8_3(self):
# Do sample problem 8-3 from Huzel and Huang.
stress = psi2pascal(38e3)
a = inch2meter(41.0)
l_c = inch2meter(46.9)
E = psi2pascal(10.4e6)
v = 0.36
weld_eff = 1.0
p_to = psi2pascal(180) # Oxidizer max pressure 180 psi
p_tf = psi2pascal(170) # fuel max pressure 170 psi
# Ox tank crown 0.135 inch thick.
self.assertAlmostEqual(inch2meter(0.135), ts.crown_thickness(
p_to, 1.395*a, stress, weld_eff), delta=inch2meter(0.005))
# Fuel tank cylinder 0.183 inch thick.
self.assertAlmostEqual(inch2meter(0.183), ts.cylinder_thickness(
p_tf, a, stress, weld_eff), delta=inch2meter(0.005))
# Critical external loading for fuel tank cylinder 10.8 psi
self.assertAlmostEqual(psi2pascal(10.8), ts.cr_ex_press_cylinder(
a, inch2meter(0.183), l_c, E, v), delta=1e3)
if __name__ == '__main__':
unittest.main()
|
Add limited unit test for tank structure.import unittest
import tank_structure as ts
from units import inch2meter, psi2pascal
class TestStringMethods(unittest.TestCase):
def test_sample_8_3(self):
# Do sample problem 8-3 from Huzel and Huang.
stress = psi2pascal(38e3)
a = inch2meter(41.0)
l_c = inch2meter(46.9)
E = psi2pascal(10.4e6)
v = 0.36
weld_eff = 1.0
p_to = psi2pascal(180) # Oxidizer max pressure 180 psi
p_tf = psi2pascal(170) # fuel max pressure 170 psi
# Ox tank crown 0.135 inch thick.
self.assertAlmostEqual(inch2meter(0.135), ts.crown_thickness(
p_to, 1.395*a, stress, weld_eff), delta=inch2meter(0.005))
# Fuel tank cylinder 0.183 inch thick.
self.assertAlmostEqual(inch2meter(0.183), ts.cylinder_thickness(
p_tf, a, stress, weld_eff), delta=inch2meter(0.005))
# Critical external loading for fuel tank cylinder 10.8 psi
self.assertAlmostEqual(psi2pascal(10.8), ts.cr_ex_press_cylinder(
a, inch2meter(0.183), l_c, E, v), delta=1e3)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add limited unit test for tank structure.<commit_after>import unittest
import tank_structure as ts
from units import inch2meter, psi2pascal
class TestStringMethods(unittest.TestCase):
def test_sample_8_3(self):
# Do sample problem 8-3 from Huzel and Huang.
stress = psi2pascal(38e3)
a = inch2meter(41.0)
l_c = inch2meter(46.9)
E = psi2pascal(10.4e6)
v = 0.36
weld_eff = 1.0
p_to = psi2pascal(180) # Oxidizer max pressure 180 psi
p_tf = psi2pascal(170) # fuel max pressure 170 psi
# Ox tank crown 0.135 inch thick.
self.assertAlmostEqual(inch2meter(0.135), ts.crown_thickness(
p_to, 1.395*a, stress, weld_eff), delta=inch2meter(0.005))
# Fuel tank cylinder 0.183 inch thick.
self.assertAlmostEqual(inch2meter(0.183), ts.cylinder_thickness(
p_tf, a, stress, weld_eff), delta=inch2meter(0.005))
# Critical external loading for fuel tank cylinder 10.8 psi
self.assertAlmostEqual(psi2pascal(10.8), ts.cr_ex_press_cylinder(
a, inch2meter(0.183), l_c, E, v), delta=1e3)
if __name__ == '__main__':
unittest.main()
|
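The expected numbers in this test line up with the standard thin-wall membrane formulas. The sketch below is a hedged reconstruction, not the proptools tank_structure module (which is not part of this record); it reproduces the 0.135 in crown and 0.183 in cylinder thickness of sample problem 8-3, while the external-pressure buckling check is more involved and is left out.
# Hedged reconstruction of the two simpler formulas exercised above.
def inch2meter(x):
    return x * 0.0254            # exact definition of the inch
def meter2inch(x):
    return x / 0.0254
def psi2pascal(x):
    return x * 6894.757293168    # 1 psi in Pa
def crown_thickness(p, r, stress, weld_eff=1.0):
    # spherical membrane: t = p * R / (2 * sigma * e_w)
    return p * r / (2.0 * stress * weld_eff)
def cylinder_thickness(p, a, stress, weld_eff=1.0):
    # cylindrical hoop stress: t = p * a / (sigma * e_w)
    return p * a / (stress * weld_eff)
stress = psi2pascal(38e3)
a = inch2meter(41.0)
print(meter2inch(crown_thickness(psi2pascal(180), 1.395 * a, stress)))  # ~0.135
print(meter2inch(cylinder_thickness(psi2pascal(170), a, stress)))       # ~0.183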
|
538a41b25a4aae3f98e32602880f2464723a0f9d
|
docs/source/conf.py
|
docs/source/conf.py
|
# vim:fileencoding=utf-8:noet
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(os.getcwd()))))
sys.path.insert(0, os.path.abspath(os.getcwd()))
extensions = ['powerline_autodoc', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.viewcode']
source_suffix = '.rst'
master_doc = 'index'
project = u'Powerline'
version = 'beta'
release = 'beta'
exclude_patterns = ['_build']
pygments_style = 'sphinx'
html_theme = 'sphinx_rtd_theme'
html_static_path = ['_static']
html_show_copyright = False
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
try:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
except ImportError:
pass
|
# vim:fileencoding=utf-8:noet
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(os.getcwd()))))
sys.path.insert(0, os.path.abspath(os.getcwd()))
extensions = ['powerline_autodoc', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.viewcode']
source_suffix = '.rst'
master_doc = 'index'
project = u'Powerline'
version = 'beta'
release = 'beta'
exclude_patterns = ['_build']
pygments_style = 'sphinx'
html_theme = 'default'
html_static_path = ['_static']
html_show_copyright = False
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
try:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
except ImportError:
pass
|
Fix building the docs without RTD theme.
|
docs: Fix building the docs without RTD theme.
Signed-off-by: Andreas Schneider <5be00ddc76278cf6077f5047ca3384a88460c671@cryptomilk.org>
|
Python
|
mit
|
magus424/powerline,seanfisk/powerline,cyrixhero/powerline,bezhermoso/powerline,QuLogic/powerline,firebitsbr/powerline,junix/powerline,lukw00/powerline,EricSB/powerline,darac/powerline,xxxhycl2010/powerline,xxxhycl2010/powerline,IvanAli/powerline,seanfisk/powerline,dragon788/powerline,QuLogic/powerline,russellb/powerline,prvnkumar/powerline,magus424/powerline,bartvm/powerline,firebitsbr/powerline,IvanAli/powerline,DoctorJellyface/powerline,magus424/powerline,S0lll0s/powerline,junix/powerline,blindFS/powerline,EricSB/powerline,xfumihiro/powerline,lukw00/powerline,darac/powerline,areteix/powerline,bartvm/powerline,kenrachynski/powerline,blindFS/powerline,wfscheper/powerline,lukw00/powerline,firebitsbr/powerline,areteix/powerline,kenrachynski/powerline,bartvm/powerline,xfumihiro/powerline,Luffin/powerline,s0undt3ch/powerline,seanfisk/powerline,IvanAli/powerline,DoctorJellyface/powerline,QuLogic/powerline,S0lll0s/powerline,xfumihiro/powerline,junix/powerline,cyrixhero/powerline,Liangjianghao/powerline,s0undt3ch/powerline,Luffin/powerline,wfscheper/powerline,Luffin/powerline,dragon788/powerline,EricSB/powerline,cyrixhero/powerline,kenrachynski/powerline,Liangjianghao/powerline,Liangjianghao/powerline,darac/powerline,bezhermoso/powerline,S0lll0s/powerline,DoctorJellyface/powerline,prvnkumar/powerline,blindFS/powerline,bezhermoso/powerline,prvnkumar/powerline,dragon788/powerline,wfscheper/powerline,areteix/powerline,russellb/powerline,russellb/powerline,s0undt3ch/powerline,xxxhycl2010/powerline
|
# vim:fileencoding=utf-8:noet
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(os.getcwd()))))
sys.path.insert(0, os.path.abspath(os.getcwd()))
extensions = ['powerline_autodoc', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.viewcode']
source_suffix = '.rst'
master_doc = 'index'
project = u'Powerline'
version = 'beta'
release = 'beta'
exclude_patterns = ['_build']
pygments_style = 'sphinx'
html_theme = 'sphinx_rtd_theme'
html_static_path = ['_static']
html_show_copyright = False
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
try:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
except ImportError:
pass
docs: Fix building the docs without RTD theme.
Signed-off-by: Andreas Schneider <5be00ddc76278cf6077f5047ca3384a88460c671@cryptomilk.org>
|
# vim:fileencoding=utf-8:noet
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(os.getcwd()))))
sys.path.insert(0, os.path.abspath(os.getcwd()))
extensions = ['powerline_autodoc', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.viewcode']
source_suffix = '.rst'
master_doc = 'index'
project = u'Powerline'
version = 'beta'
release = 'beta'
exclude_patterns = ['_build']
pygments_style = 'sphinx'
html_theme = 'default'
html_static_path = ['_static']
html_show_copyright = False
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
try:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
except ImportError:
pass
|
<commit_before># vim:fileencoding=utf-8:noet
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(os.getcwd()))))
sys.path.insert(0, os.path.abspath(os.getcwd()))
extensions = ['powerline_autodoc', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.viewcode']
source_suffix = '.rst'
master_doc = 'index'
project = u'Powerline'
version = 'beta'
release = 'beta'
exclude_patterns = ['_build']
pygments_style = 'sphinx'
html_theme = 'sphinx_rtd_theme'
html_static_path = ['_static']
html_show_copyright = False
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
try:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
except ImportError:
pass
<commit_msg>docs: Fix building the docs without RTD theme.
Signed-off-by: Andreas Schneider <5be00ddc76278cf6077f5047ca3384a88460c671@cryptomilk.org><commit_after>
|
# vim:fileencoding=utf-8:noet
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(os.getcwd()))))
sys.path.insert(0, os.path.abspath(os.getcwd()))
extensions = ['powerline_autodoc', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.viewcode']
source_suffix = '.rst'
master_doc = 'index'
project = u'Powerline'
version = 'beta'
release = 'beta'
exclude_patterns = ['_build']
pygments_style = 'sphinx'
html_theme = 'default'
html_static_path = ['_static']
html_show_copyright = False
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
try:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
except ImportError:
pass
|
# vim:fileencoding=utf-8:noet
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(os.getcwd()))))
sys.path.insert(0, os.path.abspath(os.getcwd()))
extensions = ['powerline_autodoc', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.viewcode']
source_suffix = '.rst'
master_doc = 'index'
project = u'Powerline'
version = 'beta'
release = 'beta'
exclude_patterns = ['_build']
pygments_style = 'sphinx'
html_theme = 'sphinx_rtd_theme'
html_static_path = ['_static']
html_show_copyright = False
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
try:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
except ImportError:
pass
docs: Fix building the docs without RTD theme.
Signed-off-by: Andreas Schneider <5be00ddc76278cf6077f5047ca3384a88460c671@cryptomilk.org># vim:fileencoding=utf-8:noet
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(os.getcwd()))))
sys.path.insert(0, os.path.abspath(os.getcwd()))
extensions = ['powerline_autodoc', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.viewcode']
source_suffix = '.rst'
master_doc = 'index'
project = u'Powerline'
version = 'beta'
release = 'beta'
exclude_patterns = ['_build']
pygments_style = 'sphinx'
html_theme = 'default'
html_static_path = ['_static']
html_show_copyright = False
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
try:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
except ImportError:
pass
|
<commit_before># vim:fileencoding=utf-8:noet
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(os.getcwd()))))
sys.path.insert(0, os.path.abspath(os.getcwd()))
extensions = ['powerline_autodoc', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.viewcode']
source_suffix = '.rst'
master_doc = 'index'
project = u'Powerline'
version = 'beta'
release = 'beta'
exclude_patterns = ['_build']
pygments_style = 'sphinx'
html_theme = 'sphinx_rtd_theme'
html_static_path = ['_static']
html_show_copyright = False
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
try:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
except ImportError:
pass
<commit_msg>docs: Fix building the docs without RTD theme.
Signed-off-by: Andreas Schneider <5be00ddc76278cf6077f5047ca3384a88460c671@cryptomilk.org><commit_after># vim:fileencoding=utf-8:noet
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(os.getcwd()))))
sys.path.insert(0, os.path.abspath(os.getcwd()))
extensions = ['powerline_autodoc', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.viewcode']
source_suffix = '.rst'
master_doc = 'index'
project = u'Powerline'
version = 'beta'
release = 'beta'
exclude_patterns = ['_build']
pygments_style = 'sphinx'
html_theme = 'default'
html_static_path = ['_static']
html_show_copyright = False
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
try:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
except ImportError:
pass
|
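The underlying problem was that html_theme was hard-coded to sphinx_rtd_theme, so a local build without that package could not find the theme; the fix picks a safe default and only upgrades when the optional dependency imports. The same fallback pattern, reduced to a standalone sketch (hypothetical helper, not part of Powerline):
# Generic version of the optional-dependency fallback used in conf.py above.
def pick_theme(default='default'):
    try:
        import sphinx_rtd_theme  # optional dependency
    except ImportError:
        return default, None
    return 'sphinx_rtd_theme', sphinx_rtd_theme.get_html_theme_path()
if __name__ == '__main__':
    theme, theme_path = pick_theme()
    print(theme, theme_path)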
d3903dcb846fec36e176a45118414c925f23aa8a
|
test/test_inelastic.py
|
test/test_inelastic.py
|
from cstool.parse_input import (parse_to_model, check_settings, cstool_model)
from cstool.inelastic import (inelastic_cs_fn)
from cslib import (units)
import numpy as np
pmma = {
"name": "pmma",
"rho_m": "1.192 g/cm³",
"fermi": "0 eV",
"work_func": "2.5 eV",
"phonon": {
"model": "dual",
"lattice": "5.43 Å",
"single": {
"alpha": "2.13e-7 m²/s",
"c_s": "2750 m/s",
"eps_ac": "9.2 eV"},
"longitudinal": {
"alpha": "2.00e-7 m²/s",
"c_s": "2750 m/s",
"eps_ac": "9.2 eV"},
"transversal": {
"alpha": "2.26e-7 m²/s",
"c_s": "2750 m/s",
"eps_ac": "9.2 eV"}
},
"band_gap": "5.6 eV",
"elf_file": "data/elf/df_PMMA.dat",
"elements": {
"H": {"count": 8, "Z": 1, "M": "1.008 g/mol"},
"C": {"count": 5, "Z": 6, "M": "12.011 g/mol"},
"O": {"count": 2, "Z": 8, "M": "15.999 g/mol"}}
}
def test_inelastic_cs_fn():
"""Tests that the inelastic subroutine returns a function that
can handle arrays and returns correct units."""
settings = parse_to_model(cstool_model, pmma)
if not check_settings(settings, cstool_model):
raise ValueError("Parsed settings do not conform the model.")
fn = inelastic_cs_fn(settings)
K = np.logspace(1, 4, 100) * units.eV
W = np.logspace(-4, 4, 100) * units.eV
cs = fn(K, W[:, None])
print(cs)
assert cs.shape == (100, 100)
assert cs.dimensionality == units('m²/eV').dimensionality
|
Add basic test for inelastic_cs_fn, similar to phonon
|
Add basic test for inelastic_cs_fn, similar to phonon
|
Python
|
apache-2.0
|
eScatter/cstool
|
Add basic test for inelastic_cs_fn, similar to phonon
|
from cstool.parse_input import (parse_to_model, check_settings, cstool_model)
from cstool.inelastic import (inelastic_cs_fn)
from cslib import (units)
import numpy as np
pmma = {
"name": "pmma",
"rho_m": "1.192 g/cm³",
"fermi": "0 eV",
"work_func": "2.5 eV",
"phonon": {
"model": "dual",
"lattice": "5.43 Å",
"single": {
"alpha": "2.13e-7 m²/s",
"c_s": "2750 m/s",
"eps_ac": "9.2 eV"},
"longitudinal": {
"alpha": "2.00e-7 m²/s",
"c_s": "2750 m/s",
"eps_ac": "9.2 eV"},
"transversal": {
"alpha": "2.26e-7 m²/s",
"c_s": "2750 m/s",
"eps_ac": "9.2 eV"}
},
"band_gap": "5.6 eV",
"elf_file": "data/elf/df_PMMA.dat",
"elements": {
"H": {"count": 8, "Z": 1, "M": "1.008 g/mol"},
"C": {"count": 5, "Z": 6, "M": "12.011 g/mol"},
"O": {"count": 2, "Z": 8, "M": "15.999 g/mol"}}
}
def test_inelastic_cs_fn():
"""Tests that the inelastic subroutine returns a function that
can handle arrays and returns correct units."""
settings = parse_to_model(cstool_model, pmma)
if not check_settings(settings, cstool_model):
raise ValueError("Parsed settings do not conform the model.")
fn = inelastic_cs_fn(settings)
K = np.logspace(1, 4, 100) * units.eV
W = np.logspace(-4, 4, 100) * units.eV
cs = fn(K, W[:, None])
print(cs)
assert cs.shape == (100, 100)
assert cs.dimensionality == units('m²/eV').dimensionality
|
<commit_before><commit_msg>Add basic test for inelastic_cs_fn, similar to phonon<commit_after>
|
from cstool.parse_input import (parse_to_model, check_settings, cstool_model)
from cstool.inelastic import (inelastic_cs_fn)
from cslib import (units)
import numpy as np
pmma = {
"name": "pmma",
"rho_m": "1.192 g/cm³",
"fermi": "0 eV",
"work_func": "2.5 eV",
"phonon": {
"model": "dual",
"lattice": "5.43 Å",
"single": {
"alpha": "2.13e-7 m²/s",
"c_s": "2750 m/s",
"eps_ac": "9.2 eV"},
"longitudinal": {
"alpha": "2.00e-7 m²/s",
"c_s": "2750 m/s",
"eps_ac": "9.2 eV"},
"transversal": {
"alpha": "2.26e-7 m²/s",
"c_s": "2750 m/s",
"eps_ac": "9.2 eV"}
},
"band_gap": "5.6 eV",
"elf_file": "data/elf/df_PMMA.dat",
"elements": {
"H": {"count": 8, "Z": 1, "M": "1.008 g/mol"},
"C": {"count": 5, "Z": 6, "M": "12.011 g/mol"},
"O": {"count": 2, "Z": 8, "M": "15.999 g/mol"}}
}
def test_inelastic_cs_fn():
"""Tests that the inelastic subroutine returns a function that
can handle arrays and returns correct units."""
settings = parse_to_model(cstool_model, pmma)
if not check_settings(settings, cstool_model):
raise ValueError("Parsed settings do not conform the model.")
fn = inelastic_cs_fn(settings)
K = np.logspace(1, 4, 100) * units.eV
W = np.logspace(-4, 4, 100) * units.eV
cs = fn(K, W[:, None])
print(cs)
assert cs.shape == (100, 100)
assert cs.dimensionality == units('m²/eV').dimensionality
|
Add basic test for inelastic_cs_fn, similar to phononfrom cstool.parse_input import (parse_to_model, check_settings, cstool_model)
from cstool.inelastic import (inelastic_cs_fn)
from cslib import (units)
import numpy as np
pmma = {
"name": "pmma",
"rho_m": "1.192 g/cm³",
"fermi": "0 eV",
"work_func": "2.5 eV",
"phonon": {
"model": "dual",
"lattice": "5.43 Å",
"single": {
"alpha": "2.13e-7 m²/s",
"c_s": "2750 m/s",
"eps_ac": "9.2 eV"},
"longitudinal": {
"alpha": "2.00e-7 m²/s",
"c_s": "2750 m/s",
"eps_ac": "9.2 eV"},
"transversal": {
"alpha": "2.26e-7 m²/s",
"c_s": "2750 m/s",
"eps_ac": "9.2 eV"}
},
"band_gap": "5.6 eV",
"elf_file": "data/elf/df_PMMA.dat",
"elements": {
"H": {"count": 8, "Z": 1, "M": "1.008 g/mol"},
"C": {"count": 5, "Z": 6, "M": "12.011 g/mol"},
"O": {"count": 2, "Z": 8, "M": "15.999 g/mol"}}
}
def test_inelastic_cs_fn():
"""Tests that the inelastic subroutine returns a function that
can handle arrays and returns correct units."""
settings = parse_to_model(cstool_model, pmma)
if not check_settings(settings, cstool_model):
raise ValueError("Parsed settings do not conform the model.")
fn = inelastic_cs_fn(settings)
K = np.logspace(1, 4, 100) * units.eV
W = np.logspace(-4, 4, 100) * units.eV
cs = fn(K, W[:, None])
print(cs)
assert cs.shape == (100, 100)
assert cs.dimensionality == units('m²/eV').dimensionality
|
<commit_before><commit_msg>Add basic test for inelastic_cs_fn, similar to phonon<commit_after>from cstool.parse_input import (parse_to_model, check_settings, cstool_model)
from cstool.inelastic import (inelastic_cs_fn)
from cslib import (units)
import numpy as np
pmma = {
"name": "pmma",
"rho_m": "1.192 g/cm³",
"fermi": "0 eV",
"work_func": "2.5 eV",
"phonon": {
"model": "dual",
"lattice": "5.43 Å",
"single": {
"alpha": "2.13e-7 m²/s",
"c_s": "2750 m/s",
"eps_ac": "9.2 eV"},
"longitudinal": {
"alpha": "2.00e-7 m²/s",
"c_s": "2750 m/s",
"eps_ac": "9.2 eV"},
"transversal": {
"alpha": "2.26e-7 m²/s",
"c_s": "2750 m/s",
"eps_ac": "9.2 eV"}
},
"band_gap": "5.6 eV",
"elf_file": "data/elf/df_PMMA.dat",
"elements": {
"H": {"count": 8, "Z": 1, "M": "1.008 g/mol"},
"C": {"count": 5, "Z": 6, "M": "12.011 g/mol"},
"O": {"count": 2, "Z": 8, "M": "15.999 g/mol"}}
}
def test_inelastic_cs_fn():
"""Tests that the inelastic subroutine returns a function that
can handle arrays and returns correct units."""
settings = parse_to_model(cstool_model, pmma)
if not check_settings(settings, cstool_model):
raise ValueError("Parsed settings do not conform the model.")
fn = inelastic_cs_fn(settings)
K = np.logspace(1, 4, 100) * units.eV
W = np.logspace(-4, 4, 100) * units.eV
cs = fn(K, W[:, None])
print(cs)
assert cs.shape == (100, 100)
assert cs.dimensionality == units('m²/eV').dimensionality
|
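The one non-obvious piece of this test is the NumPy broadcasting in fn(K, W[:, None]), which evaluates a function of two 1-D grids on a full 2-D grid without loops. A standalone sketch of that pattern, using a made-up placeholder instead of the real cstool cross section:
import numpy as np
# dummy_cs is a placeholder, not the inelastic cross section from cstool.
def dummy_cs(K, W):
    return np.where(W < K, 1.0 / (K * W), 0.0)
K = np.logspace(1, 4, 100)        # shape (100,)
W = np.logspace(-4, 4, 100)       # shape (100,)
grid = dummy_cs(K, W[:, None])    # (100,) with (100, 1) broadcasts to (100, 100)
print(grid.shape)                 # (100, 100): rows index W, columns index K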
|
191363e349735d6b51d3dbbfe471f1b51f59bffc
|
python/walk_directories.py
|
python/walk_directories.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
import os
# GET THE ROOT DIRECTORY ######################################################
parser = argparse.ArgumentParser(description='An os.walk() snippet.')
parser.add_argument("root_dir_args", nargs=1, metavar="DIRECTORY", help="The directory to explore")
args = parser.parse_args()
root_dir = args.root_dir_args[0]
if not os.path.isdir(root_dir):
print("{0} is not a directory.".format(root_dir))
# WALK THE ROOT DIRECTORY #####################################################
#for cur_dir, dirs, files in os.walk(root_path, topdown=False):
# # cur_dir = path of the directory currently being walked
# # dirs = list of sub-directories in "cur_dir"
# # files = list of files in "cur_dir"
for cur_dir, dirs, files in os.walk(root_dir, topdown=False):
for name in files:
# Print files
print(os.path.join(cur_dir, name))
for name in dirs:
# Print directories
print(os.path.join(cur_dir, name))
|
Add a snippet (walk directory).
|
Add a snippet (walk directory).
|
Python
|
mit
|
jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets
|
Add a snippet (walk directory).
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
import os
# GET THE ROOT DIRECTORY ######################################################
parser = argparse.ArgumentParser(description='An os.walk() snippet.')
parser.add_argument("root_dir_args", nargs=1, metavar="DIRECTORY", help="The directory to explore")
args = parser.parse_args()
root_dir = args.root_dir_args[0]
if not os.path.isdir(root_dir):
print("{0} is not a directory.".format(root_dir))
# WALK THE ROOT DIRECTORY #####################################################
#for cur_dir, dirs, files in os.walk(root_path, topdown=False):
# # cur_dir = path of the directory currently being walked
# # dirs = list of sub-directories in "cur_dir"
# # files = list of files in "cur_dir"
for cur_dir, dirs, files in os.walk(root_dir, topdown=False):
for name in files:
# Print files
print(os.path.join(cur_dir, name))
for name in dirs:
# Print directories
print(os.path.join(cur_dir, name))
|
<commit_before><commit_msg>Add a snippet (walk directory).<commit_after>
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
import os
# GET THE ROOT DIRECTORY ######################################################
parser = argparse.ArgumentParser(description='An os.walk() snippet.')
parser.add_argument("root_dir_args", nargs=1, metavar="DIRECTORY", help="The directory to explore")
args = parser.parse_args()
root_dir = args.root_dir_args[0]
if not os.path.isdir(root_dir):
print("{0} is not a directory.".format(root_dir))
# WALK THE ROOT DIRECTORY #####################################################
#for cur_dir, dirs, files in os.walk(root_path, topdown=False):
# # cur_dir = path of the directory currently being walked
# # dirs = list of sub-directories in "cur_dir"
# # files = list of files in "cur_dir"
for cur_dir, dirs, files in os.walk(root_dir, topdown=False):
for name in files:
# Print files
print(os.path.join(cur_dir, name))
for name in dirs:
# Print directories
print(os.path.join(cur_dir, name))
|
Add a snippet (walk directory).#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
import os
# GET THE ROOT DIRECTORY ######################################################
parser = argparse.ArgumentParser(description='An os.walk() snippet.')
parser.add_argument("root_dir_args", nargs=1, metavar="DIRECTORY", help="The directory to explore")
args = parser.parse_args()
root_dir = args.root_dir_args[0]
if not os.path.isdir(root_dir):
print("{0} is not a directory.".format(root_dir))
# WALK THE ROOT DIRECTORY #####################################################
#for cur_dir, dirs, files in os.walk(root_path, topdown=False):
# # cur_dir = path of the directory currently being walked
# # dirs = list of sub-directories in "cur_dir"
# # files = list of files in "cur_dir"
for cur_dir, dirs, files in os.walk(root_dir, topdown=False):
for name in files:
# Print files
print(os.path.join(cur_dir, name))
for name in dirs:
# Print directories
print(os.path.join(cur_dir, name))
|
<commit_before><commit_msg>Add a snippet (walk directory).<commit_after>#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
import os
# GET THE ROOT DIRECTORY ######################################################
parser = argparse.ArgumentParser(description='An os.walk() snippet.')
parser.add_argument("root_dir_args", nargs=1, metavar="DIRECTORY", help="The directory to explore")
args = parser.parse_args()
root_dir = args.root_dir_args[0]
if not os.path.isdir(root_dir):
print("{0} is not a directory.".format(root_dir))
# WALK THE ROOT DIRECTORY #####################################################
#for cur_dir, dirs, files in os.walk(root_path, topdown=False):
# # cur_dir = path of the directory currently being walked
# # dirs = list of sub-directories in "cur_dir"
# # files = list of files in "cur_dir"
for cur_dir, dirs, files in os.walk(root_dir, topdown=False):
for name in files:
# Print files
print(os.path.join(cur_dir, name))
for name in dirs:
# Print directories
print(os.path.join(cur_dir, name))
|
|
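A small, purely illustrative variation on the same os.walk() pattern: collect files with a given extension and report their total size.
import os
def find_by_extension(root_dir, ext='.py'):
    matches = []
    for cur_dir, dirs, files in os.walk(root_dir):
        for name in files:
            if name.endswith(ext):
                matches.append(os.path.join(cur_dir, name))
    return matches
if __name__ == '__main__':
    paths = find_by_extension('.')
    total = sum(os.path.getsize(p) for p in paths)
    print("{0} files, {1} bytes".format(len(paths), total))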
b24997980f57b4ebfdb88f688b936ba345422acf
|
tests/test___init__.py
|
tests/test___init__.py
|
import pytest
def _call_extend(crc, chunk):
import crc32c
return crc32c.extend(crc, chunk, len(chunk))
def test_extend_w_empty_chunk():
assert _call_extend(123, b'') == 123
# From: https://tools.ietf.org/html/rfc3720#appendix-B.4
iscsi_scsi_read_10_command_pdu = [
0x01, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00,
0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x18, 0x28, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
]
_EXPECTED = [
(b'', 0x00000000),
(b'\x00' * 32, 0x8a9136aa),
(b'\xff' * 32, 0x62a8ab43),
(bytes(range(32)), 0x46dd794e),
(bytes(reversed(range(32))), 0x113fdb5c),
(bytes(iscsi_scsi_read_10_command_pdu), 0xd9963a56),
]
def _call_value(chunk):
import crc32c
return crc32c.value(chunk, len(chunk))
@pytest.mark.parametrize("chunk, expected", _EXPECTED)
def test_value(chunk, expected):
assert _call_value(chunk) == expected
|
Add tests derived from RFC 3720, section B.4.
|
Add tests derived from RFC 3720, section B.4.
|
Python
|
apache-2.0
|
googleapis/python-crc32c,googleapis/python-crc32c,googleapis/python-crc32c
|
Add tests derived from RFC 3720, section B.4.
|
import pytest
def _call_extend(crc, chunk):
import crc32c
return crc32c.extend(crc, chunk, len(chunk))
def test_extend_w_empty_chunk():
assert _call_extend(123, b'') == 123
# From: https://tools.ietf.org/html/rfc3720#appendix-B.4
iscsi_scsi_read_10_command_pdu = [
0x01, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00,
0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x18, 0x28, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
]
_EXPECTED = [
(b'', 0x00000000),
(b'\x00' * 32, 0x8a9136aa),
(b'\xff' * 32, 0x62a8ab43),
(bytes(range(32)), 0x46dd794e),
(bytes(reversed(range(32))), 0x113fdb5c),
(bytes(iscsi_scsi_read_10_command_pdu), 0xd9963a56),
]
def _call_value(chunk):
import crc32c
return crc32c.value(chunk, len(chunk))
@pytest.mark.parametrize("chunk, expected", _EXPECTED)
def test_value(chunk, expected):
assert _call_value(chunk) == expected
|
<commit_before><commit_msg>Add tests derived from RFC 3720, section B.4.<commit_after>
|
import pytest
def _call_extend(crc, chunk):
import crc32c
return crc32c.extend(crc, chunk, len(chunk))
def test_extend_w_empty_chunk():
assert _call_extend(123, b'') == 123
# From: https://tools.ietf.org/html/rfc3720#appendix-B.4
iscsi_scsi_read_10_command_pdu = [
0x01, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00,
0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x18, 0x28, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
]
_EXPECTED = [
(b'', 0x00000000),
(b'\x00' * 32, 0x8a9136aa),
(b'\xff' * 32, 0x62a8ab43),
(bytes(range(32)), 0x46dd794e),
(bytes(reversed(range(32))), 0x113fdb5c),
(bytes(iscsi_scsi_read_10_command_pdu), 0xd9963a56),
]
def _call_value(chunk):
import crc32c
return crc32c.value(chunk, len(chunk))
@pytest.mark.parametrize("chunk, expected", _EXPECTED)
def test_value(chunk, expected):
assert _call_value(chunk) == expected
|
Add tests derived from RFC 3720, section B.4.import pytest
def _call_extend(crc, chunk):
import crc32c
return crc32c.extend(crc, chunk, len(chunk))
def test_extend_w_empty_chunk():
assert _call_extend(123, b'') == 123
# From: https://tools.ietf.org/html/rfc3720#appendix-B.4
iscsi_scsi_read_10_command_pdu = [
0x01, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00,
0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x18, 0x28, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
]
_EXPECTED = [
(b'', 0x00000000),
(b'\x00' * 32, 0x8a9136aa),
(b'\xff' * 32, 0x62a8ab43),
(bytes(range(32)), 0x46dd794e),
(bytes(reversed(range(32))), 0x113fdb5c),
(bytes(iscsi_scsi_read_10_command_pdu), 0xd9963a56),
]
def _call_value(chunk):
import crc32c
return crc32c.value(chunk, len(chunk))
@pytest.mark.parametrize("chunk, expected", _EXPECTED)
def test_value(chunk, expected):
assert _call_value(chunk) == expected
|
<commit_before><commit_msg>Add tests derived from RFC 3720, section B.4.<commit_after>import pytest
def _call_extend(crc, chunk):
import crc32c
return crc32c.extend(crc, chunk, len(chunk))
def test_extend_w_empty_chunk():
assert _call_extend(123, b'') == 123
# From: https://tools.ietf.org/html/rfc3720#appendix-B.4
iscsi_scsi_read_10_command_pdu = [
0x01, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00,
0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x18, 0x28, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
]
_EXPECTED = [
(b'', 0x00000000),
(b'\x00' * 32, 0x8a9136aa),
(b'\xff' * 32, 0x62a8ab43),
(bytes(range(32)), 0x46dd794e),
(bytes(reversed(range(32))), 0x113fdb5c),
(bytes(iscsi_scsi_read_10_command_pdu), 0xd9963a56),
]
def _call_value(chunk):
import crc32c
return crc32c.value(chunk, len(chunk))
@pytest.mark.parametrize("chunk, expected", _EXPECTED)
def test_value(chunk, expected):
assert _call_value(chunk) == expected
|
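For cross-checking the expected values, a slow pure-Python CRC-32C fits in a few lines. This is only a reference sketch, not the C extension behind the crc32c module used above:
# Bitwise CRC-32C (Castagnoli), reflected polynomial 0x82F63B78.
def crc32c_ref(data, crc=0):
    crc ^= 0xFFFFFFFF
    for byte in bytearray(data):
        crc ^= byte
        for _ in range(8):
            crc = (crc >> 1) ^ (0x82F63B78 if crc & 1 else 0)
    return crc ^ 0xFFFFFFFF
# Same check values as _EXPECTED above.
assert crc32c_ref(b'\x00' * 32) == 0x8a9136aa
assert crc32c_ref(b'\xff' * 32) == 0x62a8ab43
# Folding init/xor-out into the function makes partial results chain,
# mirroring the extend() behaviour exercised by the test module.
assert crc32c_ref(b'world', crc32c_ref(b'hello ')) == crc32c_ref(b'hello world')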
|
5b3069d96d03c3f3dc15370d48c47b8cec21ed86
|
bluebottle/members/migrations/0068_auto_20220923_1420.py
|
bluebottle/members/migrations/0068_auto_20220923_1420.py
|
# Generated by Django 2.2.24 on 2022-09-23 12:20
import bluebottle.bb_accounts.models
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('members', '0067_auto_20220923_1212'),
]
operations = [
migrations.AlterField(
model_name='memberplatformsettings',
name='fiscal_month_offset',
field=models.IntegerField(blank=True, default=0, help_text='Set the number of months your fiscal year will be offset by. This will also take into account how the impact metrics are shown on the homepage. e.g. If the year starts from September (so earlier) then this value should be -4.', null=True, verbose_name='Fiscal year offset'),
),
]
|
Fix fiscal offset. Could be negative too
|
Fix fiscal offset. Could be negative too
|
Python
|
bsd-3-clause
|
onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle
|
Fix fiscal offset. Could be negative too
|
# Generated by Django 2.2.24 on 2022-09-23 12:20
import bluebottle.bb_accounts.models
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('members', '0067_auto_20220923_1212'),
]
operations = [
migrations.AlterField(
model_name='memberplatformsettings',
name='fiscal_month_offset',
field=models.IntegerField(blank=True, default=0, help_text='Set the number of months your fiscal year will be offset by. This will also take into account how the impact metrics are shown on the homepage. e.g. If the year starts from September (so earlier) then this value should be -4.', null=True, verbose_name='Fiscal year offset'),
),
]
|
<commit_before><commit_msg>Fix fiscal offset. Could be negative too<commit_after>
|
# Generated by Django 2.2.24 on 2022-09-23 12:20
import bluebottle.bb_accounts.models
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('members', '0067_auto_20220923_1212'),
]
operations = [
migrations.AlterField(
model_name='memberplatformsettings',
name='fiscal_month_offset',
field=models.IntegerField(blank=True, default=0, help_text='Set the number of months your fiscal year will be offset by. This will also take into account how the impact metrics are shown on the homepage. e.g. If the year starts from September (so earlier) then this value should be -4.', null=True, verbose_name='Fiscal year offset'),
),
]
|
Fix fiscal offset. Could be negative too# Generated by Django 2.2.24 on 2022-09-23 12:20
import bluebottle.bb_accounts.models
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('members', '0067_auto_20220923_1212'),
]
operations = [
migrations.AlterField(
model_name='memberplatformsettings',
name='fiscal_month_offset',
field=models.IntegerField(blank=True, default=0, help_text='Set the number of months your fiscal year will be offset by. This will also take into account how the impact metrics are shown on the homepage. e.g. If the year starts from September (so earlier) then this value should be -4.', null=True, verbose_name='Fiscal year offset'),
),
]
|
<commit_before><commit_msg>Fix fiscal offset. Could be negative too<commit_after># Generated by Django 2.2.24 on 2022-09-23 12:20
import bluebottle.bb_accounts.models
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('members', '0067_auto_20220923_1212'),
]
operations = [
migrations.AlterField(
model_name='memberplatformsettings',
name='fiscal_month_offset',
field=models.IntegerField(blank=True, default=0, help_text='Set the number of months your fiscal year will be offset by. This will also take into account how the impact metrics are shown on the homepage. e.g. If the year starts from September (so earlier) then this value should be -4.', null=True, verbose_name='Fiscal year offset'),
),
]
|
|
9f09494c538ab6dc99ffdd46f91850691c748428
|
src/p1.py
|
src/p1.py
|
def calc():
return sum(x for x in range(1000) if (x % 3) == 0 or (x % 5) == 0)
if __name__ == "__main__":
print(calc())
|
Add solution to first problem
|
Add solution to first problem
|
Python
|
mit
|
gsnedders/projecteuler
|
Add solution to first problem
|
def calc():
return sum(x for x in range(1000) if (x % 3) == 0 or (x % 5) == 0)
if __name__ == "__main__":
print(calc())
|
<commit_before><commit_msg>Add solution to first problem<commit_after>
|
def calc():
return sum(x for x in range(1000) if (x % 3) == 0 or (x % 5) == 0)
if __name__ == "__main__":
print(calc())
|
Add solution to first problemdef calc():
return sum(x for x in range(1000) if (x % 3) == 0 or (x % 5) == 0)
if __name__ == "__main__":
print(calc())
|
<commit_before><commit_msg>Add solution to first problem<commit_after>def calc():
return sum(x for x in range(1000) if (x % 3) == 0 or (x % 5) == 0)
if __name__ == "__main__":
print(calc())
|
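For comparison, the same answer follows in constant time from inclusion-exclusion over arithmetic series; the sketch below cross-checks the loop in calc().
# Closed-form alternative: multiples of 3 plus multiples of 5 minus multiples of 15.
def sum_of_multiples(k, limit):
    m = (limit - 1) // k          # positive multiples of k below limit
    return k * m * (m + 1) // 2   # k * (1 + 2 + ... + m)
def calc_closed_form(limit=1000):
    return (sum_of_multiples(3, limit)
            + sum_of_multiples(5, limit)
            - sum_of_multiples(15, limit))
if __name__ == "__main__":
    print(calc_closed_form())     # 233168, matching calc()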
|
3363e01f99cd60d23029d25c7102abdafa5aeacc
|
test/test_spam_check.py
|
test/test_spam_check.py
|
from sendgrid.helpers.mail.spam_check import SpamCheck
try:
import unittest2 as unittest
except ImportError:
import unittest
class UnitTests(unittest.TestCase):
def test_spam_all_values(self):
expected = {'enable': True, 'threshold': 5, 'post_to_url': 'https://www.test.com'}
spam_check = SpamCheck(enable=True, threshold=5, post_to_url='https://www.test.com')
self.assertEqual(spam_check.get(), expected)
def test_spam_no_url(self):
expected = {'enable': True, 'threshold': 10}
spam_check = SpamCheck(enable=True, threshold=10)
self.assertEqual(spam_check.get(), expected)
def test_spam_no_threshold(self):
expected = {'enable': True}
spam_check = SpamCheck(enable=True)
self.assertEqual(spam_check.get(), expected)
def test_has_values_but_not_enabled(self):
expected = {'enable': False, 'threshold': 1, 'post_to_url': 'https://www.test.com'}
spam_check = SpamCheck(enable=False, threshold=1, post_to_url='https://www.test.com')
self.assertEqual(spam_check.get(), expected)
def test_spam_change_properties(self):
expected = {'enable': False, 'threshold': 10, 'post_to_url': 'https://www.testing.com'}
spam_check = SpamCheck(enable=True, threshold=5, post_to_url='https://www.test.com')
spam_check.enable = False
spam_check.threshold = 10
spam_check.post_to_url = 'https://www.testing.com'
self.assertEqual(spam_check.get(), expected)
|
Add unit tests for spam check
|
Add unit tests for spam check
|
Python
|
mit
|
sendgrid/sendgrid-python,sendgrid/sendgrid-python,sendgrid/sendgrid-python
|
Add unit tests for spam check
|
from sendgrid.helpers.mail.spam_check import SpamCheck
try:
import unittest2 as unittest
except ImportError:
import unittest
class UnitTests(unittest.TestCase):
def test_spam_all_values(self):
expected = {'enable': True, 'threshold': 5, 'post_to_url': 'https://www.test.com'}
spam_check = SpamCheck(enable=True, threshold=5, post_to_url='https://www.test.com')
self.assertEqual(spam_check.get(), expected)
def test_spam_no_url(self):
expected = {'enable': True, 'threshold': 10}
spam_check = SpamCheck(enable=True, threshold=10)
self.assertEqual(spam_check.get(), expected)
def test_spam_no_threshold(self):
expected = {'enable': True}
spam_check = SpamCheck(enable=True)
self.assertEqual(spam_check.get(), expected)
def test_has_values_but_not_enabled(self):
expected = {'enable': False, 'threshold': 1, 'post_to_url': 'https://www.test.com'}
spam_check = SpamCheck(enable=False, threshold=1, post_to_url='https://www.test.com')
self.assertEqual(spam_check.get(), expected)
def test_spam_change_properties(self):
expected = {'enable': False, 'threshold': 10, 'post_to_url': 'https://www.testing.com'}
spam_check = SpamCheck(enable=True, threshold=5, post_to_url='https://www.test.com')
spam_check.enable = False
spam_check.threshold = 10
spam_check.post_to_url = 'https://www.testing.com'
self.assertEqual(spam_check.get(), expected)
|
<commit_before><commit_msg>Add unit tests for spam check<commit_after>
|
from sendgrid.helpers.mail.spam_check import SpamCheck
try:
import unittest2 as unittest
except ImportError:
import unittest
class UnitTests(unittest.TestCase):
def test_spam_all_values(self):
expected = {'enable': True, 'threshold': 5, 'post_to_url': 'https://www.test.com'}
spam_check = SpamCheck(enable=True, threshold=5, post_to_url='https://www.test.com')
self.assertEqual(spam_check.get(), expected)
def test_spam_no_url(self):
expected = {'enable': True, 'threshold': 10}
spam_check = SpamCheck(enable=True, threshold=10)
self.assertEqual(spam_check.get(), expected)
def test_spam_no_threshold(self):
expected = {'enable': True}
spam_check = SpamCheck(enable=True)
self.assertEqual(spam_check.get(), expected)
def test_has_values_but_not_enabled(self):
expected = {'enable': False, 'threshold': 1, 'post_to_url': 'https://www.test.com'}
spam_check = SpamCheck(enable=False, threshold=1, post_to_url='https://www.test.com')
self.assertEqual(spam_check.get(), expected)
def test_spam_change_properties(self):
expected = {'enable': False, 'threshold': 10, 'post_to_url': 'https://www.testing.com'}
spam_check = SpamCheck(enable=True, threshold=5, post_to_url='https://www.test.com')
spam_check.enable = False
spam_check.threshold = 10
spam_check.post_to_url = 'https://www.testing.com'
self.assertEqual(spam_check.get(), expected)
|
Add unit tests for spam checkfrom sendgrid.helpers.mail.spam_check import SpamCheck
try:
import unittest2 as unittest
except ImportError:
import unittest
class UnitTests(unittest.TestCase):
def test_spam_all_values(self):
expected = {'enable': True, 'threshold': 5, 'post_to_url': 'https://www.test.com'}
spam_check = SpamCheck(enable=True, threshold=5, post_to_url='https://www.test.com')
self.assertEqual(spam_check.get(), expected)
def test_spam_no_url(self):
expected = {'enable': True, 'threshold': 10}
spam_check = SpamCheck(enable=True, threshold=10)
self.assertEqual(spam_check.get(), expected)
def test_spam_no_threshold(self):
expected = {'enable': True}
spam_check = SpamCheck(enable=True)
self.assertEqual(spam_check.get(), expected)
def test_has_values_but_not_enabled(self):
expected = {'enable': False, 'threshold': 1, 'post_to_url': 'https://www.test.com'}
spam_check = SpamCheck(enable=False, threshold=1, post_to_url='https://www.test.com')
self.assertEqual(spam_check.get(), expected)
def test_spam_change_properties(self):
expected = {'enable': False, 'threshold': 10, 'post_to_url': 'https://www.testing.com'}
spam_check = SpamCheck(enable=True, threshold=5, post_to_url='https://www.test.com')
spam_check.enable = False
spam_check.threshold = 10
spam_check.post_to_url = 'https://www.testing.com'
self.assertEqual(spam_check.get(), expected)
|
<commit_before><commit_msg>Add unit tests for spam check<commit_after>from sendgrid.helpers.mail.spam_check import SpamCheck
try:
import unittest2 as unittest
except ImportError:
import unittest
class UnitTests(unittest.TestCase):
def test_spam_all_values(self):
expected = {'enable': True, 'threshold': 5, 'post_to_url': 'https://www.test.com'}
spam_check = SpamCheck(enable=True, threshold=5, post_to_url='https://www.test.com')
self.assertEqual(spam_check.get(), expected)
def test_spam_no_url(self):
expected = {'enable': True, 'threshold': 10}
spam_check = SpamCheck(enable=True, threshold=10)
self.assertEqual(spam_check.get(), expected)
def test_spam_no_threshold(self):
expected = {'enable': True}
spam_check = SpamCheck(enable=True)
self.assertEqual(spam_check.get(), expected)
def test_has_values_but_not_enabled(self):
expected = {'enable': False, 'threshold': 1, 'post_to_url': 'https://www.test.com'}
spam_check = SpamCheck(enable=False, threshold=1, post_to_url='https://www.test.com')
self.assertEqual(spam_check.get(), expected)
def test_spam_change_properties(self):
expected = {'enable': False, 'threshold': 10, 'post_to_url': 'https://www.testing.com'}
spam_check = SpamCheck(enable=True, threshold=5, post_to_url='https://www.test.com')
spam_check.enable = False
spam_check.threshold = 10
spam_check.post_to_url = 'https://www.testing.com'
self.assertEqual(spam_check.get(), expected)
|
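These tests pin down the behaviour of SpamCheck.get(): a key only appears once the corresponding attribute has been set. A hypothetical stand-in that satisfies them (not the actual sendgrid.helpers.mail implementation) could be as small as:
class SpamCheck(object):
    # Minimal stand-in consistent with the tests above; illustrative only.
    def __init__(self, enable=None, threshold=None, post_to_url=None):
        self.enable = enable
        self.threshold = threshold
        self.post_to_url = post_to_url
    def get(self):
        spam_check = {}
        if self.enable is not None:
            spam_check['enable'] = self.enable
        if self.threshold is not None:
            spam_check['threshold'] = self.threshold
        if self.post_to_url is not None:
            spam_check['post_to_url'] = self.post_to_url
        return spam_check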
|
84251f94bf82bf521533f707fc0c93d13ec39efc
|
newton.py
|
newton.py
|
import timeit
import numpy as np
from scipy.linalg import lu_factor, lu_solve
def newton(x0, func, jacobian, tol=1e-2, verbose=False):
dx = None
x = np.copy(x0)
step = 0
while dx is None or np.linalg.norm(dx) > tol:
step += 1
dx = np.linalg.solve(jacobian(x), -func(x))
x += dx
if verbose:
print('step %.3d: %s' % (step, x))
return x
def simplified_newton(x0, func, jacobian, tol=1e-2, verbose=False):
dx = None
x = np.copy(x0)
step = 0
factor = lu_factor(jacobian(x0))
while dx is None or np.linalg.norm(dx) > tol:
step += 1
dx = lu_solve(factor, -func(x))
x += dx
if verbose:
print('step %.3d: %s' % (step, x))
return x
def main():
func = lambda x: np.array([np.sqrt(x[0]) - np.sin(x[1]), x[0] ** 2. + x[1] ** 2. - 1])
jacobian = lambda x: np.array([[1. / (2. * np.sqrt(x[0])), -np.cos(x[1])],
[2. * x[0], 2. * x[1]]])
x0 = np.array([.5, np.pi / 2.])
print('solving using Newton ...')
start = timeit.default_timer()
x = newton(x0, func, jacobian, tol=1e-10, verbose=True)
if np.allclose(func(x), 0.):
print('solution: %s' % x)
else:
print('failed')
print('done, took %fs' % (timeit.default_timer() - start))
print('')
print('solving using simplified Newton ...')
start = timeit.default_timer()
x = simplified_newton(x0, func, jacobian, tol=1e-10, verbose=True)
if np.allclose(func(x), 0.):
print('solution: %s' % x)
else:
print('failed')
print('done, took %fs' % (timeit.default_timer() - start))
print('')
if __name__ == '__main__':
main()
|
Add Newton solver for non-linear equations
|
Add Newton solver for non-linear equations
|
Python
|
mit
|
matthiasplappert/math-algorithms
|
Add Newton solver for non-linear equations
|
import timeit
import numpy as np
from scipy.linalg import lu_factor, lu_solve
def newton(x0, func, jacobian, tol=1e-2, verbose=False):
dx = None
x = np.copy(x0)
step = 0
while dx is None or np.linalg.norm(dx) > tol:
step += 1
dx = np.linalg.solve(jacobian(x), -func(x))
x += dx
if verbose:
print('step %.3d: %s' % (step, x))
return x
def simplified_newton(x0, func, jacobian, tol=1e-2, verbose=False):
dx = None
x = np.copy(x0)
step = 0
factor = lu_factor(jacobian(x0))
while dx is None or np.linalg.norm(dx) > tol:
step += 1
dx = lu_solve(factor, -func(x))
x += dx
if verbose:
print('step %.3d: %s' % (step, x))
return x
def main():
func = lambda x: np.array([np.sqrt(x[0]) - np.sin(x[1]), x[0] ** 2. + x[1] ** 2. - 1])
jacobian = lambda x: np.array([[1. / (2. * np.sqrt(x[0])), -np.cos(x[1])],
[2. * x[0], 2. * x[1]]])
x0 = np.array([.5, np.pi / 2.])
print('solving using Newton ...')
start = timeit.default_timer()
x = newton(x0, func, jacobian, tol=1e-10, verbose=True)
if np.allclose(func(x), 0.):
print('solution: %s' % x)
else:
print('failed')
print('done, took %fs' % (timeit.default_timer() - start))
print('')
print('solving using simplified Newton ...')
start = timeit.default_timer()
x = simplified_newton(x0, func, jacobian, tol=1e-10, verbose=True)
if np.allclose(func(x), 0.):
print('solution: %s' % x)
else:
print('failed')
print('done, took %fs' % (timeit.default_timer() - start))
print('')
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add Newton solver for non-linear equations<commit_after>
|
import timeit
import numpy as np
from scipy.linalg import lu_factor, lu_solve
def newton(x0, func, jacobian, tol=1e-2, verbose=False):
dx = None
x = np.copy(x0)
step = 0
while dx is None or np.linalg.norm(dx) > tol:
step += 1
dx = np.linalg.solve(jacobian(x), -func(x))
x += dx
if verbose:
print('step %.3d: %s' % (step, x))
return x
def simplified_newton(x0, func, jacobian, tol=1e-2, verbose=False):
dx = None
x = np.copy(x0)
step = 0
factor = lu_factor(jacobian(x0))
while dx is None or np.linalg.norm(dx) > tol:
step += 1
dx = lu_solve(factor, -func(x))
x += dx
if verbose:
print('step %.3d: %s' % (step, x))
return x
def main():
func = lambda x: np.array([np.sqrt(x[0]) - np.sin(x[1]), x[0] ** 2. + x[1] ** 2. - 1])
jacobian = lambda x: np.array([[1. / (2. * np.sqrt(x[0])), -np.cos(x[1])],
[2. * x[0], 2. * x[1]]])
x0 = np.array([.5, np.pi / 2.])
print('solving using Newton ...')
start = timeit.default_timer()
x = newton(x0, func, jacobian, tol=1e-10, verbose=True)
if np.allclose(func(x), 0.):
print('solution: %s' % x)
else:
print('failed')
print('done, took %fs' % (timeit.default_timer() - start))
print('')
print('solving using simplified Newton ...')
start = timeit.default_timer()
x = simplified_newton(x0, func, jacobian, tol=1e-10, verbose=True)
if np.allclose(func(x), 0.):
print('solution: %s' % x)
else:
print('failed')
print('done, took %fs' % (timeit.default_timer() - start))
print('')
if __name__ == '__main__':
main()
|
Add Newton solver for non-linear equationsimport timeit
import numpy as np
from scipy.linalg import lu_factor, lu_solve
def newton(x0, func, jacobian, tol=1e-2, verbose=False):
dx = None
x = np.copy(x0)
step = 0
while dx is None or np.linalg.norm(dx) > tol:
step += 1
dx = np.linalg.solve(jacobian(x), -func(x))
x += dx
if verbose:
print('step %.3d: %s' % (step, x))
return x
def simplified_newton(x0, func, jacobian, tol=1e-2, verbose=False):
dx = None
x = np.copy(x0)
step = 0
factor = lu_factor(jacobian(x0))
while dx is None or np.linalg.norm(dx) > tol:
step += 1
dx = lu_solve(factor, -func(x))
x += dx
if verbose:
print('step %.3d: %s' % (step, x))
return x
def main():
func = lambda x: np.array([np.sqrt(x[0]) - np.sin(x[1]), x[0] ** 2. + x[1] ** 2. - 1])
jacobian = lambda x: np.array([[1. / (2. * np.sqrt(x[0])), -np.cos(x[1])],
[2. * x[0], 2. * x[1]]])
x0 = np.array([.5, np.pi / 2.])
print('solving using Newton ...')
start = timeit.default_timer()
x = newton(x0, func, jacobian, tol=1e-10, verbose=True)
if np.allclose(func(x), 0.):
print('solution: %s' % x)
else:
print('failed')
print('done, took %fs' % (timeit.default_timer() - start))
print('')
print('solving using simplified Newton ...')
start = timeit.default_timer()
x = simplified_newton(x0, func, jacobian, tol=1e-10, verbose=True)
if np.allclose(func(x), 0.):
print('solution: %s' % x)
else:
print('failed')
print('done, took %fs' % (timeit.default_timer() - start))
print('')
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add Newton solver for non-linear equations<commit_after>import timeit
import numpy as np
from scipy.linalg import lu_factor, lu_solve
def newton(x0, func, jacobian, tol=1e-2, verbose=False):
dx = None
x = np.copy(x0)
step = 0
while dx is None or np.linalg.norm(dx) > tol:
step += 1
dx = np.linalg.solve(jacobian(x), -func(x))
x += dx
if verbose:
print('step %.3d: %s' % (step, x))
return x
def simplified_newton(x0, func, jacobian, tol=1e-2, verbose=False):
dx = None
x = np.copy(x0)
step = 0
factor = lu_factor(jacobian(x0))
while dx is None or np.linalg.norm(dx) > tol:
step += 1
dx = lu_solve(factor, -func(x))
x += dx
if verbose:
print('step %.3d: %s' % (step, x))
return x
def main():
func = lambda x: np.array([np.sqrt(x[0]) - np.sin(x[1]), x[0] ** 2. + x[1] ** 2. - 1])
jacobian = lambda x: np.array([[1. / (2. * np.sqrt(x[0])), -np.cos(x[1])],
[2. * x[0], 2. * x[1]]])
x0 = np.array([.5, np.pi / 2.])
print('solving using Newton ...')
start = timeit.default_timer()
x = newton(x0, func, jacobian, tol=1e-10, verbose=True)
if np.allclose(func(x), 0.):
print('solution: %s' % x)
else:
print('failed')
print('done, took %fs' % (timeit.default_timer() - start))
print('')
print('solving using simplified Newton ...')
start = timeit.default_timer()
x = simplified_newton(x0, func, jacobian, tol=1e-10, verbose=True)
if np.allclose(func(x), 0.):
print('solution: %s' % x)
else:
print('failed')
print('done, took %fs' % (timeit.default_timer() - start))
print('')
if __name__ == '__main__':
main()
|
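A scalar illustration of the trade-off between the two solvers above: full Newton re-evaluates the derivative every step (quadratic convergence), while the simplified variant freezes it at x0, the analogue of reusing the LU factorisation (linear convergence, cheaper steps). Toy example, not part of the repository:
f = lambda x: x * x - 2.0
df = lambda x: 2.0 * x
x_full, x_simp = 1.0, 1.0
df0 = df(x_simp)  # frozen derivative, like lu_factor(jacobian(x0)) above
for step in range(1, 7):
    x_full -= f(x_full) / df(x_full)
    x_simp -= f(x_simp) / df0
    print(step, abs(x_full - 2.0 ** 0.5), abs(x_simp - 2.0 ** 0.5))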
|
754437642a69338deca196d7758ee37c7d3baffe
|
tools/make_test_data.py
|
tools/make_test_data.py
|
#!/usr/bin/env python
"""Use real bigquery data to create a tinyquery table.
This makes it easier to generate tests for existing queries, since we don't
have to construct the data by hand. Yay!
We assume that you've created application default gcloud credentials, and that
you have the project set to the one you want to use.
Usage: tools/make_test_data.py <dataset> <table>
This will print out python code suitable for creating the tinyquery table
object containing 5 rows of data from the specified bigquery table.
"""
import json
import subprocess
import sys
def json_bq_command(*args):
return json.loads(subprocess.check_output(
['bq', '--format', 'prettyjson'] + list(args)))
def fetch_table_schema(dataset, table):
return json_bq_command(
'show', '%s.%s' % (dataset, table))['schema']['fields']
def sample_table_data(dataset, table):
return json_bq_command(
'head', '-n', '5', '%s.%s' % (dataset, table))
def get_column_data(rows, column):
return [
repr(row.get(column['name']))
for row in rows
]
def make_column(column, data):
return "('%s', context.Column(type='%s', mode='%s', values=[%s]))," % (
column['name'],
column['type'],
column['mode'],
', '.join(data)
)
def write_sample_table_code(dataset, table):
schema = fetch_table_schema(dataset, table)
rows = sample_table_data(dataset, table)
indent = ' ' * 8
column_lines = [
indent + make_column(column,
get_column_data(rows, column))
for column in schema
]
return """tinyquery.table(
'%s',
%d,
collections.OrderedDict([
%s
]))
""" % (table, len(rows), '\n'.join(column_lines))
if __name__ == '__main__':
print write_sample_table_code(sys.argv[1], sys.argv[2])
|
Add a script to generate a tinyquery table literal from data in bigquery
|
Add a script to generate a tinyquery table literal from data in bigquery
Summary:
Creating tinyquery tables by hand is kind of a pain for tables of any
complexity. This revision adds a script that takes a dataset and table on the
command line and generates a tinyquery table literal (i.e. the python code that
creates a tinyquery.Table object) containing a few rows of data from the
specified table. This should simplify making test cases for our actual queries.
Note that mode is not yet handled, and record fields will not work either.
Those will be handled later.
Test Plan:
- `tools/make_test_data.py tinyquery test_table`
- `tools/make_test_data.py tinyquery string_table`
- verify that the code generated matches what is in evaluator_test.py
Reviewers: samantha
Reviewed By: samantha
Subscribers: tom
Differential Revision: https://phabricator.khanacademy.org/D32390
|
Python
|
mit
|
Khan/tinyquery
|
Add a script to generate a tinyquery table literal from data in bigquery
Summary:
Creating tinyquery tables by hand is kind of a pain for tables of any
complexity. This revision adds a script that takes a dataset and table on the
command line and generates a tinyquery table literal (i.e. the python code that
creates a tinyquery.Table object) containing a few rows of data from the
specified table. This should simplify making test cases for our actual queries.
Note that mode is not yet handled, and record fields will not work either.
Those will be handled later.
Test Plan:
- `tools/make_test_data.py tinyquery test_table`
- `tools/make_test_data.py tinyquery string_table`
- verify that the code generated matches what is in evaluator_test.py
Reviewers: samantha
Reviewed By: samantha
Subscribers: tom
Differential Revision: https://phabricator.khanacademy.org/D32390
|
#!/usr/bin/env python
"""Use real bigquery data to create a tinyquery table.
This makes it easier to generate tests for existing queries, since we don't
have to construct the data by hand. Yay!
We assume that you've created application default gcloud credentials, and that
you have the project set to the one you want to use.
Usage: tools/make_test_data.py <dataset> <table>
This will print out python code suitable for creating the tinyquery table
object containing 5 rows of data from the specified bigquery table.
"""
import json
import subprocess
import sys
def json_bq_command(*args):
return json.loads(subprocess.check_output(
['bq', '--format', 'prettyjson'] + list(args)))
def fetch_table_schema(dataset, table):
return json_bq_command(
'show', '%s.%s' % (dataset, table))['schema']['fields']
def sample_table_data(dataset, table):
return json_bq_command(
'head', '-n', '5', '%s.%s' % (dataset, table))
def get_column_data(rows, column):
return [
repr(row.get(column['name']))
for row in rows
]
def make_column(column, data):
return "('%s', context.Column(type='%s', mode='%s', values=[%s]))," % (
column['name'],
column['type'],
column['mode'],
', '.join(data)
)
def write_sample_table_code(dataset, table):
schema = fetch_table_schema(dataset, table)
rows = sample_table_data(dataset, table)
indent = ' ' * 8
column_lines = [
indent + make_column(column,
get_column_data(rows, column))
for column in schema
]
return """tinyquery.table(
'%s',
%d,
collections.OrderedDict([
%s
]))
""" % (table, len(rows), '\n'.join(column_lines))
if __name__ == '__main__':
print write_sample_table_code(sys.argv[1], sys.argv[2])
|
<commit_before><commit_msg>Add a script to generate a tinyquery table literal from data in bigquery
Summary:
Creating tinyquery tables by hand is kind of a pain for tables of any
complexity. This revision adds a script that takes a dataset and table on the
command line and generates a tinyquery table literal (i.e. the python code that
creates a tinyquery.Table object) containing a few rows of data from the
specified table. This should simplify making test cases for our actual queries.
Note that mode is not yet handled, and record fields will not work either.
Those will be handled later.
Test Plan:
- `tools/make_test_data.py tinyquery test_table`
- `tools/make_test_data.py tinyquery string_table`
- verify that the code generated matches what is in evaluator_test.py
Reviewers: samantha
Reviewed By: samantha
Subscribers: tom
Differential Revision: https://phabricator.khanacademy.org/D32390<commit_after>
|
#!/usr/bin/env python
"""Use real bigquery data to create a tinyquery table.
This makes it easier to generate tests for existing queries, since we don't
have to construct the data by hand. Yay!
We assume that you've created application default gcloud credentials, and that
you have the project set to the one you want to use.
Usage: tools/make_test_data.py <dataset> <table>
This will print out python code suitable for creating the tinyquery table
object containing 5 rows of data from the specified bigquery table.
"""
import json
import subprocess
import sys
def json_bq_command(*args):
return json.loads(subprocess.check_output(
['bq', '--format', 'prettyjson'] + list(args)))
def fetch_table_schema(dataset, table):
return json_bq_command(
'show', '%s.%s' % (dataset, table))['schema']['fields']
def sample_table_data(dataset, table):
return json_bq_command(
'head', '-n', '5', '%s.%s' % (dataset, table))
def get_column_data(rows, column):
return [
repr(row.get(column['name']))
for row in rows
]
def make_column(column, data):
return "('%s', context.Column(type='%s', mode='%s', values=[%s]))," % (
column['name'],
column['type'],
column['mode'],
', '.join(data)
)
def write_sample_table_code(dataset, table):
schema = fetch_table_schema(dataset, table)
rows = sample_table_data(dataset, table)
indent = ' ' * 8
column_lines = [
indent + make_column(column,
get_column_data(rows, column))
for column in schema
]
return """tinyquery.table(
'%s',
%d,
collections.OrderedDict([
%s
]))
""" % (table, len(rows), '\n'.join(column_lines))
if __name__ == '__main__':
print write_sample_table_code(sys.argv[1], sys.argv[2])
|
Add a script to generate a tinyquery table literal from data in bigquery
Summary:
Creating tinyquery tables by hand is kind of a pain for tables of any
complexity. This revision adds a script that takes a dataset and table on the
command line and generates a tinyquery table literal (i.e. the python code that
creates a tinyquery.Table object) containing a few rows of data from the
specified table. This should simplify making test cases for our actual queries.
Note that mode is not yet handled, and record fields will not work either.
Those will be handled later.
Test Plan:
- `tools/make_test_data.py tinyquery test_table`
- `tools/make_test_data.py tinyquery string_table`
- verify that the code generated matches what is in evaluator_test.py
Reviewers: samantha
Reviewed By: samantha
Subscribers: tom
Differential Revision: https://phabricator.khanacademy.org/D32390#!/usr/bin/env python
"""Use real bigquery data to create a tinyquery table.
This makes it easier to generate tests for existing queries, since we don't
have to construct the data by hand. Yay!
We assume that you've created application default gcloud credentials, and that
you have the project set to the one you want to use.
Usage: tools/make_test_data.py <dataset> <table>
This will print out python code suitable for creating the tinyquery table
object containing 5 rows of data from the specified bigquery table.
"""
import json
import subprocess
import sys
def json_bq_command(*args):
return json.loads(subprocess.check_output(
['bq', '--format', 'prettyjson'] + list(args)))
def fetch_table_schema(dataset, table):
return json_bq_command(
'show', '%s.%s' % (dataset, table))['schema']['fields']
def sample_table_data(dataset, table):
return json_bq_command(
'head', '-n', '5', '%s.%s' % (dataset, table))
def get_column_data(rows, column):
return [
repr(row.get(column['name']))
for row in rows
]
def make_column(column, data):
return "('%s', context.Column(type='%s', mode='%s', values=[%s]))," % (
column['name'],
column['type'],
column['mode'],
', '.join(data)
)
def write_sample_table_code(dataset, table):
schema = fetch_table_schema(dataset, table)
rows = sample_table_data(dataset, table)
indent = ' ' * 8
column_lines = [
indent + make_column(column,
get_column_data(rows, column))
for column in schema
]
return """tinyquery.table(
'%s',
%d,
collections.OrderedDict([
%s
]))
""" % (table, len(rows), '\n'.join(column_lines))
if __name__ == '__main__':
print write_sample_table_code(sys.argv[1], sys.argv[2])
|
<commit_before><commit_msg>Add a script to generate a tinyquery table literal from data in bigquery
Summary:
Creating tinyquery tables by hand is kind of a pain for tables of any
complexity. This revision adds a script that takes a dataset and table on the
command line and generates a tinyquery table literal (i.e. the python code that
creates a tinyquery.Table object) containing a few rows of data from the
specified table. This should simplify making test cases for our actual queries.
Note that mode is not yet handled, and record fields will not work either.
Those will be handled later.
Test Plan:
- `tools/make_test_data.py tinyquery test_table`
- `tools/make_test_data.py tinyquery string_table`
- verify that the code generated matches what is in evaluator_test.py
Reviewers: samantha
Reviewed By: samantha
Subscribers: tom
Differential Revision: https://phabricator.khanacademy.org/D32390<commit_after>#!/usr/bin/env python
"""Use real bigquery data to create a tinyquery table.
This makes it easier to generate tests for existing queries, since we don't
have to construct the data by hand. Yay!
We assume that you've created application default gcloud credentials, and that
you have the project set to the one you want to use.
Usage: tools/make_test_data.py <dataset> <table>
This will print out python code suitable for creating the tinyquery table
object containing 5 rows of data from the specified bigquery table.
"""
import json
import subprocess
import sys
def json_bq_command(*args):
return json.loads(subprocess.check_output(
['bq', '--format', 'prettyjson'] + list(args)))
def fetch_table_schema(dataset, table):
return json_bq_command(
'show', '%s.%s' % (dataset, table))['schema']['fields']
def sample_table_data(dataset, table):
return json_bq_command(
'head', '-n', '5', '%s.%s' % (dataset, table))
def get_column_data(rows, column):
return [
repr(row.get(column['name']))
for row in rows
]
def make_column(column, data):
return "('%s', context.Column(type='%s', mode='%s', values=[%s]))," % (
column['name'],
column['type'],
column['mode'],
', '.join(data)
)
def write_sample_table_code(dataset, table):
schema = fetch_table_schema(dataset, table)
rows = sample_table_data(dataset, table)
indent = ' ' * 8
column_lines = [
indent + make_column(column,
get_column_data(rows, column))
for column in schema
]
return """tinyquery.table(
'%s',
%d,
collections.OrderedDict([
%s
]))
""" % (table, len(rows), '\n'.join(column_lines))
if __name__ == '__main__':
print write_sample_table_code(sys.argv[1], sys.argv[2])
|
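For illustration, a minimal sketch of the formatting step in make_column() applied to invented schema and row data; no bq call is involved, and the column name and values here are hypothetical:
def make_column(column, data):
    return "('%s', context.Column(type='%s', mode='%s', values=[%s]))," % (
        column['name'], column['type'], column['mode'], ', '.join(data))
sample_column = {'name': 'user_id', 'type': 'INTEGER', 'mode': 'NULLABLE'}
sample_rows = [{'user_id': '1'}, {'user_id': '2'}]
data = [repr(row.get(sample_column['name'])) for row in sample_rows]
print(make_column(sample_column, data))
# -> ('user_id', context.Column(type='INTEGER', mode='NULLABLE', values=['1', '2'])),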
|
fe13379bd4b1cd323c433bfaa8ab75368df3d8c5
|
midterm/problem5.py
|
midterm/problem5.py
|
# Problem 5
# 10.0 points possible (graded)
# Write a Python function that returns the sum of the pairwise products of listA and listB. You should assume that listA and listB have the same length and are two lists of integer numbers. For example, if listA = [1, 2, 3] and listB = [4, 5, 6], the dot product is 1*4 + 2*5 + 3*6, meaning your function should return: 32
# Hint: You will need to traverse both lists in parallel.
# This function takes in two lists of numbers and returns a number.
def dotProduct(listA, listB):
'''
listA: a list of numbers
listB: a list of numbers of the same length as listA
'''
sum = 0
for x in range(len(listA)):
sum += listA[x] * listB[x]
return sum
listA = [1, 2, 3]
listB = [4, 5, 6]
print(dotProduct(listA, listB))
|
Write a Python function that returns the sum of the pairwise products of two lists
|
Write a Python function that returns the sum of the pairwise products of two lists
|
Python
|
mit
|
Kunal57/MIT_6.00.1x
|
Write a Python function that returns the sum of the pairwise products of two lists
|
# Problem 5
# 10.0 points possible (graded)
# Write a Python function that returns the sum of the pairwise products of listA and listB. You should assume that listA and listB have the same length and are two lists of integer numbers. For example, if listA = [1, 2, 3] and listB = [4, 5, 6], the dot product is 1*4 + 2*5 + 3*6, meaning your function should return: 32
# Hint: You will need to traverse both lists in parallel.
# This function takes in two lists of numbers and returns a number.
def dotProduct(listA, listB):
'''
listA: a list of numbers
listB: a list of numbers of the same length as listA
'''
sum = 0
for x in range(len(listA)):
sum += listA[x] * listB[x]
return sum
listA = [1, 2, 3]
listB = [4, 5, 6]
print(dotProduct(listA, listB))
|
<commit_before><commit_msg>Write a Python function that returns the sum of the pairwise products of two lists<commit_after>
|
# Problem 5
# 10.0 points possible (graded)
# Write a Python function that returns the sum of the pairwise products of listA and listB. You should assume that listA and listB have the same length and are two lists of integer numbers. For example, if listA = [1, 2, 3] and listB = [4, 5, 6], the dot product is 1*4 + 2*5 + 3*6, meaning your function should return: 32
# Hint: You will need to traverse both lists in parallel.
# This function takes in two lists of numbers and returns a number.
def dotProduct(listA, listB):
'''
listA: a list of numbers
listB: a list of numbers of the same length as listA
'''
sum = 0
for x in range(len(listA)):
sum += listA[x] * listB[x]
return sum
listA = [1, 2, 3]
listB = [4, 5, 6]
print(dotProduct(listA, listB))
|
Write a Python function that returns the sum of the pairwise products of two lists# Problem 5
# 10.0 points possible (graded)
# Write a Python function that returns the sum of the pairwise products of listA and listB. You should assume that listA and listB have the same length and are two lists of integer numbers. For example, if listA = [1, 2, 3] and listB = [4, 5, 6], the dot product is 1*4 + 2*5 + 3*6, meaning your function should return: 32
# Hint: You will need to traverse both lists in parallel.
# This function takes in two lists of numbers and returns a number.
def dotProduct(listA, listB):
'''
listA: a list of numbers
listB: a list of numbers of the same length as listA
'''
sum = 0
for x in range(len(listA)):
sum += listA[x] * listB[x]
return sum
listA = [1, 2, 3]
listB = [4, 5, 6]
print(dotProduct(listA, listB))
|
<commit_before><commit_msg>Write a Python function that returns the sum of the pairwise products of two lists<commit_after># Problem 5
# 10.0 points possible (graded)
# Write a Python function that returns the sum of the pairwise products of listA and listB. You should assume that listA and listB have the same length and are two lists of integer numbers. For example, if listA = [1, 2, 3] and listB = [4, 5, 6], the dot product is 1*4 + 2*5 + 3*6, meaning your function should return: 32
# Hint: You will need to traverse both lists in parallel.
# This function takes in two lists of numbers and returns a number.
def dotProduct(listA, listB):
'''
listA: a list of numbers
listB: a list of numbers of the same length as listA
'''
sum = 0
for x in range(len(listA)):
sum += listA[x] * listB[x]
return sum
listA = [1, 2, 3]
listB = [4, 5, 6]
print(dotProduct(listA, listB))
|
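As a minimal equivalent sketch (not part of the original exercise), the same pairwise-product sum can also be written with zip():
def dot_product(list_a, list_b):
    # Multiply elements pairwise and sum the results.
    return sum(a * b for a, b in zip(list_a, list_b))
assert dot_product([1, 2, 3], [4, 5, 6]) == 32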
|
cbcc97af0bc0710358dc04ba927ccde2ef70be8f
|
cerbero/commands/add_recipe.py
|
cerbero/commands/add_recipe.py
|
# cerbero - a multi-platform build system for Open Source software
# Copyright (C) 2012 Andoni Morales Alastruey <ylatuya@gmail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
import os
from cerbero.commands import Command, register_command
from cerbero.errors import FatalError
from cerbero.utils import _, N_, ArgparseArgument
RECEIPT_TPL =\
'''from cerbero import recipe
class Recipe(recipe.Recipe):
name = '%(name)s'
version = '%(version)s'
'''
class AddRecipe(Command):
doc = N_('Adds a new recipe')
name = 'add-recipe'
def __init__(self):
Command.__init__(self,
[ArgparseArgument('name', nargs=1,
help=_('name of the recipe to build')),
ArgparseArgument('version', nargs=1,
help=_('version of the recipe to build')),
])
def run(self, config, args):
name = args.name[0]
version = args.version[0]
try:
f = open(os.path.join(config.recipes_dir, '%s.recipe' % name), 'w')
f.write(RECEIPT_TPL % {'name': name, 'version': version})
f.close()
except IOError, ex:
raise FatalError(_("Error creating recipe: %s", ex))
register_command(AddRecipe)
|
Add command to add new recipes
|
Add command to add new recipes
|
Python
|
lgpl-2.1
|
centricular/cerbero,fluendo/cerbero,nirbheek/cerbero,shoreflyer/cerbero,ramaxlo/cerbero,nicolewu/cerbero,freedesktop-unofficial-mirror/gstreamer__cerbero,atsushieno/cerbero,nirbheek/cerbero-old,freedesktop-unofficial-mirror/gstreamer__sdk__cerbero,justinjoy/cerbero,lubosz/cerbero,nzjrs/cerbero,davibe/cerbero,freedesktop-unofficial-mirror/gstreamer-sdk__cerbero,lubosz/cerbero,jackjansen/cerbero,davibe/cerbero,centricular/cerbero,sdroege/cerbero,sdroege/cerbero,ikonst/cerbero,AlertMe/cerbero,ramaxlo/cerbero,brion/cerbero,AlertMe/cerbero,EricssonResearch/cerbero,ford-prefect/cerbero,ikonst/cerbero,fluendo/cerbero,flexVDI/cerbero,freedesktop-unofficial-mirror/gstreamer__cerbero,superdump/cerbero,centricular/cerbero,davibe/cerbero,ylatuya/cerbero,OptoFidelity/cerbero,BigBrother-International/gst-cerbero,shoreflyer/cerbero,nirbheek/cerbero,ramaxlo/cerbero,fluendo/cerbero,ford-prefect/cerbero,ylatuya/cerbero,multipath-rtp/cerbero,centricular/cerbero,freedesktop-unofficial-mirror/gstreamer__sdk__cerbero,atsushieno/cerbero,superdump/cerbero,flexVDI/cerbero,ikonst/cerbero,nirbheek/cerbero-old,nirbheek/cerbero-old,EricssonResearch/cerbero,brion/cerbero,multipath-rtp/cerbero,GStreamer/cerbero,freedesktop-unofficial-mirror/gstreamer__sdk__cerbero,multipath-rtp/cerbero,brion/cerbero,EricssonResearch/cerbero,justinjoy/cerbero,nzjrs/cerbero,BigBrother-International/gst-cerbero,cee1/cerbero-mac,ramaxlo/cerbero,lubosz/cerbero,BigBrother-International/gst-cerbero,freedesktop-unofficial-mirror/gstreamer-sdk__cerbero,nirbheek/cerbero,AlertMe/cerbero,cee1/cerbero-mac,jackjansen/cerbero-2013,ford-prefect/cerbero,jackjansen/cerbero-2013,freedesktop-unofficial-mirror/gstreamer-sdk__cerbero,atsushieno/cerbero,ikonst/cerbero,freedesktop-unofficial-mirror/gstreamer__sdk__cerbero,sdroege/cerbero,jackjansen/cerbero-2013,AlertMe/cerbero,flexVDI/cerbero,ylatuya/cerbero,sdroege/cerbero,jackjansen/cerbero,shoreflyer/cerbero,justinjoy/cerbero,atsushieno/cerbero,lubosz/cerbero,nzjrs/cerbero,nirbheek/cerbero-old,GStreamer/cerbero,fluendo/cerbero,nicolewu/cerbero,ford-prefect/cerbero,cee1/cerbero-mac,multipath-rtp/cerbero,BigBrother-International/gst-cerbero,jackjansen/cerbero-2013,atsushieno/cerbero,davibe/cerbero,nicolewu/cerbero,cee1/cerbero-mac,GStreamer/cerbero,fluendo/cerbero,justinjoy/cerbero,superdump/cerbero,AlertMe/cerbero,jackjansen/cerbero,freedesktop-unofficial-mirror/gstreamer__cerbero,GStreamer/cerbero,jackjansen/cerbero,freedesktop-unofficial-mirror/gstreamer__cerbero,OptoFidelity/cerbero,multipath-rtp/cerbero,OptoFidelity/cerbero,freedesktop-unofficial-mirror/gstreamer-sdk__cerbero,EricssonResearch/cerbero,EricssonResearch/cerbero,OptoFidelity/cerbero,nirbheek/cerbero,jackjansen/cerbero-2013,superdump/cerbero,flexVDI/cerbero,sdroege/cerbero,shoreflyer/cerbero,shoreflyer/cerbero,brion/cerbero,ramaxlo/cerbero,flexVDI/cerbero,BigBrother-International/gst-cerbero,ikonst/cerbero,GStreamer/cerbero,brion/cerbero,nzjrs/cerbero,nzjrs/cerbero,centricular/cerbero,ylatuya/cerbero,freedesktop-unofficial-mirror/gstreamer-sdk__cerbero
|
Add command to add new recipes
|
# cerbero - a multi-platform build system for Open Source software
# Copyright (C) 2012 Andoni Morales Alastruey <ylatuya@gmail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
import os
from cerbero.commands import Command, register_command
from cerbero.errors import FatalError
from cerbero.utils import _, N_, ArgparseArgument
RECEIPT_TPL =\
'''from cerbero import recipe
class Recipe(recipe.Recipe):
name = '%(name)s'
version = '%(version)s'
'''
class AddRecipe(Command):
doc = N_('Adds a new recipe')
name = 'add-recipe'
def __init__(self):
Command.__init__(self,
[ArgparseArgument('name', nargs=1,
help=_('name of the recipe to build')),
ArgparseArgument('version', nargs=1,
help=_('version of the recipe to build')),
])
def run(self, config, args):
name = args.name[0]
version = args.version[0]
try:
f = open(os.path.join(config.recipes_dir, '%s.recipe' % name), 'w')
f.write(RECEIPT_TPL % {'name': name, 'version': version})
f.close()
except IOError, ex:
raise FatalError(_("Error creating recipe: %s", ex))
register_command(AddRecipe)
|
<commit_before><commit_msg>Add command to add new recipes<commit_after>
|
# cerbero - a multi-platform build system for Open Source software
# Copyright (C) 2012 Andoni Morales Alastruey <ylatuya@gmail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
import os
from cerbero.commands import Command, register_command
from cerbero.errors import FatalError
from cerbero.utils import _, N_, ArgparseArgument
RECEIPT_TPL =\
'''from cerbero import recipe
class Recipe(recipe.Recipe):
name = '%(name)s'
version = '%(version)s'
'''
class AddRecipe(Command):
doc = N_('Adds a new recipe')
name = 'add-recipe'
def __init__(self):
Command.__init__(self,
[ArgparseArgument('name', nargs=1,
help=_('name of the recipe to build')),
ArgparseArgument('version', nargs=1,
help=_('version of the recipe to build')),
])
def run(self, config, args):
name = args.name[0]
version = args.version[0]
try:
f = open(os.path.join(config.recipes_dir, '%s.recipe' % name), 'w')
f.write(RECEIPT_TPL % {'name': name, 'version': version})
f.close()
except IOError, ex:
raise FatalError(_("Error creating recipe: %s", ex))
register_command(AddRecipe)
|
Add command to add new recipes# cerbero - a multi-platform build system for Open Source software
# Copyright (C) 2012 Andoni Morales Alastruey <ylatuya@gmail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
import os
from cerbero.commands import Command, register_command
from cerbero.errors import FatalError
from cerbero.utils import _, N_, ArgparseArgument
RECEIPT_TPL =\
'''from cerbero import recipe
class Recipe(recipe.Recipe):
name = '%(name)s'
version = '%(version)s'
'''
class AddRecipe(Command):
doc = N_('Adds a new recipe')
name = 'add-recipe'
def __init__(self):
Command.__init__(self,
[ArgparseArgument('name', nargs=1,
help=_('name of the recipe to build')),
ArgparseArgument('version', nargs=1,
help=_('version of the recipe to build')),
])
def run(self, config, args):
name = args.name[0]
version = args.version[0]
try:
f = open(os.path.join(config.recipes_dir, '%s.recipe' % name), 'w')
f.write(RECEIPT_TPL % {'name': name, 'version': version})
f.close()
except IOError, ex:
raise FatalError(_("Error creating recipe: %s", ex))
register_command(AddRecipe)
|
<commit_before><commit_msg>Add command to add new recipes<commit_after># cerbero - a multi-platform build system for Open Source software
# Copyright (C) 2012 Andoni Morales Alastruey <ylatuya@gmail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
import os
from cerbero.commands import Command, register_command
from cerbero.errors import FatalError
from cerbero.utils import _, N_, ArgparseArgument
RECEIPT_TPL =\
'''from cerbero import recipe
class Recipe(recipe.Recipe):
name = '%(name)s'
version = '%(version)s'
'''
class AddRecipe(Command):
doc = N_('Adds a new recipe')
name = 'add-recipe'
def __init__(self):
Command.__init__(self,
[ArgparseArgument('name', nargs=1,
help=_('name of the recipe to build')),
ArgparseArgument('version', nargs=1,
help=_('version of the recipe to build')),
])
def run(self, config, args):
name = args.name[0]
version = args.version[0]
try:
f = open(os.path.join(config.recipes_dir, '%s.recipe' % name), 'w')
f.write(RECEIPT_TPL % {'name': name, 'version': version})
f.close()
except IOError, ex:
raise FatalError(_("Error creating recipe: %s", ex))
register_command(AddRecipe)
|
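To make the template concrete, a small hedged sketch rendering RECEIPT_TPL with invented values ('gst-foo', '1.0'); the real command writes this text to <recipes_dir>/<name>.recipe:
RECEIPT_TPL = '''from cerbero import recipe
class Recipe(recipe.Recipe):
    name = '%(name)s'
    version = '%(version)s'
'''
print(RECEIPT_TPL % {'name': 'gst-foo', 'version': '1.0'})
# from cerbero import recipe
# class Recipe(recipe.Recipe):
#     name = 'gst-foo'
#     version = '1.0'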
|
0235330fa2b58e166a25a90714436e9503c6c5a9
|
examples/plot-results.py
|
examples/plot-results.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import pickle
from matplotlib import pyplot as plt
if len(sys.argv) < 2:
sys.exit("Usage: plot-results.py <pickle file>")
with open(sys.argv[1], 'rb') as pf:
results = pickle.load(pf)
lines = []
for num, result in results.items():
x, y = zip(*sorted(result.items()))
label = '%i Component%s' % (num, '' if num == 1 else 's')
lines.extend(plt.plot(x, y, label=label))
plt.ylabel("Time (ms)")
plt.xlabel("# Entities")
plt.legend(handles=lines, bbox_to_anchor=(0.5, 1))
plt.show()
|
Add script to plot benchmark results with matplotlib
|
Add script to plot benchmark results with matplotlib
|
Python
|
mit
|
benmoran56/esper
|
Add script to plot benchmark results with matplotlib
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import pickle
from matplotlib import pyplot as plt
if len(sys.argv) < 2:
sys.exit("Usage: plot-results.py <pickle file>")
with open(sys.argv[1], 'rb') as pf:
results = pickle.load(pf)
lines = []
for num, result in results.items():
x, y = zip(*sorted(result.items()))
label = '%i Component%s' % (num, '' if num == 1 else 's')
lines.extend(plt.plot(x, y, label=label))
plt.ylabel("Time (ms)")
plt.xlabel("# Entities")
plt.legend(handles=lines, bbox_to_anchor=(0.5, 1))
plt.show()
|
<commit_before><commit_msg>Add script to plot benchmark results with matplotlib<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import pickle
from matplotlib import pyplot as plt
if len(sys.argv) < 2:
sys.exit("Usage: plot-results.py <pickle file>")
with open(sys.argv[1], 'rb') as pf:
results = pickle.load(pf)
lines = []
for num, result in results.items():
x, y = zip(*sorted(result.items()))
label = '%i Component%s' % (num, '' if num == 1 else 's')
lines.extend(plt.plot(x, y, label=label))
plt.ylabel("Time (ms)")
plt.xlabel("# Entities")
plt.legend(handles=lines, bbox_to_anchor=(0.5, 1))
plt.show()
|
Add script to plot benchmark results with matplotlib#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import pickle
from matplotlib import pyplot as plt
if len(sys.argv) < 2:
sys.exit("Usage: plot-results.py <pickle file>")
with open(sys.argv[1], 'rb') as pf:
results = pickle.load(pf)
lines = []
for num, result in results.items():
x, y = zip(*sorted(result.items()))
label = '%i Component%s' % (num, '' if num == 1 else 's')
lines.extend(plt.plot(x, y, label=label))
plt.ylabel("Time (ms)")
plt.xlabel("# Entities")
plt.legend(handles=lines, bbox_to_anchor=(0.5, 1))
plt.show()
|
<commit_before><commit_msg>Add script to plot benchmark results with matplotlib<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import pickle
from matplotlib import pyplot as plt
if len(sys.argv) < 2:
sys.exit("Usage: plot-results.py <pickle file>")
with open(sys.argv[1], 'rb') as pf:
results = pickle.load(pf)
lines = []
for num, result in results.items():
x, y = zip(*sorted(result.items()))
label = '%i Component%s' % (num, '' if num == 1 else 's')
lines.extend(plt.plot(x, y, label=label))
plt.ylabel("Time (ms)")
plt.xlabel("# Entities")
plt.legend(handles=lines, bbox_to_anchor=(0.5, 1))
plt.show()
|
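The plotting loop implies the pickle holds a nested mapping of component count to {entity count: time}; a hedged sketch (numbers invented) that writes a compatible file so the script can be exercised:
import pickle
results = {
    1: {100: 0.5, 1000: 4.8, 5000: 24.0},   # 1 Component
    3: {100: 1.2, 1000: 11.5, 5000: 58.0},  # 3 Components
}
with open('results.pickle', 'wb') as pf:
    pickle.dump(results, pf)
# then: ./plot-results.py results.pickle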
|
becbda54738ab976342420faf684e9be417132e0
|
scripts/transtate.py
|
scripts/transtate.py
|
#!/usr/bin/env python
# Extracts the progress of translations from the compilation
# log in easily readable form. Make sure to delete all .qm
# files beforehand.
#
# Usage: cat log | .\transtate.py
#
import sys
import re
def n(val):
return (int(val) if val else 0)
if __name__ == "__main__":
#--Regex matches strings like
#
#Updating 'mumble_zh_CN.qm'...
# Generated 1421 translation(s) (1145 finished and 276 unfinished)
# Ignored 89 untranslated source text(s)
# s:\dev\QtMumble\bin\lrelease.exe mumble_zh_TW.ts
#Updating 'mumble_zh_TW.qm'...
# Generated 664 translation(s) (594 finished and 70 unfinished)
# Ignored 846 untranslated source text(s)
update = re.compile(r"Updating '([\w\.]+)'\.\.\.\s+Generated (\d+) translation\(s\) \((\d+) finished and (\d+) unfinished\)(?:\s+ Ignored (\d+) untranslated source text\(s\))?")
langs = 0
s = 's'
sortedbyuntranslated = sorted(update.findall(sys.stdin.read()), key=lambda s: (float(s[2]) / (n(s[1]) + n(s[4]))) if n(s[1]) else 10, reverse=True)
for lang, total, finished, unfinished, ignored in sortedbyuntranslated:
print "%s:" % lang
if int(total) == 0:
print " Source language"
else:
realtotal = n(total) + n(ignored)
percent = float(finished) / realtotal * 100
print " %d marked unfinished" % (n(unfinished))
if n(ignored):
print " %d untranslated." % (n(ignored))
print " => %d%% done (total %d + %d)." % (percent, n(total), n(ignored))
print
langs += 1
print "Number of languages: %d" % langs
|
Add a small script to pull translation state from compile logs.
|
Add a small script to pull translation state from compile logs.
|
Python
|
bsd-3-clause
|
niko20010/mumble,feld/mumble,mbax/mumble,richard227/mumble,bheart/mumble,feld/mumble,Githlar/mumble,LuAPi/mumble,richard227/mumble,feld/mumble,richard227/mumble,unascribed/mumble,Keridos/mumble,LuAPi/mumble,Lartza/mumble,unascribed/mumble,SuperNascher/mumble,Lartza/mumble,ccpgames/mumble,SuperNascher/mumble,Lartza/mumble,panaschieren/mumble-test,Zopieux/mumble,panaschieren/mumble-test,Githlar/mumble,panaschieren/mumble-test,Keridos/mumble,ccpgames/mumble,unascribed/mumble,Keridos/mumble,mbax/mumble,mbax/mumble,bheart/mumble,Natenom/mumble,feld/mumble,LuAPi/mumble,ccpgames/mumble,Natenom/mumble,richard227/mumble,panaschieren/mumble-test,mbax/mumble,Natenom/mumble,richard227/mumble,niko20010/mumble,ccpgames/mumble,Zopieux/mumble,Natenom/mumble,SuperNascher/mumble,panaschieren/mumble-test,Githlar/mumble,unascribed/mumble,panaschieren/mumble-test,bheart/mumble,SuperNascher/mumble,Zopieux/mumble,LuAPi/mumble,Natenom/mumble,Natenom/mumble,LuAPi/mumble,austinliou/mumble,feld/mumble,bheart/mumble,LuAPi/mumble,LuAPi/mumble,bheart/mumble,mbax/mumble,panaschieren/mumble-test,unascribed/mumble,Githlar/mumble,niko20010/mumble,LuAPi/mumble,Githlar/mumble,Keridos/mumble,Lartza/mumble,Lartza/mumble,austinliou/mumble,ccpgames/mumble,Githlar/mumble,bheart/mumble,unascribed/mumble,feld/mumble,austinliou/mumble,SuperNascher/mumble,richard227/mumble,richard227/mumble,mbax/mumble,niko20010/mumble,ccpgames/mumble,LuAPi/mumble,austinliou/mumble,bheart/mumble,feld/mumble,niko20010/mumble,SuperNascher/mumble,Githlar/mumble,ccpgames/mumble,Lartza/mumble,Keridos/mumble,SuperNascher/mumble,niko20010/mumble,Zopieux/mumble,feld/mumble,Keridos/mumble,panaschieren/mumble-test,Keridos/mumble,unascribed/mumble,bheart/mumble,Lartza/mumble,Zopieux/mumble,mbax/mumble,Zopieux/mumble,ccpgames/mumble,mbax/mumble,austinliou/mumble,austinliou/mumble,Keridos/mumble,SuperNascher/mumble,Zopieux/mumble,unascribed/mumble,Natenom/mumble,Natenom/mumble,Zopieux/mumble,austinliou/mumble,austinliou/mumble,SuperNascher/mumble,Lartza/mumble,Githlar/mumble,niko20010/mumble,richard227/mumble,niko20010/mumble
|
Add a small script to pull translation state from compile logs.
|
#!/usr/bin/env python
# Extracts the progress of translations from the compilation
# log in easily readable form. Make sure to delete all .qm
# files beforehand.
#
# Usage: cat log | .\transtate.py
#
import sys
import re
def n(val):
return (int(val) if val else 0)
if __name__ == "__main__":
#--Regex matches strings like
#
#Updating 'mumble_zh_CN.qm'...
# Generated 1421 translation(s) (1145 finished and 276 unfinished)
# Ignored 89 untranslated source text(s)
# s:\dev\QtMumble\bin\lrelease.exe mumble_zh_TW.ts
#Updating 'mumble_zh_TW.qm'...
# Generated 664 translation(s) (594 finished and 70 unfinished)
# Ignored 846 untranslated source text(s)
update = re.compile(r"Updating '([\w\.]+)'\.\.\.\s+Generated (\d+) translation\(s\) \((\d+) finished and (\d+) unfinished\)(?:\s+ Ignored (\d+) untranslated source text\(s\))?")
langs = 0
s = 's'
sortedbyuntranslated = sorted(update.findall(sys.stdin.read()), key=lambda s: (float(s[2]) / (n(s[1]) + n(s[4]))) if n(s[1]) else 10, reverse=True)
for lang, total, finished, unfinished, ignored in sortedbyuntranslated:
print "%s:" % lang
if int(total) == 0:
print " Source language"
else:
realtotal = n(total) + n(ignored)
percent = float(finished) / realtotal * 100
print " %d marked unfinished" % (n(unfinished))
if n(ignored):
print " %d untranslated." % (n(ignored))
print " => %d%% done (total %d + %d)." % (percent, n(total), n(ignored))
print
langs += 1
print "Number of languages: %d" % langs
|
<commit_before><commit_msg>Add a small script to pull translation state from compile logs.<commit_after>
|
#!/usr/bin/env python
# Extracts the progress of translations from the compilation
# log in easily readable form. Make sure to delete all .qm
# files beforehand.
#
# Usage: cat log | .\transtate.py
#
import sys
import re
def n(val):
return (int(val) if val else 0)
if __name__ == "__main__":
#--Regex matches strings like
#
#Updating 'mumble_zh_CN.qm'...
# Generated 1421 translation(s) (1145 finished and 276 unfinished)
# Ignored 89 untranslated source text(s)
# s:\dev\QtMumble\bin\lrelease.exe mumble_zh_TW.ts
#Updating 'mumble_zh_TW.qm'...
# Generated 664 translation(s) (594 finished and 70 unfinished)
# Ignored 846 untranslated source text(s)
update = re.compile(r"Updating '([\w\.]+)'\.\.\.\s+Generated (\d+) translation\(s\) \((\d+) finished and (\d+) unfinished\)(?:\s+ Ignored (\d+) untranslated source text\(s\))?")
langs = 0
s = 's'
sortedbyuntranslated = sorted(update.findall(sys.stdin.read()), key=lambda s: (float(s[2]) / (n(s[1]) + n(s[4]))) if n(s[1]) else 10, reverse=True)
for lang, total, finished, unfinished, ignored in sortedbyuntranslated:
print "%s:" % lang
if int(total) == 0:
print " Source language"
else:
realtotal = n(total) + n(ignored)
percent = float(finished) / realtotal * 100
print " %d marked unfinished" % (n(unfinished))
if n(ignored):
print " %d untranslated." % (n(ignored))
print " => %d%% done (total %d + %d)." % (percent, n(total), n(ignored))
print
langs += 1
print "Number of languages: %d" % langs
|
Add a small script to pull translation state from compile logs.#!/usr/bin/env python
# Extracts the progress of translations from the compilation
# log in easily readable form. Make sure to delete all .qm
# files beforehand.
#
# Usage: cat log | .\transtate.py
#
import sys
import re
def n(val):
return (int(val) if val else 0)
if __name__ == "__main__":
#--Regex matches strings like
#
#Updating 'mumble_zh_CN.qm'...
# Generated 1421 translation(s) (1145 finished and 276 unfinished)
# Ignored 89 untranslated source text(s)
# s:\dev\QtMumble\bin\lrelease.exe mumble_zh_TW.ts
#Updating 'mumble_zh_TW.qm'...
# Generated 664 translation(s) (594 finished and 70 unfinished)
# Ignored 846 untranslated source text(s)
update = re.compile(r"Updating '([\w\.]+)'\.\.\.\s+Generated (\d+) translation\(s\) \((\d+) finished and (\d+) unfinished\)(?:\s+ Ignored (\d+) untranslated source text\(s\))?")
langs = 0
s = 's'
sortedbyuntranslated = sorted(update.findall(sys.stdin.read()), key=lambda s: (float(s[2]) / (n(s[1]) + n(s[4]))) if n(s[1]) else 10, reverse=True)
for lang, total, finished, unfinished, ignored in sortedbyuntranslated:
print "%s:" % lang
if int(total) == 0:
print " Source language"
else:
realtotal = n(total) + n(ignored)
percent = float(finished) / realtotal * 100
print " %d marked unfinished" % (n(unfinished))
if n(ignored):
print " %d untranslated." % (n(ignored))
print " => %d%% done (total %d + %d)." % (percent, n(total), n(ignored))
print
langs += 1
print "Number of languages: %d" % langs
|
<commit_before><commit_msg>Add a small script to pull translation state from compile logs.<commit_after>#!/usr/bin/env python
# Extracts the progress of translations from the compilation
# log in easily readable form. Make sure to delete all .qm
# files beforehand.
#
# Usage: cat log | .\transtate.py
#
import sys
import re
def n(val):
return (int(val) if val else 0)
if __name__ == "__main__":
#--Regex matches strings like
#
#Updating 'mumble_zh_CN.qm'...
# Generated 1421 translation(s) (1145 finished and 276 unfinished)
# Ignored 89 untranslated source text(s)
# s:\dev\QtMumble\bin\lrelease.exe mumble_zh_TW.ts
#Updating 'mumble_zh_TW.qm'...
# Generated 664 translation(s) (594 finished and 70 unfinished)
# Ignored 846 untranslated source text(s)
update = re.compile(r"Updating '([\w\.]+)'\.\.\.\s+Generated (\d+) translation\(s\) \((\d+) finished and (\d+) unfinished\)(?:\s+ Ignored (\d+) untranslated source text\(s\))?")
langs = 0
s = 's'
sortedbyuntranslated = sorted(update.findall(sys.stdin.read()), key=lambda s: (float(s[2]) / (n(s[1]) + n(s[4]))) if n(s[1]) else 10, reverse=True)
for lang, total, finished, unfinished, ignored in sortedbyuntranslated:
print "%s:" % lang
if int(total) == 0:
print " Source language"
else:
realtotal = n(total) + n(ignored)
percent = float(finished) / realtotal * 100
print " %d marked unfinished" % (n(unfinished))
if n(ignored):
print " %d untranslated." % (n(ignored))
print " => %d%% done (total %d + %d)." % (percent, n(total), n(ignored))
print
langs += 1
print "Number of languages: %d" % langs
|
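For clarity, a small sketch feeding a fabricated lrelease log block (modeled on the format quoted in the script's comment) through the same regex, to show the captured groups:
import re
update = re.compile(r"Updating '([\w\.]+)'\.\.\.\s+Generated (\d+) translation\(s\) "
                    r"\((\d+) finished and (\d+) unfinished\)"
                    r"(?:\s+ Ignored (\d+) untranslated source text\(s\))?")
sample = ("Updating 'mumble_zh_TW.qm'...\n"
          "    Generated 664 translation(s) (594 finished and 70 unfinished)\n"
          "    Ignored 846 untranslated source text(s)\n")
print(update.findall(sample))
# [('mumble_zh_TW.qm', '664', '594', '70', '846')]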
|
1254177287c7c40bd5658035cedc1fc26598b81e
|
py/garage/garage/codecs.py
|
py/garage/garage/codecs.py
|
"""Character encoding error handlers."""
__all__ = [
'make_error_logger',
]
def make_error_logger(logger):
    """Make a handler that logs and ignores encoding errors."""
def log_errors(exc):
logger.error('incorrect character encoding', exc_info=exc)
return ('', exc.end)
return log_errors
|
Add character encoding error handler
|
Add character encoding error handler
|
Python
|
mit
|
clchiou/garage,clchiou/garage,clchiou/garage,clchiou/garage
|
Add character encoding error handler
|
"""Character encoding error handlers."""
__all__ = [
'make_error_logger',
]
def make_error_logger(logger):
    """Make a handler that logs and ignores encoding errors."""
def log_errors(exc):
logger.error('incorrect character encoding', exc_info=exc)
return ('', exc.end)
return log_errors
|
<commit_before><commit_msg>Add character encoding error handler<commit_after>
|
"""Character encoding error handlers."""
__all__ = [
'make_error_logger',
]
def make_error_logger(logger):
    """Make a handler that logs and ignores encoding errors."""
def log_errors(exc):
logger.error('incorrect character encoding', exc_info=exc)
return ('', exc.end)
return log_errors
|
Add character encoding error handler"""Character encoding error handlers."""
__all__ = [
'make_error_logger',
]
def make_error_logger(logger):
    """Make a handler that logs and ignores encoding errors."""
def log_errors(exc):
logger.error('incorrect character encoding', exc_info=exc)
return ('', exc.end)
return log_errors
|
<commit_before><commit_msg>Add character encoding error handler<commit_after>"""Character encoding error handlers."""
__all__ = [
'make_error_logger',
]
def make_error_logger(logger):
    """Make a handler that logs and ignores encoding errors."""
def log_errors(exc):
logger.error('incorrect character encoding', exc_info=exc)
return ('', exc.end)
return log_errors
|
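A hedged usage sketch: handlers like this are installed with codecs.register_error and then referenced by name when decoding (the handler name 'log-and-ignore' and the logger name are invented):
import codecs
import logging
def make_error_logger(logger):
    def log_errors(exc):
        logger.error('incorrect character encoding', exc_info=exc)
        return ('', exc.end)  # drop the bad bytes and resume after them
    return log_errors
logging.basicConfig()
codecs.register_error('log-and-ignore', make_error_logger(logging.getLogger('demo')))
print(b'caf\xff'.decode('utf-8', errors='log-and-ignore'))  # logs the error, prints 'caf'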
|
c1e1d4d40b8344437f8f2fb3fa44f60a42d5112d
|
config_diag/tests/test_util.py
|
config_diag/tests/test_util.py
|
from .examples import load_email_client
from ..policy import MDPDialogBuilder
from ..util import simulate_dialog, cross_validation
EMAIL_CLIENT = load_email_client()
def test_simulate_dialog():
builder = MDPDialogBuilder(
config_sample=EMAIL_CLIENT.config_sample,
assoc_rule_min_support=EMAIL_CLIENT.min_support,
assoc_rule_min_confidence=EMAIL_CLIENT.min_confidence)
dialog = builder.build_dialog()
accuracy, questions = simulate_dialog(dialog, EMAIL_CLIENT.config)
assert accuracy == 1.0
assert questions == 0.5
|
Add a test for simulate_dialog
|
Add a test for simulate_dialog
|
Python
|
apache-2.0
|
yasserglez/configurator,yasserglez/configurator
|
Add a test for simulate_dialog
|
from .examples import load_email_client
from ..policy import MDPDialogBuilder
from ..util import simulate_dialog, cross_validation
EMAIL_CLIENT = load_email_client()
def test_simulate_dialog():
builder = MDPDialogBuilder(
config_sample=EMAIL_CLIENT.config_sample,
assoc_rule_min_support=EMAIL_CLIENT.min_support,
assoc_rule_min_confidence=EMAIL_CLIENT.min_confidence)
dialog = builder.build_dialog()
accuracy, questions = simulate_dialog(dialog, EMAIL_CLIENT.config)
assert accuracy == 1.0
assert questions == 0.5
|
<commit_before><commit_msg>Add a test for simulate_dialog<commit_after>
|
from .examples import load_email_client
from ..policy import MDPDialogBuilder
from ..util import simulate_dialog, cross_validation
EMAIL_CLIENT = load_email_client()
def test_simulate_dialog():
builder = MDPDialogBuilder(
config_sample=EMAIL_CLIENT.config_sample,
assoc_rule_min_support=EMAIL_CLIENT.min_support,
assoc_rule_min_confidence=EMAIL_CLIENT.min_confidence)
dialog = builder.build_dialog()
accuracy, questions = simulate_dialog(dialog, EMAIL_CLIENT.config)
assert accuracy == 1.0
assert questions == 0.5
|
Add a test for simulate_dialog
from .examples import load_email_client
from ..policy import MDPDialogBuilder
from ..util import simulate_dialog, cross_validation
EMAIL_CLIENT = load_email_client()
def test_simulate_dialog():
builder = MDPDialogBuilder(
config_sample=EMAIL_CLIENT.config_sample,
assoc_rule_min_support=EMAIL_CLIENT.min_support,
assoc_rule_min_confidence=EMAIL_CLIENT.min_confidence)
dialog = builder.build_dialog()
accuracy, questions = simulate_dialog(dialog, EMAIL_CLIENT.config)
assert accuracy == 1.0
assert questions == 0.5
|
<commit_before><commit_msg>Add a test for simulate_dialog<commit_after>
from .examples import load_email_client
from ..policy import MDPDialogBuilder
from ..util import simulate_dialog, cross_validation
EMAIL_CLIENT = load_email_client()
def test_simulate_dialog():
builder = MDPDialogBuilder(
config_sample=EMAIL_CLIENT.config_sample,
assoc_rule_min_support=EMAIL_CLIENT.min_support,
assoc_rule_min_confidence=EMAIL_CLIENT.min_confidence)
dialog = builder.build_dialog()
accuracy, questions = simulate_dialog(dialog, EMAIL_CLIENT.config)
assert accuracy == 1.0
assert questions == 0.5
|
|
f5382cf42ffb7bb96b4c6616ef59c569194096cf
|
tests/test_vector2_cross.py
|
tests/test_vector2_cross.py
|
from ppb_vector import Vector2
import pytest
@pytest.mark.parametrize("left, right, expected", [
(Vector2(1, 1), Vector2(0, -1), -1),
(Vector2(1, 1), Vector2(-1, 0), 1),
(Vector2(0, 1), Vector2(0, -1), 0),
(Vector2(-1, -1), Vector2(1, 0), 1),
(Vector2(-1, -1), Vector2(-1, 0), -1)
])
def test_cross(left, right, expected):
assert left ^ right == expected
assert right ^ left == -expected
|
Add tests for cross product
|
Add tests for cross product
|
Python
|
artistic-2.0
|
ppb/ppb-vector,ppb/ppb-vector
|
Add tests for cross product
|
from ppb_vector import Vector2
import pytest
@pytest.mark.parametrize("left, right, expected", [
(Vector2(1, 1), Vector2(0, -1), -1),
(Vector2(1, 1), Vector2(-1, 0), 1),
(Vector2(0, 1), Vector2(0, -1), 0),
(Vector2(-1, -1), Vector2(1, 0), 1),
(Vector2(-1, -1), Vector2(-1, 0), -1)
])
def test_cross(left, right, expected):
assert left ^ right == expected
assert right ^ left == -expected
|
<commit_before><commit_msg>Add tests for cross product<commit_after>
|
from ppb_vector import Vector2
import pytest
@pytest.mark.parametrize("left, right, expected", [
(Vector2(1, 1), Vector2(0, -1), -1),
(Vector2(1, 1), Vector2(-1, 0), 1),
(Vector2(0, 1), Vector2(0, -1), 0),
(Vector2(-1, -1), Vector2(1, 0), 1),
(Vector2(-1, -1), Vector2(-1, 0), -1)
])
def test_cross(left, right, expected):
assert left ^ right == expected
assert right ^ left == -expected
|
Add tests for cross productfrom ppb_vector import Vector2
import pytest
@pytest.mark.parametrize("left, right, expected", [
(Vector2(1, 1), Vector2(0, -1), -1),
(Vector2(1, 1), Vector2(-1, 0), 1),
(Vector2(0, 1), Vector2(0, -1), 0),
(Vector2(-1, -1), Vector2(1, 0), 1),
(Vector2(-1, -1), Vector2(-1, 0), -1)
])
def test_cross(left, right, expected):
assert left ^ right == expected
assert right ^ left == -expected
|
<commit_before><commit_msg>Add tests for cross product<commit_after>from ppb_vector import Vector2
import pytest
@pytest.mark.parametrize("left, right, expected", [
(Vector2(1, 1), Vector2(0, -1), -1),
(Vector2(1, 1), Vector2(-1, 0), 1),
(Vector2(0, 1), Vector2(0, -1), 0),
(Vector2(-1, -1), Vector2(1, 0), 1),
(Vector2(-1, -1), Vector2(-1, 0), -1)
])
def test_cross(left, right, expected):
assert left ^ right == expected
assert right ^ left == -expected
|
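For reference, the expected values are the standard 2D cross product x1*y2 - y1*x2; a small sketch checking the first case with plain tuples (ppb_vector's __xor__ is assumed to compute the same quantity):
def cross(a, b):
    # Scalar 2D cross product of (x1, y1) and (x2, y2).
    return a[0] * b[1] - a[1] * b[0]
assert cross((1, 1), (0, -1)) == -1  # matches the first parametrized case
assert cross((0, -1), (1, 1)) == 1   # swapping the operands flips the sign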
|
532649313b4660f0f2aa360940c8d90d2091dda9
|
utils/carouselify_images.py
|
utils/carouselify_images.py
|
""" XXX: Helper script ... """
from os.path import basename
from PIL import Image
ASPECT_RATIOS = (1.5, 2, 2.5)
PREFIX = '/assets/img/'
STYLE_MAPPING = {
'1.5': 'visible-sm',
'2': 'visible-md',
'2.5': 'visible-lg',
}
def distance(a, b):
return (a-b) * (a-b)
def save_image(image, aspect_ratio, filename, format):
""" Save the image with the aspect_ratio details in the name. """
from os.path import splitext
name, ext = splitext(filename)
outfile = ''.join([name, '-', str(aspect_ratio), ext])
image.save(outfile, format)
return outfile
def get_image_aspect_ratio(image):
""" Return the aspect ratio of the image. """
width, height = image.size
ratio = float(width)/height
d = 1000000
aspect_ratio = ASPECT_RATIOS[0]
for r in ASPECT_RATIOS:
d_ = distance(ratio, r)
if d_ <= d:
aspect_ratio = r
d = d_
return aspect_ratio
def convert_to(image, current_aspect_ratio, new_aspect_ratio):
""" Convert the image to the given aspect ratio. """
crop_factor = current_aspect_ratio / new_aspect_ratio
if crop_factor != 1.0:
width, height = image.size
crop_height = int(height * (1-crop_factor) / 2)
area = image.crop((0, crop_height, width, height-crop_height))
else:
area = image.copy()
return save_image(area, new_aspect_ratio, image.filename, image.format)
def carouselify_image(image_path):
""" Make the image usable in a carousel. """
image = Image.open(image_path)
aspect_ratio = get_image_aspect_ratio(image)
for r in ASPECT_RATIOS:
filename = basename(convert_to(image, aspect_ratio, r))
path = PREFIX + filename
print('<img src="%s" class="%s">' % (path, STYLE_MAPPING[str(r)]))
if __name__ == '__main__':
import sys
if len(sys.argv) != 2:
print('Usage: %s path/to/image' % sys.argv[0])
else:
carouselify_image(sys.argv[1])
|
Add script to generate images suitable for carousel.
|
Add script to generate images suitable for carousel.
|
Python
|
bsd-3-clause
|
punchagan/mumbaiultimate.in,punchagan/mumbaiultimate.in
|
Add script to generate images suitable for carousel.
|
""" XXX: Helper script ... """
from os.path import basename
from PIL import Image
ASPECT_RATIOS = (1.5, 2, 2.5)
PREFIX = '/assets/img/'
STYLE_MAPPING = {
'1.5': 'visible-sm',
'2': 'visible-md',
'2.5': 'visible-lg',
}
def distance(a, b):
return (a-b) * (a-b)
def save_image(image, aspect_ratio, filename, format):
""" Save the image with the aspect_ratio details in the name. """
from os.path import splitext
name, ext = splitext(filename)
outfile = ''.join([name, '-', str(aspect_ratio), ext])
image.save(outfile, format)
return outfile
def get_image_aspect_ratio(image):
""" Return the aspect ratio of the image. """
width, height = image.size
ratio = float(width)/height
d = 1000000
aspect_ratio = ASPECT_RATIOS[0]
for r in ASPECT_RATIOS:
d_ = distance(ratio, r)
if d_ <= d:
aspect_ratio = r
d = d_
return aspect_ratio
def convert_to(image, current_aspect_ratio, new_aspect_ratio):
""" Convert the image to the given aspect ratio. """
crop_factor = current_aspect_ratio / new_aspect_ratio
if crop_factor != 1.0:
width, height = image.size
crop_height = int(height * (1-crop_factor) / 2)
area = image.crop((0, crop_height, width, height-crop_height))
else:
area = image.copy()
return save_image(area, new_aspect_ratio, image.filename, image.format)
def carouselify_image(image_path):
""" Make the image usable in a carousel. """
image = Image.open(image_path)
aspect_ratio = get_image_aspect_ratio(image)
for r in ASPECT_RATIOS:
filename = basename(convert_to(image, aspect_ratio, r))
path = PREFIX + filename
print('<img src="%s" class="%s">' % (path, STYLE_MAPPING[str(r)]))
if __name__ == '__main__':
import sys
if len(sys.argv) != 2:
print('Usage: %s path/to/image' % sys.argv[0])
else:
carouselify_image(sys.argv[1])
|
<commit_before><commit_msg>Add script to generate images suitable for carousel.<commit_after>
|
""" XXX: Helper script ... """
from os.path import basename
from PIL import Image
ASPECT_RATIOS = (1.5, 2, 2.5)
PREFIX = '/assets/img/'
STYLE_MAPPING = {
'1.5': 'visible-sm',
'2': 'visible-md',
'2.5': 'visible-lg',
}
def distance(a, b):
return (a-b) * (a-b)
def save_image(image, aspect_ratio, filename, format):
""" Save the image with the aspect_ratio details in the name. """
from os.path import splitext
name, ext = splitext(filename)
outfile = ''.join([name, '-', str(aspect_ratio), ext])
image.save(outfile, format)
return outfile
def get_image_aspect_ratio(image):
""" Return the aspect ratio of the image. """
width, height = image.size
ratio = float(width)/height
d = 1000000
aspect_ratio = ASPECT_RATIOS[0]
for r in ASPECT_RATIOS:
d_ = distance(ratio, r)
if d_ <= d:
aspect_ratio = r
d = d_
return aspect_ratio
def convert_to(image, current_aspect_ratio, new_aspect_ratio):
""" Convert the image to the given aspect ratio. """
crop_factor = current_aspect_ratio / new_aspect_ratio
if crop_factor != 1.0:
width, height = image.size
crop_height = int(height * (1-crop_factor) / 2)
area = image.crop((0, crop_height, width, height-crop_height))
else:
area = image.copy()
return save_image(area, new_aspect_ratio, image.filename, image.format)
def carouselify_image(image_path):
""" Make the image usable in a carousel. """
image = Image.open(image_path)
aspect_ratio = get_image_aspect_ratio(image)
for r in ASPECT_RATIOS:
filename = basename(convert_to(image, aspect_ratio, r))
path = PREFIX + filename
print('<img src="%s" class="%s">' % (path, STYLE_MAPPING[str(r)]))
if __name__ == '__main__':
import sys
if len(sys.argv) != 2:
print('Usage: %s path/to/image' % sys.argv[0])
else:
carouselify_image(sys.argv[1])
|
Add script to generate images suitable for carousel.""" XXX: Helper script ... """
from os.path import basename
from PIL import Image
ASPECT_RATIOS = (1.5, 2, 2.5)
PREFIX = '/assets/img/'
STYLE_MAPPING = {
'1.5': 'visible-sm',
'2': 'visible-md',
'2.5': 'visible-lg',
}
def distance(a, b):
return (a-b) * (a-b)
def save_image(image, aspect_ratio, filename, format):
""" Save the image with the aspect_ratio details in the name. """
from os.path import splitext
name, ext = splitext(filename)
outfile = ''.join([name, '-', str(aspect_ratio), ext])
image.save(outfile, format)
return outfile
def get_image_aspect_ratio(image):
""" Return the aspect ratio of the image. """
width, height = image.size
ratio = float(width)/height
d = 1000000
aspect_ratio = ASPECT_RATIOS[0]
for r in ASPECT_RATIOS:
d_ = distance(ratio, r)
if d_ <= d:
aspect_ratio = r
d = d_
return aspect_ratio
def convert_to(image, current_aspect_ratio, new_aspect_ratio):
""" Convert the image to the given aspect ratio. """
crop_factor = current_aspect_ratio / new_aspect_ratio
if crop_factor != 1.0:
width, height = image.size
crop_height = int(height * (1-crop_factor) / 2)
area = image.crop((0, crop_height, width, height-crop_height))
else:
area = image.copy()
return save_image(area, new_aspect_ratio, image.filename, image.format)
def carouselify_image(image_path):
""" Make the image usable in a carousel. """
image = Image.open(image_path)
aspect_ratio = get_image_aspect_ratio(image)
for r in ASPECT_RATIOS:
filename = basename(convert_to(image, aspect_ratio, r))
path = PREFIX + filename
print('<img src="%s" class="%s">' % (path, STYLE_MAPPING[str(r)]))
if __name__ == '__main__':
import sys
if len(sys.argv) != 2:
print('Usage: %s path/to/image' % sys.argv[0])
else:
carouselify_image(sys.argv[1])
|
<commit_before><commit_msg>Add script to generate images suitable for carousel.<commit_after>""" XXX: Helper script ... """
from os.path import basename
from PIL import Image
ASPECT_RATIOS = (1.5, 2, 2.5)
PREFIX = '/assets/img/'
STYLE_MAPPING = {
'1.5': 'visible-sm',
'2': 'visible-md',
'2.5': 'visible-lg',
}
def distance(a, b):
return (a-b) * (a-b)
def save_image(image, aspect_ratio, filename, format):
""" Save the image with the aspect_ratio details in the name. """
from os.path import splitext
name, ext = splitext(filename)
outfile = ''.join([name, '-', str(aspect_ratio), ext])
image.save(outfile, format)
return outfile
def get_image_aspect_ratio(image):
""" Return the aspect ratio of the image. """
width, height = image.size
ratio = float(width)/height
d = 1000000
aspect_ratio = ASPECT_RATIOS[0]
for r in ASPECT_RATIOS:
d_ = distance(ratio, r)
if d_ <= d:
aspect_ratio = r
d = d_
return aspect_ratio
def convert_to(image, current_aspect_ratio, new_aspect_ratio):
""" Convert the image to the given aspect ratio. """
crop_factor = current_aspect_ratio / new_aspect_ratio
if crop_factor != 1.0:
width, height = image.size
crop_height = int(height * (1-crop_factor) / 2)
area = image.crop((0, crop_height, width, height-crop_height))
else:
area = image.copy()
return save_image(area, new_aspect_ratio, image.filename, image.format)
def carouselify_image(image_path):
""" Make the image usable in a carousel. """
image = Image.open(image_path)
aspect_ratio = get_image_aspect_ratio(image)
for r in ASPECT_RATIOS:
filename = basename(convert_to(image, aspect_ratio, r))
path = PREFIX + filename
print('<img src="%s" class="%s">' % (path, STYLE_MAPPING[str(r)]))
if __name__ == '__main__':
import sys
if len(sys.argv) != 2:
print('Usage: %s path/to/image' % sys.argv[0])
else:
carouselify_image(sys.argv[1])
|
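To illustrate the nearest-ratio selection without PIL or a real file, a hedged sketch applying the same idea to an invented image size (tie-breaking may differ slightly from the original loop):
ASPECT_RATIOS = (1.5, 2, 2.5)
def nearest_aspect_ratio(width, height):
    # Pick the configured ratio with the smallest squared distance.
    ratio = float(width) / height
    return min(ASPECT_RATIOS, key=lambda r: (ratio - r) ** 2)
print(nearest_aspect_ratio(1200, 600))  # 2, so the crop would be saved as e.g. photo-2.jpg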
|
fc7904ed8753edba2e3a2ed7f6ca702c2f942903
|
test/test_outputs.py
|
test/test_outputs.py
|
"""Test the output module."""
frc_out = outputs.frc_eia923_df(pudl_engine)
gens_out = outputs.gens_eia860_df(pudl_engine)
gf_out = outputs.gf_eia923_df(pudl_engine)
pu_eia = outputs.plants_utils_eia_df(pudl_engine)
pu_ferc = outputs.plants_utils_ferc_df(pudl_engine)
steam_out = outputs.plants_steam_ferc1_df(pudl_engine)
fuel_out = outputs.fuel_ferc1_df(pudl_engine)
|
Add skeletal output test cases.
|
Add skeletal output test cases.
|
Python
|
mit
|
catalyst-cooperative/pudl,catalyst-cooperative/pudl
|
Add skeletal output test cases.
|
"""Test the output module."""
frc_out = outputs.frc_eia923_df(pudl_engine)
gens_out = outputs.gens_eia860_df(pudl_engine)
gf_out = outputs.gf_eia923_df(pudl_engine)
pu_eia = outputs.plants_utils_eia_df(pudl_engine)
pu_ferc = outputs.plants_utils_ferc_df(pudl_engine)
steam_out = outputs.plants_steam_ferc1_df(pudl_engine)
fuel_out = outputs.fuel_ferc1_df(pudl_engine)
|
<commit_before><commit_msg>Add skeletal output test cases.<commit_after>
|
"""Test the output module."""
frc_out = outputs.frc_eia923_df(pudl_engine)
gens_out = outputs.gens_eia860_df(pudl_engine)
gf_out = outputs.gf_eia923_df(pudl_engine)
pu_eia = outputs.plants_utils_eia_df(pudl_engine)
pu_ferc = outputs.plants_utils_ferc_df(pudl_engine)
steam_out = outputs.plants_steam_ferc1_df(pudl_engine)
fuel_out = outputs.fuel_ferc1_df(pudl_engine)
|
Add skeletal output test cases."""Test the output module."""
frc_out = outputs.frc_eia923_df(pudl_engine)
gens_out = outputs.gens_eia860_df(pudl_engine)
gf_out = outputs.gf_eia923_df(pudl_engine)
pu_eia = outputs.plants_utils_eia_df(pudl_engine)
pu_ferc = outputs.plants_utils_ferc_df(pudl_engine)
steam_out = outputs.plants_steam_ferc1_df(pudl_engine)
fuel_out = outputs.fuel_ferc1_df(pudl_engine)
|
<commit_before><commit_msg>Add skeletal output test cases.<commit_after>"""Test the output module."""
frc_out = outputs.frc_eia923_df(pudl_engine)
gens_out = outputs.gens_eia860_df(pudl_engine)
gf_out = outputs.gf_eia923_df(pudl_engine)
pu_eia = outputs.plants_utils_eia_df(pudl_engine)
pu_ferc = outputs.plants_utils_ferc_df(pudl_engine)
steam_out = outputs.plants_steam_ferc1_df(pudl_engine)
fuel_out = outputs.fuel_ferc1_df(pudl_engine)
|
|
0db8f4b170793e1e31cb86479d931239f69efa77
|
annealing_lr.py
|
annealing_lr.py
|
import abc
from math import exp
"""Classes for annealing learning rate schedules."""
class AnnealingSchedule(object):
def __init__(self, initial_lr, decay_rate, decay_step):
self.initial_lr = initial_lr
self.lr = initial_lr
self.k = self.decay_rate = decay_rate
self.decay_step = decay_step
self.global_step = 0
def __mul__(self, z):
self.global_step += 1
if (self.global_step % self.decay_step) == 0: self._anneal_lr()
return self.lr * z
@abc.abstractmethod
def _anneal_lr(self):
"""Define your annealing schedule here."""
return
class InvScaling(AnnealingSchedule):
def __init__(self, initial_lr, decay_rate, decay_step=10):
super(InvScaling, self).__init__(initial_lr, decay_rate, decay_step)
def _anneal_lr(self):
self.lr = self.initial_lr / (1 + self.k * self.global_step)
class ExponentialDecay(AnnealingSchedule):
def __init__(self, initial_lr, decay_rate, decay_step=10):
super(ExponentialDecay, self).__init__(initial_lr, decay_rate, decay_step)
def _anneal_lr(self):
self.lr = self.initial_lr * exp(-self.k * self.global_step)
class StepDecay(AnnealingSchedule):
def __init__(self, initial_lr, decay_rate, decay_step=10):
super(StepDecay, self).__init__(initial_lr, decay_rate, decay_step)
def _anneal_lr(self):
self.lr = self.lr - self.decay_rate
|
Implement annealing learning rate schedules.
|
Implement annealing learning rate schedules.
|
Python
|
mit
|
prasanna08/MachineLearning
|
Implement annealing learning rate schedules.
|
import abc
from math import exp
"""Classes for annealing learning rate schedules."""
class AnnealingSchedule(object):
def __init__(self, initial_lr, decay_rate, decay_step):
self.initial_lr = initial_lr
self.lr = initial_lr
self.k = self.decay_rate = decay_rate
self.decay_step = decay_step
self.global_step = 0
def __mul__(self, z):
self.global_step += 1
if (self.global_step % self.decay_step) == 0: self._anneal_lr()
return self.lr * z
@abc.abstractmethod
def _anneal_lr(self):
"""Define your annealing schedule here."""
return
class InvScaling(AnnealingSchedule):
def __init__(self, initial_lr, decay_rate, decay_step=10):
super(InvScaling, self).__init__(initial_lr, decay_rate, decay_step)
def _anneal_lr(self):
self.lr = self.initial_lr / (1 + self.k * self.global_step)
class ExponentialDecay(AnnealingSchedule):
def __init__(self, initial_lr, decay_rate, decay_step=10):
super(ExponentialDecay, self).__init__(initial_lr, decay_rate, decay_step)
def _anneal_lr(self):
self.lr = self.initial_lr * exp(-self.k * self.global_step)
class StepDecay(AnnealingSchedule):
def __init__(self, initial_lr, decay_rate, decay_step=10):
super(StepDecay, self).__init__(initial_lr, decay_rate, decay_step)
def _anneal_lr(self):
self.lr = self.lr - self.decay_rate
|
<commit_before><commit_msg>Implement annealing learning rate schedules.<commit_after>
|
import abc
from math import exp
"""Classes for annealing learning rate schedules."""
class AnnealingSchedule(object):
def __init__(self, initial_lr, decay_rate, decay_step):
self.initial_lr = initial_lr
self.lr = initial_lr
self.k = self.decay_rate = decay_rate
self.decay_step = decay_step
self.global_step = 0
def __mul__(self, z):
self.global_step += 1
if (self.global_step % self.decay_step) == 0: self._anneal_lr()
return self.lr * z
@abc.abstractmethod
def _anneal_lr(self):
"""Define your annealing schedule here."""
return
class InvScaling(AnnealingSchedule):
def __init__(self, initial_lr, decay_rate, decay_step=10):
super(InvScaling, self).__init__(initial_lr, decay_rate, decay_step)
def _anneal_lr(self):
self.lr = self.initial_lr / (1 + self.k * self.global_step)
class ExponentialDecay(AnnealingSchedule):
def __init__(self, initial_lr, decay_rate, decay_step=10):
super(ExponentialDecay, self).__init__(initial_lr, decay_rate, decay_step)
def _anneal_lr(self):
self.lr = self.initial_lr * exp(-self.k * self.global_step)
class StepDecay(AnnealingSchedule):
def __init__(self, initial_lr, decay_rate, decay_step=10):
super(StepDecay, self).__init__(initial_lr, decay_rate, decay_step)
def _anneal_lr(self):
self.lr = self.lr - self.decay_rate
|
Implement annealing learning rate schedules.import abc
from math import exp
"""Classes for annealing learning rate schedules."""
class AnnealingSchedule(object):
def __init__(self, initial_lr, decay_rate, decay_step):
self.initial_lr = initial_lr
self.lr = initial_lr
self.k = self.decay_rate = decay_rate
self.decay_step = decay_step
self.global_step = 0
def __mul__(self, z):
self.global_step += 1
if (self.global_step % self.decay_step) == 0: self._anneal_lr()
return self.lr * z
@abc.abstractmethod
def _anneal_lr(self):
"""Define your annealing schedule here."""
return
class InvScaling(AnnealingSchedule):
def __init__(self, initial_lr, decay_rate, decay_step=10):
super(InvScaling, self).__init__(initial_lr, decay_rate, decay_step)
def _anneal_lr(self):
self.lr = self.initial_lr / (1 + self.k * self.global_step)
class ExponentialDecay(AnnealingSchedule):
def __init__(self, initial_lr, decay_rate, decay_step=10):
super(ExponentialDecay, self).__init__(initial_lr, decay_rate, decay_step)
def _anneal_lr(self):
self.lr = self.initial_lr * exp(-self.k * self.global_step)
class StepDecay(AnnealingSchedule):
def __init__(self, initial_lr, decay_rate, decay_step=10):
super(StepDecay, self).__init__(initial_lr, decay_rate, decay_step)
def _anneal_lr(self):
self.lr = self.lr - self.decay_rate
|
<commit_before><commit_msg>Implement annealing learning rate schedules.<commit_after>import abc
from math import exp
"""Classes for annealing learning rate schedules."""
class AnnealingSchedule(object):
def __init__(self, initial_lr, decay_rate, decay_step):
self.initial_lr = initial_lr
self.lr = initial_lr
self.k = self.decay_rate = decay_rate
self.decay_step = decay_step
self.global_step = 0
def __mul__(self, z):
self.global_step += 1
if (self.global_step % self.decay_step) == 0: self._anneal_lr()
return self.lr * z
@abc.abstractmethod
def _anneal_lr(self):
"""Define your annealing schedule here."""
return
class InvScaling(AnnealingSchedule):
def __init__(self, initial_lr, decay_rate, decay_step=10):
super(InvScaling, self).__init__(initial_lr, decay_rate, decay_step)
def _anneal_lr(self):
self.lr = self.initial_lr / (1 + self.k * self.global_step)
class ExponentialDecay(AnnealingSchedule):
def __init__(self, initial_lr, decay_rate, decay_step=10):
super(ExponentialDecay, self).__init__(initial_lr, decay_rate, decay_step)
def _anneal_lr(self):
self.lr = self.initial_lr * exp(-self.k * self.global_step)
class StepDecay(AnnealingSchedule):
def __init__(self, initial_lr, decay_rate, decay_step=10):
super(StepDecay, self).__init__(initial_lr, decay_rate, decay_step)
def _anneal_lr(self):
self.lr = self.lr - self.decay_rate
|
|
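A minimal sketch of how the schedules above could drive a plain gradient-descent update, assuming the classes are importable from a module named annealing_lr (the module name is an assumption):
# Hypothetical usage of the annealing schedules defined above.
from annealing_lr import ExponentialDecay

schedule = ExponentialDecay(initial_lr=0.1, decay_rate=0.01, decay_step=10)
weight = 5.0
for _ in range(100):
    gradient = 2.0 * weight        # gradient of f(w) = w ** 2
    weight -= schedule * gradient  # __mul__ scales by the current lr and
                                   # anneals it every decay_step updates
print(weight)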
39df35c45ffa672b6ad9af962e53a9dc2c45be85
|
netprofile_core/netprofile_core/tasks.py
|
netprofile_core/netprofile_core/tasks.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# NetProfile: Core module - Tasks
# Copyright © 2017 Alex Unigovsky
#
# This file is part of NetProfile.
# NetProfile is free software: you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public
# License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later
# version.
#
# NetProfile is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General
# Public License along with NetProfile. If not, see
# <http://www.gnu.org/licenses/>.
from __future__ import (unicode_literals, print_function,
absolute_import, division)
from pyramid.i18n import TranslationStringFactory
from pyramid.settings import asbool
from repoze.sendmail.queue import QueueProcessor
from repoze.sendmail.mailer import SMTPMailer
from netprofile.celery import (
app,
task_meta
)
from netprofile.common.util import make_config_dict
_ = TranslationStringFactory('netprofile_core')
@task_meta(cap='BASE_ADMIN',
title=_('Send queued mail'))
@app.task
def task_send_queued_mail():
cfg = make_config_dict(app.settings, 'mail.')
tls = cfg.get('tls')
if tls is not None:
tls = asbool(tls)
mailer = SMTPMailer(hostname=cfg.get('host', 'localhost'),
port=int(cfg.get('port', 25)),
username=cfg.get('username'),
password=cfg.get('password'),
no_tls=tls is False,
force_tls=tls is True,
ssl=asbool(cfg.get('ssl', False)),
debug_smtp=asbool(cfg.get('debug', False)))
maildir = cfg['queue_path']
qp = QueueProcessor(mailer, maildir, ignore_transient=True)
qp.send_messages()
|
Add internal task to flush mail queue
|
Add internal task to flush mail queue
|
Python
|
agpl-3.0
|
unikmhz/npui,unikmhz/npui,unikmhz/npui,unikmhz/npui
|
Add internal task to flush mail queue
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# NetProfile: Core module - Tasks
# Copyright © 2017 Alex Unigovsky
#
# This file is part of NetProfile.
# NetProfile is free software: you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public
# License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later
# version.
#
# NetProfile is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General
# Public License along with NetProfile. If not, see
# <http://www.gnu.org/licenses/>.
from __future__ import (unicode_literals, print_function,
absolute_import, division)
from pyramid.i18n import TranslationStringFactory
from pyramid.settings import asbool
from repoze.sendmail.queue import QueueProcessor
from repoze.sendmail.mailer import SMTPMailer
from netprofile.celery import (
app,
task_meta
)
from netprofile.common.util import make_config_dict
_ = TranslationStringFactory('netprofile_core')
@task_meta(cap='BASE_ADMIN',
title=_('Send queued mail'))
@app.task
def task_send_queued_mail():
cfg = make_config_dict(app.settings, 'mail.')
tls = cfg.get('tls')
if tls is not None:
tls = asbool(tls)
mailer = SMTPMailer(hostname=cfg.get('host', 'localhost'),
port=int(cfg.get('port', 25)),
username=cfg.get('username'),
password=cfg.get('password'),
no_tls=tls is False,
force_tls=tls is True,
ssl=asbool(cfg.get('ssl', False)),
debug_smtp=asbool(cfg.get('debug', False)))
maildir = cfg['queue_path']
qp = QueueProcessor(mailer, maildir, ignore_transient=True)
qp.send_messages()
|
<commit_before><commit_msg>Add internal task to flush mail queue<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# NetProfile: Core module - Tasks
# Copyright © 2017 Alex Unigovsky
#
# This file is part of NetProfile.
# NetProfile is free software: you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public
# License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later
# version.
#
# NetProfile is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General
# Public License along with NetProfile. If not, see
# <http://www.gnu.org/licenses/>.
from __future__ import (unicode_literals, print_function,
absolute_import, division)
from pyramid.i18n import TranslationStringFactory
from pyramid.settings import asbool
from repoze.sendmail.queue import QueueProcessor
from repoze.sendmail.mailer import SMTPMailer
from netprofile.celery import (
app,
task_meta
)
from netprofile.common.util import make_config_dict
_ = TranslationStringFactory('netprofile_core')
@task_meta(cap='BASE_ADMIN',
title=_('Send queued mail'))
@app.task
def task_send_queued_mail():
cfg = make_config_dict(app.settings, 'mail.')
tls = cfg.get('tls')
if tls is not None:
tls = asbool(tls)
mailer = SMTPMailer(hostname=cfg.get('host', 'localhost'),
port=int(cfg.get('port', 25)),
username=cfg.get('username'),
password=cfg.get('password'),
no_tls=tls is False,
force_tls=tls is True,
ssl=asbool(cfg.get('ssl', False)),
debug_smtp=asbool(cfg.get('debug', False)))
maildir = cfg['queue_path']
qp = QueueProcessor(mailer, maildir, ignore_transient=True)
qp.send_messages()
|
Add internal task to flush mail queue#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# NetProfile: Core module - Tasks
# Copyright © 2017 Alex Unigovsky
#
# This file is part of NetProfile.
# NetProfile is free software: you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public
# License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later
# version.
#
# NetProfile is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General
# Public License along with NetProfile. If not, see
# <http://www.gnu.org/licenses/>.
from __future__ import (unicode_literals, print_function,
absolute_import, division)
from pyramid.i18n import TranslationStringFactory
from pyramid.settings import asbool
from repoze.sendmail.queue import QueueProcessor
from repoze.sendmail.mailer import SMTPMailer
from netprofile.celery import (
app,
task_meta
)
from netprofile.common.util import make_config_dict
_ = TranslationStringFactory('netprofile_core')
@task_meta(cap='BASE_ADMIN',
title=_('Send queued mail'))
@app.task
def task_send_queued_mail():
cfg = make_config_dict(app.settings, 'mail.')
tls = cfg.get('tls')
if tls is not None:
tls = asbool(tls)
mailer = SMTPMailer(hostname=cfg.get('host', 'localhost'),
port=int(cfg.get('port', 25)),
username=cfg.get('username'),
password=cfg.get('password'),
no_tls=tls is False,
force_tls=tls is True,
ssl=asbool(cfg.get('ssl', False)),
debug_smtp=asbool(cfg.get('debug', False)))
maildir = cfg['queue_path']
qp = QueueProcessor(mailer, maildir, ignore_transient=True)
qp.send_messages()
|
<commit_before><commit_msg>Add internal task to flush mail queue<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# NetProfile: Core module - Tasks
# Copyright © 2017 Alex Unigovsky
#
# This file is part of NetProfile.
# NetProfile is free software: you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public
# License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later
# version.
#
# NetProfile is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General
# Public License along with NetProfile. If not, see
# <http://www.gnu.org/licenses/>.
from __future__ import (unicode_literals, print_function,
absolute_import, division)
from pyramid.i18n import TranslationStringFactory
from pyramid.settings import asbool
from repoze.sendmail.queue import QueueProcessor
from repoze.sendmail.mailer import SMTPMailer
from netprofile.celery import (
app,
task_meta
)
from netprofile.common.util import make_config_dict
_ = TranslationStringFactory('netprofile_core')
@task_meta(cap='BASE_ADMIN',
title=_('Send queued mail'))
@app.task
def task_send_queued_mail():
cfg = make_config_dict(app.settings, 'mail.')
tls = cfg.get('tls')
if tls is not None:
tls = asbool(tls)
mailer = SMTPMailer(hostname=cfg.get('host', 'localhost'),
port=int(cfg.get('port', 25)),
username=cfg.get('username'),
password=cfg.get('password'),
no_tls=tls is False,
force_tls=tls is True,
ssl=asbool(cfg.get('ssl', False)),
debug_smtp=asbool(cfg.get('debug', False)))
maildir = cfg['queue_path']
qp = QueueProcessor(mailer, maildir, ignore_transient=True)
qp.send_messages()
|
|
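A sketch of the mail.* settings consumed by task_send_queued_mail above; the key names mirror the cfg.get() calls, while the values are placeholders rather than a real deployment:
# Illustrative settings dictionary (values are assumptions for demonstration).
example_mail_settings = {
    'mail.host': 'smtp.example.com',   # cfg.get('host', 'localhost')
    'mail.port': '25',                 # cfg.get('port', 25)
    'mail.username': 'mailer',         # cfg.get('username')
    'mail.password': 'secret',         # cfg.get('password')
    'mail.tls': 'true',                # parsed with asbool(); omit to leave TLS optional
    'mail.ssl': 'false',               # asbool(cfg.get('ssl', False))
    'mail.debug': 'false',             # asbool(cfg.get('debug', False))
    'mail.queue_path': '/var/spool/netprofile/mail',  # required, no default
}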
05624ed9e24886dc8ac0f89b097b8f165aa719fe
|
tests/test_models.py
|
tests/test_models.py
|
#! /usr/bin/env python
import os
import pytest
from pymt import models
@pytest.mark.parametrize("cls", models.__all__)
def test_model_setup(cls):
model = models.__dict__[cls]()
args = model.setup()
assert os.path.isfile(os.path.join(args[1], args[0]))
@pytest.mark.parametrize("cls", models.__all__)
def test_model_initialize(cls):
model = models.__dict__[cls]()
args = model.setup()
model.initialize(*args)
assert model.initdir == args[1]
assert model._initialized
@pytest.mark.parametrize("cls", models.__all__)
def test_model_irf(cls):
model = models.__dict__[cls]()
model.initialize(*model.setup())
model.update()
assert model.get_current_time() > model.get_start_time()
model.finalize()
|
Add simple test for included model IRF methods.
|
Add simple test for included model IRF methods.
|
Python
|
mit
|
csdms/pymt,csdms/coupling,csdms/coupling
|
Add simple test for included model IRF methods.
|
#! /usr/bin/env python
import os
import pytest
from pymt import models
@pytest.mark.parametrize("cls", models.__all__)
def test_model_setup(cls):
model = models.__dict__[cls]()
args = model.setup()
assert os.path.isfile(os.path.join(args[1], args[0]))
@pytest.mark.parametrize("cls", models.__all__)
def test_model_initialize(cls):
model = models.__dict__[cls]()
args = model.setup()
model.initialize(*args)
assert model.initdir == args[1]
assert model._initialized
@pytest.mark.parametrize("cls", models.__all__)
def test_model_irf(cls):
model = models.__dict__[cls]()
model.initialize(*model.setup())
model.update()
assert model.get_current_time() > model.get_start_time()
model.finalize()
|
<commit_before><commit_msg>Add simple test for included model IRF methods.<commit_after>
|
#! /usr/bin/env python
import os
import pytest
from pymt import models
@pytest.mark.parametrize("cls", models.__all__)
def test_model_setup(cls):
model = models.__dict__[cls]()
args = model.setup()
assert os.path.isfile(os.path.join(args[1], args[0]))
@pytest.mark.parametrize("cls", models.__all__)
def test_model_initialize(cls):
model = models.__dict__[cls]()
args = model.setup()
model.initialize(*args)
assert model.initdir == args[1]
assert model._initialized
@pytest.mark.parametrize("cls", models.__all__)
def test_model_irf(cls):
model = models.__dict__[cls]()
model.initialize(*model.setup())
model.update()
assert model.get_current_time() > model.get_start_time()
model.finalize()
|
Add simple test for included model IRF methods.#! /usr/bin/env python
import os
import pytest
from pymt import models
@pytest.mark.parametrize("cls", models.__all__)
def test_model_setup(cls):
model = models.__dict__[cls]()
args = model.setup()
assert os.path.isfile(os.path.join(args[1], args[0]))
@pytest.mark.parametrize("cls", models.__all__)
def test_model_initialize(cls):
model = models.__dict__[cls]()
args = model.setup()
model.initialize(*args)
assert model.initdir == args[1]
assert model._initialized
@pytest.mark.parametrize("cls", models.__all__)
def test_model_irf(cls):
model = models.__dict__[cls]()
model.initialize(*model.setup())
model.update()
assert model.get_current_time() > model.get_start_time()
model.finalize()
|
<commit_before><commit_msg>Add simple test for included model IRF methods.<commit_after>#! /usr/bin/env python
import os
import pytest
from pymt import models
@pytest.mark.parametrize("cls", models.__all__)
def test_model_setup(cls):
model = models.__dict__[cls]()
args = model.setup()
assert os.path.isfile(os.path.join(args[1], args[0]))
@pytest.mark.parametrize("cls", models.__all__)
def test_model_initialize(cls):
model = models.__dict__[cls]()
args = model.setup()
model.initialize(*args)
assert model.initdir == args[1]
assert model._initialized
@pytest.mark.parametrize("cls", models.__all__)
def test_model_irf(cls):
model = models.__dict__[cls]()
model.initialize(*model.setup())
model.update()
assert model.get_current_time() > model.get_start_time()
model.finalize()
|
|
5f8dd68c094c5da5dd21970eb8038b226521de8b
|
hydrotrend-2/run_hydrotrend.py
|
hydrotrend-2/run_hydrotrend.py
|
#! /usr/bin/env python
# Brokers communication between HydroTrend and Dakota through files.
# Mark Piper (mark.piper@colorado.edu)
import sys
import os
import shutil
from subprocess import call
import numpy as np
def read(file):
'''
Reads a column of text containing HydroTrend output. Returns a numpy array.
'''
with open(file, 'r') as f:
values = f.read().split('\n')
return np.array(values[2:-1], dtype=np.float) # Remove header lines and EOF.
def write(file, array):
'''
Writes a Dakota results file from an input array.
'''
labels = ['Qs_mean', 'Qs_stdev']
f = open(file, 'w')
for i in range(len(array)):
f.write(str(array[i]) + '\t' + labels[i] + '\n')
f.close()
# Files and directories.
start_dir = os.path.dirname(os.path.realpath(__file__))
input_dir = os.path.join(start_dir, 'HYDRO_IN')
if not os.path.exists(input_dir): os.mkdir(input_dir)
output_dir = os.path.join(start_dir, 'HYDRO_OUTPUT')
if not os.path.exists(output_dir): os.mkdir(output_dir)
input_template = 'HYDRO.IN.template'
input_file = 'HYDRO.IN'
output_file = 'HYDROASCII.QS'
# Use the parsing utility `dprepro` (from $DAKOTA_DIR/bin) to
# incorporate the parameters from Dakota into the HydroTrend input
# template, creating a new HydroTrend input file.
shutil.copy(os.path.join(start_dir, input_template), os.curdir)
call(['dprepro', sys.argv[1], input_template, input_file])
shutil.copy(input_file, input_dir)
# Call HydroTrend, using the updated input file.
call(['hydrotrend', '--in-dir', input_dir, '--out-dir', output_dir])
# Calculate mean and standard deviation of Qs for the simulation
# time. Write the results to a Dakota results file.
shutil.copy(os.path.join(output_dir, output_file), os.curdir)
Qs = read(output_file)
m_Qs = [np.mean(Qs), np.std(Qs)]
write(sys.argv[2], m_Qs)
|
Add the analysis driver script
|
Add the analysis driver script
|
Python
|
mit
|
mdpiper/dakota-experiments,mdpiper/dakota-experiments,mdpiper/dakota-experiments,mcflugen/dakota-experiments,mcflugen/dakota-experiments
|
Add the analysis driver script
|
#! /usr/bin/env python
# Brokers communication between HydroTrend and Dakota through files.
# Mark Piper (mark.piper@colorado.edu)
import sys
import os
import shutil
from subprocess import call
import numpy as np
def read(file):
'''
Reads a column of text containing HydroTrend output. Returns a numpy array.
'''
with open(file, 'r') as f:
values = f.read().split('\n')
return np.array(values[2:-1], dtype=np.float) # Remove header lines and EOF.
def write(file, array):
'''
Writes a Dakota results file from an input array.
'''
labels = ['Qs_mean', 'Qs_stdev']
f = open(file, 'w')
for i in range(len(array)):
f.write(str(array[i]) + '\t' + labels[i] + '\n')
f.close()
# Files and directories.
start_dir = os.path.dirname(os.path.realpath(__file__))
input_dir = os.path.join(start_dir, 'HYDRO_IN')
if not os.path.exists(input_dir): os.mkdir(input_dir)
output_dir = os.path.join(start_dir, 'HYDRO_OUTPUT')
if not os.path.exists(output_dir): os.mkdir(output_dir)
input_template = 'HYDRO.IN.template'
input_file = 'HYDRO.IN'
output_file = 'HYDROASCII.QS'
# Use the parsing utility `dprepro` (from $DAKOTA_DIR/bin) to
# incorporate the parameters from Dakota into the HydroTrend input
# template, creating a new HydroTrend input file.
shutil.copy(os.path.join(start_dir, input_template), os.curdir)
call(['dprepro', sys.argv[1], input_template, input_file])
shutil.copy(input_file, input_dir)
# Call HydroTrend, using the updated input file.
call(['hydrotrend', '--in-dir', input_dir, '--out-dir', output_dir])
# Calculate mean and standard deviation of Qs for the simulation
# time. Write the results to a Dakota results file.
shutil.copy(os.path.join(output_dir, output_file), os.curdir)
Qs = read(output_file)
m_Qs = [np.mean(Qs), np.std(Qs)]
write(sys.argv[2], m_Qs)
|
<commit_before><commit_msg>Add the analysis driver script<commit_after>
|
#! /usr/bin/env python
# Brokers communication between HydroTrend and Dakota through files.
# Mark Piper (mark.piper@colorado.edu)
import sys
import os
import shutil
from subprocess import call
import numpy as np
def read(file):
'''
Reads a column of text containing HydroTrend output. Returns a numpy array.
'''
with open(file, 'r') as f:
values = f.read().split('\n')
return np.array(values[2:-1], dtype=np.float) # Remove header lines and EOF.
def write(file, array):
'''
Writes a Dakota results file from an input array.
'''
labels = ['Qs_mean', 'Qs_stdev']
f = open(file, 'w')
for i in range(len(array)):
f.write(str(array[i]) + '\t' + labels[i] + '\n')
f.close()
# Files and directories.
start_dir = os.path.dirname(os.path.realpath(__file__))
input_dir = os.path.join(start_dir, 'HYDRO_IN')
if not os.path.exists(input_dir): os.mkdir(input_dir)
output_dir = os.path.join(start_dir, 'HYDRO_OUTPUT')
if not os.path.exists(output_dir): os.mkdir(output_dir)
input_template = 'HYDRO.IN.template'
input_file = 'HYDRO.IN'
output_file = 'HYDROASCII.QS'
# Use the parsing utility `dprepro` (from $DAKOTA_DIR/bin) to
# incorporate the parameters from Dakota into the HydroTrend input
# template, creating a new HydroTrend input file.
shutil.copy(os.path.join(start_dir, input_template), os.curdir)
call(['dprepro', sys.argv[1], input_template, input_file])
shutil.copy(input_file, input_dir)
# Call HydroTrend, using the updated input file.
call(['hydrotrend', '--in-dir', input_dir, '--out-dir', output_dir])
# Calculate mean and standard deviation of Qs for the simulation
# time. Write the results to a Dakota results file.
shutil.copy(os.path.join(output_dir, output_file), os.curdir)
Qs = read(output_file)
m_Qs = [np.mean(Qs), np.std(Qs)]
write(sys.argv[2], m_Qs)
|
Add the analysis driver script#! /usr/bin/env python
# Brokers communication between HydroTrend and Dakota through files.
# Mark Piper (mark.piper@colorado.edu)
import sys
import os
import shutil
from subprocess import call
import numpy as np
def read(file):
'''
Reads a column of text containing HydroTrend output. Returns a numpy array.
'''
with open(file, 'r') as f:
values = f.read().split('\n')
return np.array(values[2:-1], dtype=np.float) # Remove header lines and EOF.
def write(file, array):
'''
Writes a Dakota results file from an input array.
'''
labels = ['Qs_mean', 'Qs_stdev']
f = open(file, 'w')
for i in range(len(array)):
f.write(str(array[i]) + '\t' + labels[i] + '\n')
f.close()
# Files and directories.
start_dir = os.path.dirname(os.path.realpath(__file__))
input_dir = os.path.join(start_dir, 'HYDRO_IN')
if not os.path.exists(input_dir): os.mkdir(input_dir)
output_dir = os.path.join(start_dir, 'HYDRO_OUTPUT')
if not os.path.exists(output_dir): os.mkdir(output_dir)
input_template = 'HYDRO.IN.template'
input_file = 'HYDRO.IN'
output_file = 'HYDROASCII.QS'
# Use the parsing utility `dprepro` (from $DAKOTA_DIR/bin) to
# incorporate the parameters from Dakota into the HydroTrend input
# template, creating a new HydroTrend input file.
shutil.copy(os.path.join(start_dir, input_template), os.curdir)
call(['dprepro', sys.argv[1], input_template, input_file])
shutil.copy(input_file, input_dir)
# Call HydroTrend, using the updated input file.
call(['hydrotrend', '--in-dir', input_dir, '--out-dir', output_dir])
# Calculate mean and standard deviation of Qs for the simulation
# time. Write the results to a Dakota results file.
shutil.copy(os.path.join(output_dir, output_file), os.curdir)
Qs = read(output_file)
m_Qs = [np.mean(Qs), np.std(Qs)]
write(sys.argv[2], m_Qs)
|
<commit_before><commit_msg>Add the analysis driver script<commit_after>#! /usr/bin/env python
# Brokers communication between HydroTrend and Dakota through files.
# Mark Piper (mark.piper@colorado.edu)
import sys
import os
import shutil
from subprocess import call
import numpy as np
def read(file):
'''
Reads a column of text containing HydroTrend output. Returns a numpy array.
'''
with open(file, 'r') as f:
values = f.read().split('\n')
return np.array(values[2:-1], dtype=np.float) # Remove header lines and EOF.
def write(file, array):
'''
Writes a Dakota results file from an input array.
'''
labels = ['Qs_mean', 'Qs_stdev']
f = open(file, 'w')
for i in range(len(array)):
f.write(str(array[i]) + '\t' + labels[i] + '\n')
f.close()
# Files and directories.
start_dir = os.path.dirname(os.path.realpath(__file__))
input_dir = os.path.join(start_dir, 'HYDRO_IN')
if not os.path.exists(input_dir): os.mkdir(input_dir)
output_dir = os.path.join(start_dir, 'HYDRO_OUTPUT')
if not os.path.exists(output_dir): os.mkdir(output_dir)
input_template = 'HYDRO.IN.template'
input_file = 'HYDRO.IN'
output_file = 'HYDROASCII.QS'
# Use the parsing utility `dprepro` (from $DAKOTA_DIR/bin) to
# incorporate the parameters from Dakota into the HydroTrend input
# template, creating a new HydroTrend input file.
shutil.copy(os.path.join(start_dir, input_template), os.curdir)
call(['dprepro', sys.argv[1], input_template, input_file])
shutil.copy(input_file, input_dir)
# Call HydroTrend, using the updated input file.
call(['hydrotrend', '--in-dir', input_dir, '--out-dir', output_dir])
# Calculate mean and standard deviation of Qs for the simulation
# time. Write the results to a Dakota results file.
shutil.copy(os.path.join(output_dir, output_file), os.curdir)
Qs = read(output_file)
m_Qs = [np.mean(Qs), np.std(Qs)]
write(sys.argv[2], m_Qs)
|
|
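A small self-contained sketch of the Dakota results file produced by the write() helper above, using placeholder Qs statistics; the real script writes to the results path Dakota passes in sys.argv[2]:
# Mirrors write() above with hypothetical values.
labels = ['Qs_mean', 'Qs_stdev']
m_Qs = [123.4, 56.7]
with open('results.out', 'w') as f:
    for value, label in zip(m_Qs, labels):
        f.write('%s\t%s\n' % (value, label))
# results.out then reads:
# 123.4   Qs_mean
# 56.7    Qs_stdev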
13467a7989c0412a6d5e8815c3441acecd7f5d58
|
pymatgen/symmetry/tests/test_spacegroup.py
|
pymatgen/symmetry/tests/test_spacegroup.py
|
#!/usr/bin/env python
'''
Created on Mar 12, 2012
'''
from __future__ import division
__author__="Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyue@mit.edu"
__date__ = "Mar 12, 2012"
import unittest
import os
from pymatgen.core.structure import PeriodicSite
from pymatgen.symmetry.spacegroup import Spacegroup
from pymatgen.io.vaspio import Poscar
from pymatgen.symmetry.spglib_adaptor import SymmetryFinder
import pymatgen
test_dir = os.path.join(os.path.dirname(os.path.abspath(pymatgen.__file__)), '..', 'test_files')
class SpacegroupTest(unittest.TestCase):
def setUp(self):
p = Poscar.from_file(os.path.join(test_dir, 'POSCAR'))
self.structure = p.struct
self.sg1 = SymmetryFinder(self.structure, 0.001).get_spacegroup()
self.sg2 = Spacegroup.from_spacegroup_number(62)
def test_are_symmetrically_equivalent(self):
sites1 = [self.structure[i] for i in [0,1]]
sites2 = [self.structure[i] for i in [2,3]]
self.assertTrue(self.sg1.are_symmetrically_equivalent(sites1, sites2, 1e-3))
self.assertTrue(self.sg2.are_symmetrically_equivalent(sites1, sites2, 1e-3))
sites1 = [self.structure[i] for i in [0,1]]
sites2 = [self.structure[i] for i in [0,2]]
self.assertFalse(self.sg1.are_symmetrically_equivalent(sites1, sites2, 1e-3))
self.assertFalse(self.sg2.are_symmetrically_equivalent(sites1, sites2, 1e-3))
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
Add a unittest for spacegroup. Still very basic.
|
Add a unittest for spacegroup. Still very basic.
Former-commit-id: f539c5f894a6ebd867dfee0eeb5dc1248de11c97 [formerly 2a214050a30048eab177f696d891a33c2860bb55]
Former-commit-id: 26ca50b3c5eceb253d7371b8c0f2a4d08e7f48d7
|
Python
|
mit
|
johnson1228/pymatgen,mbkumar/pymatgen,matk86/pymatgen,gmatteo/pymatgen,montoyjh/pymatgen,richardtran415/pymatgen,johnson1228/pymatgen,ndardenne/pymatgen,czhengsci/pymatgen,tallakahath/pymatgen,Bismarrck/pymatgen,czhengsci/pymatgen,setten/pymatgen,aykol/pymatgen,mbkumar/pymatgen,dongsenfo/pymatgen,johnson1228/pymatgen,xhqu1981/pymatgen,xhqu1981/pymatgen,fraricci/pymatgen,gpetretto/pymatgen,gmatteo/pymatgen,Bismarrck/pymatgen,matk86/pymatgen,nisse3000/pymatgen,davidwaroquiers/pymatgen,montoyjh/pymatgen,Bismarrck/pymatgen,matk86/pymatgen,dongsenfo/pymatgen,gVallverdu/pymatgen,setten/pymatgen,aykol/pymatgen,tschaume/pymatgen,blondegeek/pymatgen,montoyjh/pymatgen,dongsenfo/pymatgen,nisse3000/pymatgen,nisse3000/pymatgen,gVallverdu/pymatgen,fraricci/pymatgen,czhengsci/pymatgen,davidwaroquiers/pymatgen,vorwerkc/pymatgen,blondegeek/pymatgen,matk86/pymatgen,Bismarrck/pymatgen,gpetretto/pymatgen,tschaume/pymatgen,davidwaroquiers/pymatgen,tallakahath/pymatgen,ndardenne/pymatgen,richardtran415/pymatgen,fraricci/pymatgen,mbkumar/pymatgen,gpetretto/pymatgen,vorwerkc/pymatgen,setten/pymatgen,ndardenne/pymatgen,gVallverdu/pymatgen,fraricci/pymatgen,tschaume/pymatgen,tallakahath/pymatgen,davidwaroquiers/pymatgen,vorwerkc/pymatgen,mbkumar/pymatgen,czhengsci/pymatgen,tschaume/pymatgen,blondegeek/pymatgen,gVallverdu/pymatgen,xhqu1981/pymatgen,johnson1228/pymatgen,dongsenfo/pymatgen,tschaume/pymatgen,aykol/pymatgen,vorwerkc/pymatgen,richardtran415/pymatgen,blondegeek/pymatgen,Bismarrck/pymatgen,nisse3000/pymatgen,setten/pymatgen,gpetretto/pymatgen,montoyjh/pymatgen,richardtran415/pymatgen
|
Add a unittest for spacegroup. Still very basic.
Former-commit-id: f539c5f894a6ebd867dfee0eeb5dc1248de11c97 [formerly 2a214050a30048eab177f696d891a33c2860bb55]
Former-commit-id: 26ca50b3c5eceb253d7371b8c0f2a4d08e7f48d7
|
#!/usr/bin/env python
'''
Created on Mar 12, 2012
'''
from __future__ import division
__author__="Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyue@mit.edu"
__date__ = "Mar 12, 2012"
import unittest
import os
from pymatgen.core.structure import PeriodicSite
from pymatgen.symmetry.spacegroup import Spacegroup
from pymatgen.io.vaspio import Poscar
from pymatgen.symmetry.spglib_adaptor import SymmetryFinder
import pymatgen
test_dir = os.path.join(os.path.dirname(os.path.abspath(pymatgen.__file__)), '..', 'test_files')
class SpacegroupTest(unittest.TestCase):
def setUp(self):
p = Poscar.from_file(os.path.join(test_dir, 'POSCAR'))
self.structure = p.struct
self.sg1 = SymmetryFinder(self.structure, 0.001).get_spacegroup()
self.sg2 = Spacegroup.from_spacegroup_number(62)
def test_are_symmetrically_equivalent(self):
sites1 = [self.structure[i] for i in [0,1]]
sites2 = [self.structure[i] for i in [2,3]]
self.assertTrue(self.sg1.are_symmetrically_equivalent(sites1, sites2, 1e-3))
self.assertTrue(self.sg2.are_symmetrically_equivalent(sites1, sites2, 1e-3))
sites1 = [self.structure[i] for i in [0,1]]
sites2 = [self.structure[i] for i in [0,2]]
self.assertFalse(self.sg1.are_symmetrically_equivalent(sites1, sites2, 1e-3))
self.assertFalse(self.sg2.are_symmetrically_equivalent(sites1, sites2, 1e-3))
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
<commit_before><commit_msg>Add a unittest for spacegroup. Still very basic.
Former-commit-id: f539c5f894a6ebd867dfee0eeb5dc1248de11c97 [formerly 2a214050a30048eab177f696d891a33c2860bb55]
Former-commit-id: 26ca50b3c5eceb253d7371b8c0f2a4d08e7f48d7<commit_after>
|
#!/usr/bin/env python
'''
Created on Mar 12, 2012
'''
from __future__ import division
__author__="Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyue@mit.edu"
__date__ = "Mar 12, 2012"
import unittest
import os
from pymatgen.core.structure import PeriodicSite
from pymatgen.symmetry.spacegroup import Spacegroup
from pymatgen.io.vaspio import Poscar
from pymatgen.symmetry.spglib_adaptor import SymmetryFinder
import pymatgen
test_dir = os.path.join(os.path.dirname(os.path.abspath(pymatgen.__file__)), '..', 'test_files')
class SpacegroupTest(unittest.TestCase):
def setUp(self):
p = Poscar.from_file(os.path.join(test_dir, 'POSCAR'))
self.structure = p.struct
self.sg1 = SymmetryFinder(self.structure, 0.001).get_spacegroup()
self.sg2 = Spacegroup.from_spacegroup_number(62)
def test_are_symmetrically_equivalent(self):
sites1 = [self.structure[i] for i in [0,1]]
sites2 = [self.structure[i] for i in [2,3]]
self.assertTrue(self.sg1.are_symmetrically_equivalent(sites1, sites2, 1e-3))
self.assertTrue(self.sg2.are_symmetrically_equivalent(sites1, sites2, 1e-3))
sites1 = [self.structure[i] for i in [0,1]]
sites2 = [self.structure[i] for i in [0,2]]
self.assertFalse(self.sg1.are_symmetrically_equivalent(sites1, sites2, 1e-3))
self.assertFalse(self.sg2.are_symmetrically_equivalent(sites1, sites2, 1e-3))
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
Add a unittest for spacegroup. Still very basic.
Former-commit-id: f539c5f894a6ebd867dfee0eeb5dc1248de11c97 [formerly 2a214050a30048eab177f696d891a33c2860bb55]
Former-commit-id: 26ca50b3c5eceb253d7371b8c0f2a4d08e7f48d7#!/usr/bin/env python
'''
Created on Mar 12, 2012
'''
from __future__ import division
__author__="Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyue@mit.edu"
__date__ = "Mar 12, 2012"
import unittest
import os
from pymatgen.core.structure import PeriodicSite
from pymatgen.symmetry.spacegroup import Spacegroup
from pymatgen.io.vaspio import Poscar
from pymatgen.symmetry.spglib_adaptor import SymmetryFinder
import pymatgen
test_dir = os.path.join(os.path.dirname(os.path.abspath(pymatgen.__file__)), '..', 'test_files')
class SpacegroupTest(unittest.TestCase):
def setUp(self):
p = Poscar.from_file(os.path.join(test_dir, 'POSCAR'))
self.structure = p.struct
self.sg1 = SymmetryFinder(self.structure, 0.001).get_spacegroup()
self.sg2 = Spacegroup.from_spacegroup_number(62)
def test_are_symmetrically_equivalent(self):
sites1 = [self.structure[i] for i in [0,1]]
sites2 = [self.structure[i] for i in [2,3]]
self.assertTrue(self.sg1.are_symmetrically_equivalent(sites1, sites2, 1e-3))
self.assertTrue(self.sg2.are_symmetrically_equivalent(sites1, sites2, 1e-3))
sites1 = [self.structure[i] for i in [0,1]]
sites2 = [self.structure[i] for i in [0,2]]
self.assertFalse(self.sg1.are_symmetrically_equivalent(sites1, sites2, 1e-3))
self.assertFalse(self.sg2.are_symmetrically_equivalent(sites1, sites2, 1e-3))
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
<commit_before><commit_msg>Add a unittest for spacegroup. Still very basic.
Former-commit-id: f539c5f894a6ebd867dfee0eeb5dc1248de11c97 [formerly 2a214050a30048eab177f696d891a33c2860bb55]
Former-commit-id: 26ca50b3c5eceb253d7371b8c0f2a4d08e7f48d7<commit_after>#!/usr/bin/env python
'''
Created on Mar 12, 2012
'''
from __future__ import division
__author__="Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyue@mit.edu"
__date__ = "Mar 12, 2012"
import unittest
import os
from pymatgen.core.structure import PeriodicSite
from pymatgen.symmetry.spacegroup import Spacegroup
from pymatgen.io.vaspio import Poscar
from pymatgen.symmetry.spglib_adaptor import SymmetryFinder
import pymatgen
test_dir = os.path.join(os.path.dirname(os.path.abspath(pymatgen.__file__)), '..', 'test_files')
class SpacegroupTest(unittest.TestCase):
def setUp(self):
p = Poscar.from_file(os.path.join(test_dir, 'POSCAR'))
self.structure = p.struct
self.sg1 = SymmetryFinder(self.structure, 0.001).get_spacegroup()
self.sg2 = Spacegroup.from_spacegroup_number(62)
def test_are_symmetrically_equivalent(self):
sites1 = [self.structure[i] for i in [0,1]]
sites2 = [self.structure[i] for i in [2,3]]
self.assertTrue(self.sg1.are_symmetrically_equivalent(sites1, sites2, 1e-3))
self.assertTrue(self.sg2.are_symmetrically_equivalent(sites1, sites2, 1e-3))
sites1 = [self.structure[i] for i in [0,1]]
sites2 = [self.structure[i] for i in [0,2]]
self.assertFalse(self.sg1.are_symmetrically_equivalent(sites1, sites2, 1e-3))
self.assertFalse(self.sg2.are_symmetrically_equivalent(sites1, sites2, 1e-3))
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
|
d576b654c9ae9b53630dd9133d50c7ddc34e1d3a
|
scripts/addUserCounts.py
|
scripts/addUserCounts.py
|
"""
To each region, let's add the number of users that tweeted in the region.
"""
import json
import pymongo
import twitterproj
def via_json():
json_files = [
'json/grids.states.bot_filtered.users.json',
'json/grids.counties.bot_filtered.users.json',
'json/grids.squares.bot_filtered.users.json'
]
db = twitterproj.connect()
collections = [
db.grids.states.bot_filtered,
db.grids.counties.bot_filtered,
db.grids.squares.bot_filtered,
]
keys = [
'fips',
'geoid',
'_id',
]
for fn, coll, key in zip(json_files, collections, keys):
print fn
avoid = {'min', 'max', 'norm'}
with open(fn) as f:
userCounts = json.load(f)
uc = {}
for k, v in userCounts.items():
if k in avoid:
continue
elif key == '_id':
# The keys need to be int.
k = int(k)
uc[k] = v
userCounts = uc
for k, count in userCounts.items():
if k in avoid:
continue
coll.update({key:k}, {"$set": {"user_count": count}}, upsert=False)
coll.create_index([("user_count", pymongo.DESCENDING)])
|
Add script used to add user counts stored in json file. Still need to modify this so that it is built directly.
|
Add script used to add user counts stored in json file.
Still need to modify this so that it is built directly.
|
Python
|
unlicense
|
chebee7i/twitter,chebee7i/twitter,chebee7i/twitter
|
Add script used to add user counts stored in json file.
Still need to modify this so that it is built directly.
|
"""
To each region, let's add the number of users that tweeted in the region.
"""
import json
import pymongo
import twitterproj
def via_json():
json_files = [
'json/grids.states.bot_filtered.users.json',
'json/grids.counties.bot_filtered.users.json',
'json/grids.squares.bot_filtered.users.json'
]
db = twitterproj.connect()
collections = [
db.grids.states.bot_filtered,
db.grids.counties.bot_filtered,
db.grids.squares.bot_filtered,
]
keys = [
'fips',
'geoid',
'_id',
]
for fn, coll, key in zip(json_files, collections, keys):
print fn
avoid = {'min', 'max', 'norm'}
with open(fn) as f:
userCounts = json.load(f)
uc = {}
for k, v in userCounts.items():
if k in avoid:
continue
elif key == '_id':
# The keys need to be int.
k = int(k)
uc[k] = v
userCounts = uc
for k, count in userCounts.items():
if k in avoid:
continue
coll.update({key:k}, {"$set": {"user_count": count}}, upsert=False)
coll.create_index([("user_count", pymongo.DESCENDING)])
|
<commit_before><commit_msg>Add script used to add user counts stored in json file.
Still need to modify this so that it is built directly.<commit_after>
|
"""
To each region, let's add the number of users that tweeted in the region.
"""
import json
import pymongo
import twitterproj
def via_json():
json_files = [
'json/grids.states.bot_filtered.users.json',
'json/grids.counties.bot_filtered.users.json',
'json/grids.squares.bot_filtered.users.json'
]
db = twitterproj.connect()
collections = [
db.grids.states.bot_filtered,
db.grids.counties.bot_filtered,
db.grids.squares.bot_filtered,
]
keys = [
'fips',
'geoid',
'_id',
]
for fn, coll, key in zip(json_files, collections, keys):
print fn
avoid = {'min', 'max', 'norm'}
with open(fn) as f:
userCounts = json.load(f)
uc = {}
for k, v in userCounts.items():
if k in avoid:
continue
elif key == '_id':
# The keys need to be int.
k = int(k)
uc[k] = v
userCounts = uc
for k, count in userCounts.items():
if k in avoid:
continue
coll.update({key:k}, {"$set": {"user_count": count}}, upsert=False)
coll.create_index([("user_count", pymongo.DESCENDING)])
|
Add script used to add user counts stored in json file.
Still need to modify this so that it is built directly."""
To each region, let's add the number of users that tweeted in the region.
"""
import json
import pymongo
import twitterproj
def via_json():
json_files = [
'json/grids.states.bot_filtered.users.json',
'json/grids.counties.bot_filtered.users.json',
'json/grids.squares.bot_filtered.users.json'
]
db = twitterproj.connect()
collections = [
db.grids.states.bot_filtered,
db.grids.counties.bot_filtered,
db.grids.squares.bot_filtered,
]
keys = [
'fips',
'geoid',
'_id',
]
for fn, coll, key in zip(json_files, collections, keys):
print fn
avoid = {'min', 'max', 'norm'}
with open(fn) as f:
userCounts = json.load(f)
uc = {}
for k, v in userCounts.items():
if k in avoid:
continue
elif key == '_id':
# The keys need to be int.
k = int(k)
uc[k] = v
userCounts = uc
for k, count in userCounts.items():
if k in avoid:
continue
coll.update({key:k}, {"$set": {"user_count": count}}, upsert=False)
coll.create_index([("user_count", pymongo.DESCENDING)])
|
<commit_before><commit_msg>Add script used to add user counts stored in json file.
Still need to modify this so that it is built directly.<commit_after>"""
To each region, let's add the number of users that tweeted in the region.
"""
import json
import pymongo
import twitterproj
def via_json():
json_files = [
'json/grids.states.bot_filtered.users.json',
'json/grids.counties.bot_filtered.users.json',
'json/grids.squares.bot_filtered.users.json'
]
db = twitterproj.connect()
collections = [
db.grids.states.bot_filtered,
db.grids.counties.bot_filtered,
db.grids.squares.bot_filtered,
]
keys = [
'fips',
'geoid',
'_id',
]
for fn, coll, key in zip(json_files, collections, keys):
print fn
avoid = {'min', 'max', 'norm'}
with open(fn) as f:
userCounts = json.load(f)
uc = {}
for k, v in userCounts.items():
if k in avoid:
continue
elif key == '_id':
# The keys need to be int.
k = int(k)
uc[k] = v
userCounts = uc
for k, count in userCounts.items():
if k in avoid:
continue
coll.update({key:k}, {"$set": {"user_count": count}}, upsert=False)
coll.create_index([("user_count", pymongo.DESCENDING)])
|
|
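A sketch of the JSON shape via_json() above expects from each grids.*.users.json file; the region keys and counts are placeholders:
# Illustrative input for via_json(): region key -> user count, plus summary
# entries that the `avoid` set skips.
example_user_counts = {
    "06": 120345,   # fips / geoid / grid-cell id, depending on the collection
    "36": 98765,
    "min": 12,      # ignored
    "max": 120345,  # ignored
    "norm": 0.73,   # ignored
}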
5894f4148f6b6311b2985f3b23d9c3545ce55fb1
|
bigml/multiple_models.py
|
bigml/multiple_models.py
|
# -*- coding: utf-8 -*-
#!/usr/bin/env python
#
# Copyright 2012 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""A Multiple Local Predictive Model.
This module defines a Multiple Model to make predictions locally using multiple
local models.
This module can not only save you a few credits, but also enormously
reduce the latency for each prediction and let you use your models
offline.
from bigml.api import BigML
from bigml.model import MultipleModel
api = BigML()
model = MultipleModel([api.get_model('model/5026965515526876630001b2')])
model.predict({"petal length": 3, "petal width": 1})
"""
import logging
LOGGER = logging.getLogger('BigML')
import numbers
from bigml.model import Model
def avg(data):
return float(sum(data))/len(data) if len(data) > 0 else float('nan')
class MultipleModel(object):
"""A multiple local model.
Uses a number of BigML remote models to build a local version that can be
used to generate predictions locally using mode voting for classification
and average voting for regression.
"""
def __init__(self, models):
self.models = []
if isinstance(models, list):
for model in models:
self.models.append(Model(model))
else:
self.models.append(Model(models))
def predict(self, input_data):
"""Makes a prediction based on the prediction made by every model.
"""
predictions = []
for model in self.models:
predictions.append(model.predict(input_data, by_name=False))
if all([isinstance(prediction, numbers.Number) for prediction in
predictions]):
return avg(predictions)
else:
mode = {}
for prediction in predictions:
if prediction in mode:
mode[prediction] = mode[prediction] + 1
else:
mode[prediction] = 1
return max(mode, key=mode.get)
|
Add simple predict method to multi models
|
Add simple predict method to multi models
|
Python
|
apache-2.0
|
xaowoodenfish/python-1,ShaguptaS/python,mmerce/python,jaor/python,bigmlcom/python
|
Add simple predict method to multi models
|
# -*- coding: utf-8 -*-
#!/usr/bin/env python
#
# Copyright 2012 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""A Multiple Local Predictive Model.
This module defines a Multiple Model to make predictions locally using multiple
local models.
This module can not only save you a few credits, but also enormously
reduce the latency for each prediction and let you use your models
offline.
from bigml.api import BigML
from bigml.model import MultipleModel
api = BigML()
model = MultipleModel([api.get_model('model/5026965515526876630001b2')])
model.predict({"petal length": 3, "petal width": 1})
"""
import logging
LOGGER = logging.getLogger('BigML')
import numbers
from bigml.model import Model
def avg(data):
return float(sum(data))/len(data) if len(data) > 0 else float('nan')
class MultipleModel(object):
"""A multiple local model.
Uses a number of BigML remote models to build a local version that can be
used to generate predictions locally using mode voting for classification
and average voting for regression.
"""
def __init__(self, models):
self.models = []
if isinstance(models, list):
for model in models:
self.models.append(Model(model))
else:
self.models.append(Model(models))
def predict(self, input_data):
"""Makes a prediction based on the prediction made by every model.
"""
predictions = []
for model in self.models:
predictions.append(model.predict(input_data, by_name=False))
if all([isinstance(prediction, numbers.Number) for prediction in
predictions]):
return avg(predictions)
else:
mode = {}
for prediction in predictions:
if prediction in mode:
mode[prediction] = mode[prediction] + 1
else:
mode[prediction] = 1
return max(mode, key=mode.get)
|
<commit_before><commit_msg>Add simple predict method to multi models<commit_after>
|
# -*- coding: utf-8 -*-
#!/usr/bin/env python
#
# Copyright 2012 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""A Multiple Local Predictive Model.
This module defines a Multiple Model to make predictions locally using multiple
local models.
This module can not only save you a few credits, but also enormously
reduce the latency for each prediction and let you use your models
offline.
from bigml.api import BigML
from bigml.model import MultipleModel
api = BigML()
model = MultipleModel([api.get_model('model/5026965515526876630001b2')])
model.predict({"petal length": 3, "petal width": 1})
"""
import logging
LOGGER = logging.getLogger('BigML')
import numbers
from bigml.model import Model
def avg(data):
return float(sum(data))/len(data) if len(data) > 0 else float('nan')
class MultipleModel(object):
"""A multiple local model.
Uses a number of BigML remote models to build a local version that can be
used to generate predictions locally using mode voting for classification
and average voting for regression.
"""
def __init__(self, models):
self.models = []
if isinstance(models, list):
for model in models:
self.models.append(Model(model))
else:
self.models.append(Model(models))
def predict(self, input_data):
"""Makes a prediction based on the prediction made by every model.
"""
predictions = []
for model in self.models:
predictions.append(model.predict(input_data, by_name=False))
if all([isinstance(prediction, numbers.Number) for prediction in
predictions]):
return avg(predictions)
else:
mode = {}
for prediction in predictions:
if prediction in mode:
mode[prediction] = mode[prediction] + 1
else:
mode[prediction] = 1
return max(mode, key=mode.get)
|
Add simple predict method to multi models# -*- coding: utf-8 -*-
#!/usr/bin/env python
#
# Copyright 2012 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""A Multiple Local Predictive Model.
This module defines a Multiple Model to make predictions locally using multiple
local models.
This module can not only save you a few credits, but also enormously
reduce the latency for each prediction and let you use your models
offline.
from bigml.api import BigML
from bigml.model import MultipleModel
api = BigML()
model = MultipleModel([api.get_model('model/5026965515526876630001b2')])
model.predict({"petal length": 3, "petal width": 1})
"""
import logging
LOGGER = logging.getLogger('BigML')
import numbers
from bigml.model import Model
def avg(data):
return float(sum(data))/len(data) if len(data) > 0 else float('nan')
class MultipleModel(object):
"""A multiple local model.
Uses a number of BigML remote models to build a local version that can be
used to generate predictions locally using mode voting for classification
and average voting for regression.
"""
def __init__(self, models):
self.models = []
if isinstance(models, list):
for model in models:
self.models.append(Model(model))
else:
self.models.append(Model(models))
def predict(self, input_data):
"""Makes a prediction based on the prediction made by every model.
"""
predictions = []
for model in self.models:
predictions.append(model.predict(input_data, by_name=False))
if all([isinstance(prediction, numbers.Number) for prediction in
predictions]):
return avg(predictions)
else:
mode = {}
for prediction in predictions:
if prediction in mode:
mode[prediction] = mode[prediction] + 1
else:
mode[prediction] = 1
return max(mode, key=mode.get)
|
<commit_before><commit_msg>Add simple predict method to multi models<commit_after># -*- coding: utf-8 -*-
#!/usr/bin/env python
#
# Copyright 2012 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""A Multiple Local Predictive Model.
This module defines a Multiple Model to make predictions locally using multiple
local models.
This module can not only save you a few credits, but also enormously
reduce the latency for each prediction and let you use your models
offline.
from bigml.api import BigML
from bigml.model import MultipleModel
api = BigML()
model = MultipleModel([api.get_model('model/5026965515526876630001b2')])
model.predict({"petal length": 3, "petal width": 1})
"""
import logging
LOGGER = logging.getLogger('BigML')
import numbers
from bigml.model import Model
def avg(data):
return float(sum(data))/len(data) if len(data) > 0 else float('nan')
class MultipleModel(object):
"""A multiple local model.
    Uses a number of BigML remote models to build a local version that can be
used to generate predictions locally using mode voting for classification
and average voting for regression.
"""
def __init__(self, models):
self.models = []
if isinstance(models, list):
for model in models:
self.models.append(Model(model))
else:
            self.models.append(Model(models))
def predict(self, input_data):
"""Makes a prediction based on the prediction made by every model.
"""
predictions = []
for model in self.models:
predictions.append(model.predict(input_data, by_name=False))
if all([isinstance(prediction, numbers.Number) for prediction in
predictions]):
return avg(predictions)
else:
mode = {}
for prediction in predictions:
if prediction in mode:
mode[prediction] = mode[prediction] + 1
else:
mode[prediction] = 1
            return max(mode, key=mode.get)
|
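The combination rule in predict() is worth seeing in isolation: numeric predictions are averaged, while categorical predictions are combined by mode voting. A minimal, self-contained sketch of that rule on toy prediction lists (no BigML calls, purely illustrative):
import numbers
def avg(data):
    return float(sum(data)) / len(data) if len(data) > 0 else float('nan')
def combine(predictions):
    # Regression case: every prediction is numeric, so average them.
    if all(isinstance(p, numbers.Number) for p in predictions):
        return avg(predictions)
    # Classification case: pick the most frequent label (mode voting).
    counts = {}
    for p in predictions:
        counts[p] = counts.get(p, 0) + 1
    return max(counts, key=counts.get)
print(combine([1.2, 1.4, 1.0]))                    # ~1.2
print(combine(['setosa', 'virginica', 'setosa']))  # setosa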
|
b6353857dc124c8a9295cab865418ee888adf1e9
|
examples/plot_mne_example.py
|
examples/plot_mne_example.py
|
"""
Using NeuroDSP with MNE
=======================
This example explores some analyses using NeuroDSP, integrated with MNE.
"""
###################################################################################################
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.datasets import sample
from neurodsp.spectral import compute_spectrum, trim_spectrum
from neurodsp.burst import detect_bursts_dual_threshold
###################################################################################################
# Get the data path for the MNE example data
raw_fname = sample.data_path() + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
# Load the file of example MNE data
raw = io.read_raw_fif(raw_fname, preload=True, verbose=False)
###################################################################################################
# Select EEG channels from the dataset
raw = raw.pick_types(meg=False, eeg=True, eog=True, exclude='bads')
# Grab the sampling rate from the data
fs = raw.info['sfreq']
###################################################################################################
# Settings
ch_label = 'EEG 058'
t_start = 20000
t_stop = int(t_start + (10 * fs))
###################################################################################################
# Extract an example channel to explore
sig, times = raw.get_data(mne.pick_channels(raw.ch_names, [ch_label]), start=t_start, stop=t_stop, return_times=True)
sig = np.squeeze(sig)
###################################################################################################
# Plot a segment of the extracted time series data
plt.figure(figsize=(16, 3))
plt.plot(times, sig, 'k')
###################################################################################################
# Calculate the power spectrum using a median Welch estimate & extract a frequency range of interest
freqs, powers = compute_spectrum(sig, fs, method='median')
freqs, powers = trim_spectrum(freqs, powers, [3, 30])
###################################################################################################
# Check where the peak power is, in center frequency
peak_cf = freqs[np.argmax(powers)]
print(peak_cf)
###################################################################################################
# Plot the power spectra
plt.figure(figsize=(8, 8))
plt.semilogy(freqs, powers)
plt.plot(freqs[np.argmax(powers)], np.max(powers), '.r', ms=12)
###################################################################################################
# Burst Settings
amp_dual_thresh = (1., 1.5)
f_range = (peak_cf-2, peak_cf+2)
###################################################################################################
# Detect bursts of high amplitude oscillations in the extracted signal
bursting = detect_bursts_dual_threshold(sig, fs, f_range, amp_dual_thresh)
###################################################################################################
# Plot original signal and burst activity
plt.figure(figsize=(16, 3))
plt.plot(times, sig, 'k', label='Raw Data')
plt.plot(times[bursting], sig[bursting], 'r', label='Detected Bursts')
plt.legend(loc='best')
###################################################################################################
|
Add draft of MNE example
|
Add draft of MNE example
|
Python
|
apache-2.0
|
srcole/neurodsp,srcole/neurodsp,voytekresearch/neurodsp
|
Add draft of MNE example
|
"""
Using NeuroDSP with MNE
=======================
This example explores some analyses using NeuroDSP, integrated with MNE.
"""
###################################################################################################
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.datasets import sample
from neurodsp.spectral import compute_spectrum, trim_spectrum
from neurodsp.burst import detect_bursts_dual_threshold
###################################################################################################
# Get the data path for the MNE example data
raw_fname = sample.data_path() + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
# Load the file of example MNE data
raw = io.read_raw_fif(raw_fname, preload=True, verbose=False)
###################################################################################################
# Select EEG channels from the dataset
raw = raw.pick_types(meg=False, eeg=True, eog=True, exclude='bads')
# Grab the sampling rate from the data
fs = raw.info['sfreq']
###################################################################################################
# Settings
ch_label = 'EEG 058'
t_start = 20000
t_stop = int(t_start + (10 * fs))
###################################################################################################
# Extract an example channel to explore
sig, times = raw.get_data(mne.pick_channels(raw.ch_names, [ch_label]), start=t_start, stop=t_stop, return_times=True)
sig = np.squeeze(sig)
###################################################################################################
# Plot a segment of the extracted time series data
plt.figure(figsize=(16, 3))
plt.plot(times, sig, 'k')
###################################################################################################
# Calculate the power spectrum using a median Welch estimate & extract a frequency range of interest
freqs, powers = compute_spectrum(sig, fs, method='median')
freqs, powers = trim_spectrum(freqs, powers, [3, 30])
###################################################################################################
# Check where the peak power is, in center frequency
peak_cf = freqs[np.argmax(powers)]
print(peak_cf)
###################################################################################################
# Plot the power spectra
plt.figure(figsize=(8, 8))
plt.semilogy(freqs, powers)
plt.plot(freqs[np.argmax(powers)], np.max(powers), '.r', ms=12)
###################################################################################################
# Burst Settings
amp_dual_thresh = (1., 1.5)
f_range = (peak_cf-2, peak_cf+2)
###################################################################################################
# Detect bursts of high amplitude oscillations in the extracted signal
bursting = detect_bursts_dual_threshold(sig, fs, f_range, amp_dual_thresh)
###################################################################################################
# Plot original signal and burst activity
plt.figure(figsize=(16, 3))
plt.plot(times, sig, 'k', label='Raw Data')
plt.plot(times[bursting], sig[bursting], 'r', label='Detected Bursts')
plt.legend(loc='best')
###################################################################################################
|
<commit_before><commit_msg>Add draft of MNE example<commit_after>
|
"""
Using NeuroDSP with MNE
=======================
This example explores some analyses using NeuroDSP, integrated with MNE.
"""
###################################################################################################
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.datasets import sample
from neurodsp.spectral import compute_spectrum, trim_spectrum
from neurodsp.burst import detect_bursts_dual_threshold
###################################################################################################
# Get the data path for the MNE example data
raw_fname = sample.data_path() + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
# Load the file of example MNE data
raw = io.read_raw_fif(raw_fname, preload=True, verbose=False)
###################################################################################################
# Select EEG channels from the dataset
raw = raw.pick_types(meg=False, eeg=True, eog=True, exclude='bads')
# Grab the sampling rate from the data
fs = raw.info['sfreq']
###################################################################################################
# Settings
ch_label = 'EEG 058'
t_start = 20000
t_stop = int(t_start + (10 * fs))
###################################################################################################
# Extract an example channel to explore
sig, times = raw.get_data(mne.pick_channels(raw.ch_names, [ch_label]), start=t_start, stop=t_stop, return_times=True)
sig = np.squeeze(sig)
###################################################################################################
# Plot a segment of the extracted time series data
plt.figure(figsize=(16, 3))
plt.plot(times, sig, 'k')
###################################################################################################
# Calculate the power spectrum using a median Welch estimate & extract a frequency range of interest
freqs, powers = compute_spectrum(sig, fs, method='median')
freqs, powers = trim_spectrum(freqs, powers, [3, 30])
###################################################################################################
# Check where the peak power is, in center frequency
peak_cf = freqs[np.argmax(powers)]
print(peak_cf)
###################################################################################################
# Plot the power spectra
plt.figure(figsize=(8, 8))
plt.semilogy(freqs, powers)
plt.plot(freqs[np.argmax(powers)], np.max(powers), '.r', ms=12)
###################################################################################################
# Burst Settings
amp_dual_thresh = (1., 1.5)
f_range = (peak_cf-2, peak_cf+2)
###################################################################################################
# Detect bursts of high amplitude oscillations in the extracted signal
bursting = detect_bursts_dual_threshold(sig, fs, f_range, amp_dual_thresh)
###################################################################################################
# Plot original signal and burst activity
plt.figure(figsize=(16, 3))
plt.plot(times, sig, 'k', label='Raw Data')
plt.plot(times[bursting], sig[bursting], 'r', label='Detected Bursts')
plt.legend(loc='best')
###################################################################################################
|
Add draft of MNE example"""
Using NeuroDSP with MNE
=======================
This example explores some analyses using NeuroDSP, integrated with MNE.
"""
###################################################################################################
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.datasets import sample
from neurodsp.spectral import compute_spectrum, trim_spectrum
from neurodsp.burst import detect_bursts_dual_threshold
###################################################################################################
# Get the data path for the MNE example data
raw_fname = sample.data_path() + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
# Load the file of example MNE data
raw = io.read_raw_fif(raw_fname, preload=True, verbose=False)
###################################################################################################
# Select EEG channels from the dataset
raw = raw.pick_types(meg=False, eeg=True, eog=True, exclude='bads')
# Grab the sampling rate from the data
fs = raw.info['sfreq']
###################################################################################################
# Settings
ch_label = 'EEG 058'
t_start = 20000
t_stop = int(t_start + (10 * fs))
###################################################################################################
# Extract an example channel to explore
sig, times = raw.get_data(mne.pick_channels(raw.ch_names, [ch_label]), start=t_start, stop=t_stop, return_times=True)
sig = np.squeeze(sig)
###################################################################################################
# Plot a segment of the extracted time series data
plt.figure(figsize=(16, 3))
plt.plot(times, sig, 'k')
###################################################################################################
# Calculate the power spectrum using a median Welch estimate & extract a frequency range of interest
freqs, powers = compute_spectrum(sig, fs, method='median')
freqs, powers = trim_spectrum(freqs, powers, [3, 30])
###################################################################################################
# Check where the peak power is, in center frequency
peak_cf = freqs[np.argmax(powers)]
print(peak_cf)
###################################################################################################
# Plot the power spectra
plt.figure(figsize=(8, 8))
plt.semilogy(freqs, powers)
plt.plot(freqs[np.argmax(powers)], np.max(powers), '.r', ms=12)
###################################################################################################
# Burst Settings
amp_dual_thresh = (1., 1.5)
f_range = (peak_cf-2, peak_cf+2)
###################################################################################################
# Detect bursts of high amplitude oscillations in the extracted signal
bursting = detect_bursts_dual_threshold(sig, fs, f_range, amp_dual_thresh)
###################################################################################################
# Plot original signal and burst activity
plt.figure(figsize=(16, 3))
plt.plot(times, sig, 'k', label='Raw Data')
plt.plot(times[bursting], sig[bursting], 'r', label='Detected Bursts')
plt.legend(loc='best')
###################################################################################################
|
<commit_before><commit_msg>Add draft of MNE example<commit_after>"""
Using NeuroDSP with MNE
=======================
This example explores some analyses using NeuroDSP, integrated with MNE.
"""
###################################################################################################
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.datasets import sample
from neurodsp.spectral import compute_spectrum, trim_spectrum
from neurodsp.burst import detect_bursts_dual_threshold
###################################################################################################
# Get the data path for the MNE example data
raw_fname = sample.data_path() + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
# Load the file of example MNE data
raw = io.read_raw_fif(raw_fname, preload=True, verbose=False)
###################################################################################################
# Select EEG channels from the dataset
raw = raw.pick_types(meg=False, eeg=True, eog=True, exclude='bads')
# Grab the sampling rate from the data
fs = raw.info['sfreq']
###################################################################################################
# Settings
ch_label = 'EEG 058'
t_start = 20000
t_stop = int(t_start + (10 * fs))
###################################################################################################
# Extract an example channel to explore
sig, times = raw.get_data(mne.pick_channels(raw.ch_names, [ch_label]), start=t_start, stop=t_stop, return_times=True)
sig = np.squeeze(sig)
###################################################################################################
# Plot a segment of the extracted time series data
plt.figure(figsize=(16, 3))
plt.plot(times, sig, 'k')
###################################################################################################
# Calculate the power spectrum using a median Welch estimate & extract a frequency range of interest
freqs, powers = compute_spectrum(sig, fs, method='median')
freqs, powers = trim_spectrum(freqs, powers, [3, 30])
###################################################################################################
# Check where the peak power is, in center frequency
peak_cf = freqs[np.argmax(powers)]
print(peak_cf)
###################################################################################################
# Plot the power spectra
plt.figure(figsize=(8, 8))
plt.semilogy(freqs, powers)
plt.plot(freqs[np.argmax(powers)], np.max(powers), '.r', ms=12)
###################################################################################################
# Burst Settings
amp_dual_thresh = (1., 1.5)
f_range = (peak_cf-2, peak_cf+2)
###################################################################################################
# Detect bursts of high amplitude oscillations in the extracted signal
bursting = detect_bursts_dual_threshold(sig, fs, f_range, amp_dual_thresh)
###################################################################################################
# Plot original signal and burst activity
plt.figure(figsize=(16, 3))
plt.plot(times, sig, 'k', label='Raw Data')
plt.plot(times[bursting], sig[bursting], 'r', label='Detected Bursts')
plt.legend(loc='best')
###################################################################################################
|
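A quantity that is often reported from the boolean burst mask is the fraction of samples spent bursting; a minimal follow-up sketch, reusing the bursting array computed above:
import numpy as np
# bursting is a boolean array, one entry per sample; its mean is the burst fraction.
burst_fraction = np.mean(bursting)
print('Fraction of samples in burst: {:1.3f}'.format(burst_fraction))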
|
44bbd4fd96791d14a4cb4165d049badf750397dc
|
bidb/keys/tasks.py
|
bidb/keys/tasks.py
|
import celery
import subprocess
from bidb.utils.tempfile import TemporaryDirectory
from .models import Key
@celery.task(soft_time_limit=60)
def update_or_create_key(uid):
with TemporaryDirectory() as homedir:
subprocess.check_call((
'gpg',
'--homedir', homedir,
'--keyserver', 'http://p80.pool.sks-keyservers.net/',
'--recv-keys', uid
))
data = subprocess.check_output((
'gpg',
'--homedir', homedir,
'--with-colons',
'--fixed-list-mode',
'--fingerprint',
uid,
))
for line in data.splitlines():
if line.startswith('uid:'):
name = line.split(':')[9]
break
else:
raise ValueError("Could not parse name from key: {}".format(data))
return Key.objects.update_or_create(uid=uid, defaults={
'name': name,
})
|
Add async task to update/create key ids.
|
Add async task to update/create key ids.
This might need to be reworked so that Key instances always exist but then
we fill in 'name' etc. later/asynchronously.
Signed-off-by: Chris Lamb <29e6d179a8d73471df7861382db6dd7e64138033@debian.org>
|
Python
|
agpl-3.0
|
lamby/buildinfo.debian.net,lamby/buildinfo.debian.net
|
Add async task to update/create key ids.
This might need to be reworked so that Key instances always exist but then
we fill in 'name' etc. later/asynchronously.
Signed-off-by: Chris Lamb <29e6d179a8d73471df7861382db6dd7e64138033@debian.org>
|
import celery
import subprocess
from bidb.utils.tempfile import TemporaryDirectory
from .models import Key
@celery.task(soft_time_limit=60)
def update_or_create_key(uid):
with TemporaryDirectory() as homedir:
subprocess.check_call((
'gpg',
'--homedir', homedir,
'--keyserver', 'http://p80.pool.sks-keyservers.net/',
'--recv-keys', uid
))
data = subprocess.check_output((
'gpg',
'--homedir', homedir,
'--with-colons',
'--fixed-list-mode',
'--fingerprint',
uid,
))
for line in data.splitlines():
if line.startswith('uid:'):
name = line.split(':')[9]
break
else:
raise ValueError("Could not parse name from key: {}".format(data))
return Key.objects.update_or_create(uid=uid, defaults={
'name': name,
})
|
<commit_before><commit_msg>Add async task to update/create key ids.
This might need to be reworked so that Key instances always exist but then
we fill in 'name' etc. later/asynchronously.
Signed-off-by: Chris Lamb <29e6d179a8d73471df7861382db6dd7e64138033@debian.org><commit_after>
|
import celery
import subprocess
from bidb.utils.tempfile import TemporaryDirectory
from .models import Key
@celery.task(soft_time_limit=60)
def update_or_create_key(uid):
with TemporaryDirectory() as homedir:
subprocess.check_call((
'gpg',
'--homedir', homedir,
'--keyserver', 'http://p80.pool.sks-keyservers.net/',
'--recv-keys', uid
))
data = subprocess.check_output((
'gpg',
'--homedir', homedir,
'--with-colons',
'--fixed-list-mode',
'--fingerprint',
uid,
))
for line in data.splitlines():
if line.startswith('uid:'):
name = line.split(':')[9]
break
else:
raise ValueError("Could not parse name from key: {}".format(data))
return Key.objects.update_or_create(uid=uid, defaults={
'name': name,
})
|
Add async task to update/create key ids.
This might need to be reworked so that Key instances always exist but then
we fill in 'name' etc. later/asynchronously.
Signed-off-by: Chris Lamb <29e6d179a8d73471df7861382db6dd7e64138033@debian.org>import celery
import subprocess
from bidb.utils.tempfile import TemporaryDirectory
from .models import Key
@celery.task(soft_time_limit=60)
def update_or_create_key(uid):
with TemporaryDirectory() as homedir:
subprocess.check_call((
'gpg',
'--homedir', homedir,
'--keyserver', 'http://p80.pool.sks-keyservers.net/',
'--recv-keys', uid
))
data = subprocess.check_output((
'gpg',
'--homedir', homedir,
'--with-colons',
'--fixed-list-mode',
'--fingerprint',
uid,
))
for line in data.splitlines():
if line.startswith('uid:'):
name = line.split(':')[9]
break
else:
raise ValueError("Could not parse name from key: {}".format(data))
return Key.objects.update_or_create(uid=uid, defaults={
'name': name,
})
|
<commit_before><commit_msg>Add async task to update/create key ids.
This might need to be reworked so that Key instances always exist but then
we fill in 'name' etc. later/asynchronously.
Signed-off-by: Chris Lamb <29e6d179a8d73471df7861382db6dd7e64138033@debian.org><commit_after>import celery
import subprocess
from bidb.utils.tempfile import TemporaryDirectory
from .models import Key
@celery.task(soft_time_limit=60)
def update_or_create_key(uid):
with TemporaryDirectory() as homedir:
subprocess.check_call((
'gpg',
'--homedir', homedir,
'--keyserver', 'http://p80.pool.sks-keyservers.net/',
'--recv-keys', uid
))
data = subprocess.check_output((
'gpg',
'--homedir', homedir,
'--with-colons',
'--fixed-list-mode',
'--fingerprint',
uid,
))
for line in data.splitlines():
if line.startswith('uid:'):
name = line.split(':')[9]
break
else:
raise ValueError("Could not parse name from key: {}".format(data))
return Key.objects.update_or_create(uid=uid, defaults={
'name': name,
})
|
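The rework hinted at in the commit message would split creation from enrichment: create a bare Key row synchronously and let the Celery task only fill in the name. A hedged sketch of that alternative flow (the helper, the assumed import path and the assumption that Key.name may start empty are illustrative, not part of the commit):
from bidb.keys.models import Key              # assumed model import path
from bidb.keys.tasks import update_or_create_key
def ensure_key(uid):
    # Always have a Key instance available immediately...
    key, created = Key.objects.get_or_create(uid=uid, defaults={'name': ''})
    # ...and fetch the real name from the keyserver asynchronously.
    if created or not key.name:
        update_or_create_key.delay(uid)
    return key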
|
bcda46423bd28b60aac8a9befd3e06670a9675c8
|
sync_scheduler.py
|
sync_scheduler.py
|
from tapiriik.database import db
from tapiriik.messagequeue import mq
from tapiriik.sync import Sync
from datetime import datetime
from pymongo.read_preferences import ReadPreference
import kombu
import time
Sync.InitializeWorkerBindings()
producer = kombu.Producer(Sync._channel, Sync._exchange)
while True:
queueing_at = datetime.utcnow()
users = db.users.find(
{
"NextSynchronization": {"$lte": datetime.utcnow()}
},
{
"_id": True,
"SynchronizationHostRestriction": True
},
read_preference=ReadPreference.PRIMARY
)
scheduled_ids = set()
for user in users:
producer.publish(str(user["_id"]), routing_key=user["SynchronizationHostRestriction"] if "SynchronizationHostRestriction" in user and user["SynchronizationHostRestriction"] else "")
scheduled_ids.add(user["_id"])
print("Scheduled %d users at %s" % (len(scheduled_ids), datetime.utcnow()))
db.users.update({"_id": {"$in": list(scheduled_ids)}}, {"$set": {"QueuedAt": queueing_at}, "$unset": {"NextSynchronization": True}}, multi=True)
time.sleep(1)
|
from tapiriik.database import db
from tapiriik.messagequeue import mq
from tapiriik.sync import Sync
from datetime import datetime
from pymongo.read_preferences import ReadPreference
import kombu
import time
Sync.InitializeWorkerBindings()
producer = kombu.Producer(Sync._channel, Sync._exchange)
while True:
queueing_at = datetime.utcnow()
users = db.users.find(
{
"NextSynchronization": {"$lte": datetime.utcnow()}
},
{
"_id": True,
"SynchronizationHostRestriction": True
},
read_preference=ReadPreference.PRIMARY
)
scheduled_ids = set()
for user in users:
producer.publish({"user_id": str(user["_id"]), "queued_at": queueing_at}, routing_key=user["SynchronizationHostRestriction"] if "SynchronizationHostRestriction" in user and user["SynchronizationHostRestriction"] else "")
scheduled_ids.add(user["_id"])
print("Scheduled %d users at %s" % (len(scheduled_ids), datetime.utcnow()))
db.users.update({"_id": {"$in": list(scheduled_ids)}}, {"$set": {"QueuedAt": queueing_at}, "$unset": {"NextSynchronization": True}}, multi=True)
time.sleep(1)
|
Include queued date in MQ messages
|
Include queued date in MQ messages
|
Python
|
apache-2.0
|
abs0/tapiriik,mduggan/tapiriik,marxin/tapiriik,niosus/tapiriik,marxin/tapiriik,gavioto/tapiriik,dmschreiber/tapiriik,dlenski/tapiriik,campbellr/tapiriik,abhijit86k/tapiriik,gavioto/tapiriik,dlenski/tapiriik,abhijit86k/tapiriik,marxin/tapiriik,dmschreiber/tapiriik,dmschreiber/tapiriik,olamy/tapiriik,campbellr/tapiriik,cpfair/tapiriik,marxin/tapiriik,brunoflores/tapiriik,cmgrote/tapiriik,cpfair/tapiriik,abhijit86k/tapiriik,cheatos101/tapiriik,cpfair/tapiriik,mjnbike/tapiriik,gavioto/tapiriik,mduggan/tapiriik,mduggan/tapiriik,mjnbike/tapiriik,cmgrote/tapiriik,abhijit86k/tapiriik,cheatos101/tapiriik,dmschreiber/tapiriik,brunoflores/tapiriik,cpfair/tapiriik,cheatos101/tapiriik,brunoflores/tapiriik,cgourlay/tapiriik,abs0/tapiriik,campbellr/tapiriik,mduggan/tapiriik,cheatos101/tapiriik,brunoflores/tapiriik,campbellr/tapiriik,cmgrote/tapiriik,abs0/tapiriik,dlenski/tapiriik,niosus/tapiriik,dlenski/tapiriik,cgourlay/tapiriik,olamy/tapiriik,cgourlay/tapiriik,cmgrote/tapiriik,niosus/tapiriik,niosus/tapiriik,gavioto/tapiriik,olamy/tapiriik,mjnbike/tapiriik,cgourlay/tapiriik,abs0/tapiriik,mjnbike/tapiriik,olamy/tapiriik
|
from tapiriik.database import db
from tapiriik.messagequeue import mq
from tapiriik.sync import Sync
from datetime import datetime
from pymongo.read_preferences import ReadPreference
import kombu
import time
Sync.InitializeWorkerBindings()
producer = kombu.Producer(Sync._channel, Sync._exchange)
while True:
queueing_at = datetime.utcnow()
users = db.users.find(
{
"NextSynchronization": {"$lte": datetime.utcnow()}
},
{
"_id": True,
"SynchronizationHostRestriction": True
},
read_preference=ReadPreference.PRIMARY
)
scheduled_ids = set()
for user in users:
producer.publish(str(user["_id"]), routing_key=user["SynchronizationHostRestriction"] if "SynchronizationHostRestriction" in user and user["SynchronizationHostRestriction"] else "")
scheduled_ids.add(user["_id"])
print("Scheduled %d users at %s" % (len(scheduled_ids), datetime.utcnow()))
db.users.update({"_id": {"$in": list(scheduled_ids)}}, {"$set": {"QueuedAt": queueing_at}, "$unset": {"NextSynchronization": True}}, multi=True)
time.sleep(1)
Include queued date in MQ messages
|
from tapiriik.database import db
from tapiriik.messagequeue import mq
from tapiriik.sync import Sync
from datetime import datetime
from pymongo.read_preferences import ReadPreference
import kombu
import time
Sync.InitializeWorkerBindings()
producer = kombu.Producer(Sync._channel, Sync._exchange)
while True:
queueing_at = datetime.utcnow()
users = db.users.find(
{
"NextSynchronization": {"$lte": datetime.utcnow()}
},
{
"_id": True,
"SynchronizationHostRestriction": True
},
read_preference=ReadPreference.PRIMARY
)
scheduled_ids = set()
for user in users:
producer.publish({"user_id": str(user["_id"]), "queued_at": queueing_at}, routing_key=user["SynchronizationHostRestriction"] if "SynchronizationHostRestriction" in user and user["SynchronizationHostRestriction"] else "")
scheduled_ids.add(user["_id"])
print("Scheduled %d users at %s" % (len(scheduled_ids), datetime.utcnow()))
db.users.update({"_id": {"$in": list(scheduled_ids)}}, {"$set": {"QueuedAt": queueing_at}, "$unset": {"NextSynchronization": True}}, multi=True)
time.sleep(1)
|
<commit_before>from tapiriik.database import db
from tapiriik.messagequeue import mq
from tapiriik.sync import Sync
from datetime import datetime
from pymongo.read_preferences import ReadPreference
import kombu
import time
Sync.InitializeWorkerBindings()
producer = kombu.Producer(Sync._channel, Sync._exchange)
while True:
queueing_at = datetime.utcnow()
users = db.users.find(
{
"NextSynchronization": {"$lte": datetime.utcnow()}
},
{
"_id": True,
"SynchronizationHostRestriction": True
},
read_preference=ReadPreference.PRIMARY
)
scheduled_ids = set()
for user in users:
producer.publish(str(user["_id"]), routing_key=user["SynchronizationHostRestriction"] if "SynchronizationHostRestriction" in user and user["SynchronizationHostRestriction"] else "")
scheduled_ids.add(user["_id"])
print("Scheduled %d users at %s" % (len(scheduled_ids), datetime.utcnow()))
db.users.update({"_id": {"$in": list(scheduled_ids)}}, {"$set": {"QueuedAt": queueing_at}, "$unset": {"NextSynchronization": True}}, multi=True)
time.sleep(1)
<commit_msg>Include queued date in MQ messages<commit_after>
|
from tapiriik.database import db
from tapiriik.messagequeue import mq
from tapiriik.sync import Sync
from datetime import datetime
from pymongo.read_preferences import ReadPreference
import kombu
import time
Sync.InitializeWorkerBindings()
producer = kombu.Producer(Sync._channel, Sync._exchange)
while True:
queueing_at = datetime.utcnow()
users = db.users.find(
{
"NextSynchronization": {"$lte": datetime.utcnow()}
},
{
"_id": True,
"SynchronizationHostRestriction": True
},
read_preference=ReadPreference.PRIMARY
)
scheduled_ids = set()
for user in users:
producer.publish({"user_id": str(user["_id"]), "queued_at": queueing_at}, routing_key=user["SynchronizationHostRestriction"] if "SynchronizationHostRestriction" in user and user["SynchronizationHostRestriction"] else "")
scheduled_ids.add(user["_id"])
print("Scheduled %d users at %s" % (len(scheduled_ids), datetime.utcnow()))
db.users.update({"_id": {"$in": list(scheduled_ids)}}, {"$set": {"QueuedAt": queueing_at}, "$unset": {"NextSynchronization": True}}, multi=True)
time.sleep(1)
|
from tapiriik.database import db
from tapiriik.messagequeue import mq
from tapiriik.sync import Sync
from datetime import datetime
from pymongo.read_preferences import ReadPreference
import kombu
import time
Sync.InitializeWorkerBindings()
producer = kombu.Producer(Sync._channel, Sync._exchange)
while True:
queueing_at = datetime.utcnow()
users = db.users.find(
{
"NextSynchronization": {"$lte": datetime.utcnow()}
},
{
"_id": True,
"SynchronizationHostRestriction": True
},
read_preference=ReadPreference.PRIMARY
)
scheduled_ids = set()
for user in users:
producer.publish(str(user["_id"]), routing_key=user["SynchronizationHostRestriction"] if "SynchronizationHostRestriction" in user and user["SynchronizationHostRestriction"] else "")
scheduled_ids.add(user["_id"])
print("Scheduled %d users at %s" % (len(scheduled_ids), datetime.utcnow()))
db.users.update({"_id": {"$in": list(scheduled_ids)}}, {"$set": {"QueuedAt": queueing_at}, "$unset": {"NextSynchronization": True}}, multi=True)
time.sleep(1)
Include queued date in MQ messagesfrom tapiriik.database import db
from tapiriik.messagequeue import mq
from tapiriik.sync import Sync
from datetime import datetime
from pymongo.read_preferences import ReadPreference
import kombu
import time
Sync.InitializeWorkerBindings()
producer = kombu.Producer(Sync._channel, Sync._exchange)
while True:
queueing_at = datetime.utcnow()
users = db.users.find(
{
"NextSynchronization": {"$lte": datetime.utcnow()}
},
{
"_id": True,
"SynchronizationHostRestriction": True
},
read_preference=ReadPreference.PRIMARY
)
scheduled_ids = set()
for user in users:
producer.publish({"user_id": str(user["_id"]), "queued_at": queueing_at}, routing_key=user["SynchronizationHostRestriction"] if "SynchronizationHostRestriction" in user and user["SynchronizationHostRestriction"] else "")
scheduled_ids.add(user["_id"])
print("Scheduled %d users at %s" % (len(scheduled_ids), datetime.utcnow()))
db.users.update({"_id": {"$in": list(scheduled_ids)}}, {"$set": {"QueuedAt": queueing_at}, "$unset": {"NextSynchronization": True}}, multi=True)
time.sleep(1)
|
<commit_before>from tapiriik.database import db
from tapiriik.messagequeue import mq
from tapiriik.sync import Sync
from datetime import datetime
from pymongo.read_preferences import ReadPreference
import kombu
import time
Sync.InitializeWorkerBindings()
producer = kombu.Producer(Sync._channel, Sync._exchange)
while True:
queueing_at = datetime.utcnow()
users = db.users.find(
{
"NextSynchronization": {"$lte": datetime.utcnow()}
},
{
"_id": True,
"SynchronizationHostRestriction": True
},
read_preference=ReadPreference.PRIMARY
)
scheduled_ids = set()
for user in users:
producer.publish(str(user["_id"]), routing_key=user["SynchronizationHostRestriction"] if "SynchronizationHostRestriction" in user and user["SynchronizationHostRestriction"] else "")
scheduled_ids.add(user["_id"])
print("Scheduled %d users at %s" % (len(scheduled_ids), datetime.utcnow()))
db.users.update({"_id": {"$in": list(scheduled_ids)}}, {"$set": {"QueuedAt": queueing_at}, "$unset": {"NextSynchronization": True}}, multi=True)
time.sleep(1)
<commit_msg>Include queued date in MQ messages<commit_after>from tapiriik.database import db
from tapiriik.messagequeue import mq
from tapiriik.sync import Sync
from datetime import datetime
from pymongo.read_preferences import ReadPreference
import kombu
import time
Sync.InitializeWorkerBindings()
producer = kombu.Producer(Sync._channel, Sync._exchange)
while True:
queueing_at = datetime.utcnow()
users = db.users.find(
{
"NextSynchronization": {"$lte": datetime.utcnow()}
},
{
"_id": True,
"SynchronizationHostRestriction": True
},
read_preference=ReadPreference.PRIMARY
)
scheduled_ids = set()
for user in users:
producer.publish({"user_id": str(user["_id"]), "queued_at": queueing_at}, routing_key=user["SynchronizationHostRestriction"] if "SynchronizationHostRestriction" in user and user["SynchronizationHostRestriction"] else "")
scheduled_ids.add(user["_id"])
print("Scheduled %d users at %s" % (len(scheduled_ids), datetime.utcnow()))
db.users.update({"_id": {"$in": list(scheduled_ids)}}, {"$set": {"QueuedAt": queueing_at}, "$unset": {"NextSynchronization": True}}, multi=True)
time.sleep(1)
|
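On the consumer side the payload is now a small dict rather than a bare user id, so the queued timestamp travels with the message. A hedged sketch of what unpacking could look like in a worker callback (names are illustrative, not the actual tapiriik worker code):
from datetime import datetime
def on_message(body, message):
    if isinstance(body, dict):
        # New-style payload: {"user_id": "...", "queued_at": <datetime>}
        user_id, queued_at = body["user_id"], body["queued_at"]
    else:
        # Old-style payload was just the user id string.
        user_id, queued_at = body, None
    if queued_at is not None:
        print("User %s spent %s in the queue" % (user_id, datetime.utcnow() - queued_at))
    message.ack()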
5368a3728560f0da460a19b67af4c607dc01a0c7
|
teemof/analyze.py
|
teemof/analyze.py
|
# Analyze thermal conductivity results
# Date: June 2017
# Author: Kutay B. Sezginel
import os
import numpy as np
from teemof.read import avg_kt, get_kt, read_runs, read_legend
def analyze_trial_set(trial_set_dir, xkey='mass2', sort=True, t0=10, t1=20):
""" Read thermal conductivity for a set of trials, get approximate kt values """
trial_error, trial_std, x_data, y_data = [], [], [], []
for trial in os.listdir(trial_set_dir):
        # Read each direction and each run for the given trial (30 data lists)
trial_dir = os.path.join(trial_set_dir, trial)
run_data, time, runs_id = read_runs(trial_dir, verbose=False)
# Get avg kt for each direction and each run
run_kt = []
for d in run_data:
run_avg = get_kt(d, time, t0=10, t1=20)
run_kt.append(run_avg)
# Calculate standard deviation and error
run_std = np.std(run_kt)
trial_std.append(run_std)
min_kt, max_kt = min(run_kt), max(run_kt)
trial_error.append([min_kt, max_kt])
# Get average thermal conductivity for trial
avg_data = avg_kt(run_data)
trial_kt = get_kt(avg_data, time, t0=t0, t1=t1)
y_data.append(trial_kt)
x_value = read_legend(trial_dir, key=xkey)
x_data.append(x_value)
if sort:
x = [i[0] for i in sorted(zip(x_data, y_data))]
y = [i[1] for i in sorted(zip(x_data, y_data))]
else:
x, y = x_data, y_data
return dict(x=x, y=y, err=trial_error, std=trial_std)
|
Add thermal conductivity trend analysis library.
|
Add thermal conductivity trend analysis library.
|
Python
|
mit
|
kbsezginel/tee_mof,kbsezginel/tee_mof
|
Add thermal conductivity trend analysis library.
|
# Analyze thermal conductivity results
# Date: June 2017
# Author: Kutay B. Sezginel
import os
import numpy as np
from teemof.read import avg_kt, get_kt, read_runs, read_legend
def analyze_trial_set(trial_set_dir, xkey='mass2', sort=True, t0=10, t1=20):
""" Read thermal conductivity for a set of trials, get approximate kt values """
trial_error, trial_std, x_data, y_data = [], [], [], []
for trial in os.listdir(trial_set_dir):
        # Read each direction and each run for the given trial (30 data lists)
trial_dir = os.path.join(trial_set_dir, trial)
run_data, time, runs_id = read_runs(trial_dir, verbose=False)
# Get avg kt for each direction and each run
run_kt = []
for d in run_data:
run_avg = get_kt(d, time, t0=10, t1=20)
run_kt.append(run_avg)
# Calculate standard deviation and error
run_std = np.std(run_kt)
trial_std.append(run_std)
min_kt, max_kt = min(run_kt), max(run_kt)
trial_error.append([min_kt, max_kt])
# Get average thermal conductivity for trial
avg_data = avg_kt(run_data)
trial_kt = get_kt(avg_data, time, t0=t0, t1=t1)
y_data.append(trial_kt)
x_value = read_legend(trial_dir, key=xkey)
x_data.append(x_value)
if sort:
x = [i[0] for i in sorted(zip(x_data, y_data))]
y = [i[1] for i in sorted(zip(x_data, y_data))]
else:
x, y = x_data, y_data
return dict(x=x, y=y, err=trial_error, std=trial_std)
|
<commit_before><commit_msg>Add thermal conductivity trend analysis library.<commit_after>
|
# Analyze thermal conductivity results
# Date: June 2017
# Author: Kutay B. Sezginel
import os
import numpy as np
from teemof.read import avg_kt, get_kt, read_runs, read_legend
def analyze_trial_set(trial_set_dir, xkey='mass2', sort=True, t0=10, t1=20):
""" Read thermal conductivity for a set of trials, get approximate kt values """
trial_error, trial_std, x_data, y_data = [], [], [], []
for trial in os.listdir(trial_set_dir):
        # Read each direction and each run for the given trial (30 data lists)
trial_dir = os.path.join(trial_set_dir, trial)
run_data, time, runs_id = read_runs(trial_dir, verbose=False)
# Get avg kt for each direction and each run
run_kt = []
for d in run_data:
run_avg = get_kt(d, time, t0=10, t1=20)
run_kt.append(run_avg)
# Calculate standard deviation and error
run_std = np.std(run_kt)
trial_std.append(run_std)
min_kt, max_kt = min(run_kt), max(run_kt)
trial_error.append([min_kt, max_kt])
# Get average thermal conductivity for trial
avg_data = avg_kt(run_data)
trial_kt = get_kt(avg_data, time, t0=t0, t1=t1)
y_data.append(trial_kt)
x_value = read_legend(trial_dir, key=xkey)
x_data.append(x_value)
if sort:
x = [i[0] for i in sorted(zip(x_data, y_data))]
y = [i[1] for i in sorted(zip(x_data, y_data))]
else:
x, y = x_data, y_data
return dict(x=x, y=y, err=trial_error, std=trial_std)
|
Add thermal conductivity trend analysis library.# Analyze thermal conductivity results
# Date: June 2017
# Author: Kutay B. Sezginel
import os
import numpy as np
from teemof.read import avg_kt, get_kt, read_runs, read_legend
def analyze_trial_set(trial_set_dir, xkey='mass2', sort=True, t0=10, t1=20):
""" Read thermal conductivity for a set of trials, get approximate kt values """
trial_error, trial_std, x_data, y_data = [], [], [], []
for trial in os.listdir(trial_set_dir):
        # Read each direction and each run for the given trial (30 data lists)
trial_dir = os.path.join(trial_set_dir, trial)
run_data, time, runs_id = read_runs(trial_dir, verbose=False)
# Get avg kt for each direction and each run
run_kt = []
for d in run_data:
run_avg = get_kt(d, time, t0=10, t1=20)
run_kt.append(run_avg)
# Calculate standard deviation and error
run_std = np.std(run_kt)
trial_std.append(run_std)
min_kt, max_kt = min(run_kt), max(run_kt)
trial_error.append([min_kt, max_kt])
# Get average thermal conductivity for trial
avg_data = avg_kt(run_data)
trial_kt = get_kt(avg_data, time, t0=t0, t1=t1)
y_data.append(trial_kt)
x_value = read_legend(trial_dir, key=xkey)
x_data.append(x_value)
if sort:
x = [i[0] for i in sorted(zip(x_data, y_data))]
y = [i[1] for i in sorted(zip(x_data, y_data))]
else:
x, y = x_data, y_data
return dict(x=x, y=y, err=trial_error, std=trial_std)
|
<commit_before><commit_msg>Add thermal conductivity trend analysis library.<commit_after># Analyze thermal conductivity results
# Date: June 2017
# Author: Kutay B. Sezginel
import os
import numpy as np
from teemof.read import avg_kt, get_kt, read_runs, read_legend
def analyze_trial_set(trial_set_dir, xkey='mass2', sort=True, t0=10, t1=20):
""" Read thermal conductivity for a set of trials, get approximate kt values """
trial_error, trial_std, x_data, y_data = [], [], [], []
for trial in os.listdir(trial_set_dir):
        # Read each direction and each run for the given trial (30 data lists)
trial_dir = os.path.join(trial_set_dir, trial)
run_data, time, runs_id = read_runs(trial_dir, verbose=False)
# Get avg kt for each direction and each run
run_kt = []
for d in run_data:
run_avg = get_kt(d, time, t0=10, t1=20)
run_kt.append(run_avg)
# Calculate standard deviation and error
run_std = np.std(run_kt)
trial_std.append(run_std)
min_kt, max_kt = min(run_kt), max(run_kt)
trial_error.append([min_kt, max_kt])
# Get average thermal conductivity for trial
avg_data = avg_kt(run_data)
trial_kt = get_kt(avg_data, time, t0=t0, t1=t1)
y_data.append(trial_kt)
x_value = read_legend(trial_dir, key=xkey)
x_data.append(x_value)
if sort:
x = [i[0] for i in sorted(zip(x_data, y_data))]
y = [i[1] for i in sorted(zip(x_data, y_data))]
else:
x, y = x_data, y_data
return dict(x=x, y=y, err=trial_error, std=trial_std)
|
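A short usage sketch for analyze_trial_set, assuming a directory of trial folders laid out as read_runs expects (the path and x-axis key below are placeholders):
import matplotlib.pyplot as plt
from teemof.analyze import analyze_trial_set
# sort=False keeps x/y aligned with the err/std lists, which are not re-sorted by the function.
results = analyze_trial_set('/path/to/trial-set', xkey='mass2', sort=False)
# 'err' holds [min, max] per trial; convert to distances from the trial average for errorbar.
yerr = [[y - lo for y, (lo, hi) in zip(results['y'], results['err'])],
        [hi - y for y, (lo, hi) in zip(results['y'], results['err'])]]
plt.errorbar(results['x'], results['y'], yerr=yerr, fmt='o')
plt.xlabel('mass2')
plt.ylabel('approximate thermal conductivity')
plt.show()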
|
e01798c18faa59b2bedd8bd5e592a967512d94ef
|
16B/spw_setup.py
|
16B/spw_setup.py
|
# Line SPW setup for 16B projects
linespw_dict = {0: ["HI", "1.420405752GHz"],
3: ["OH1612", "1.612231GHz"],
5: ["OH1665", "1.6654018GHz"],
6: ["OH1667", "1.667359GHz"],
7: ["OH1720", "1.72053GHz"],
9: ["H152alp", "1.85425GHz"],
8: ["H153alp", "1.81825GHz"],
1: ["H166alp", "1.42473GHz"],
4: ["H158alp", "1.65154GHz"],
2: ["H164alp", "1.47734GHz"]}
|
Add line SPW setup info for 16B
|
Add line SPW setup info for 16B
|
Python
|
mit
|
e-koch/VLA_Lband,e-koch/VLA_Lband
|
Add line SPW setup info for 16B
|
# Line SPW setup for 16B projects
linespw_dict = {0: ["HI", "1.420405752GHz"],
3: ["OH1612", "1.612231GHz"],
5: ["OH1665", "1.6654018GHz"],
6: ["OH1667", "1.667359GHz"],
7: ["OH1720", "1.72053GHz"],
9: ["H152alp", "1.85425GHz"],
8: ["H153alp", "1.81825GHz"],
1: ["H166alp", "1.42473GHz"],
4: ["H158alp", "1.65154GHz"],
2: ["H164alp", "1.47734GHz"]}
|
<commit_before><commit_msg>Add line SPW setup info for 16B<commit_after>
|
# Line SPW setup for 16B projects
linespw_dict = {0: ["HI", "1.420405752GHz"],
3: ["OH1612", "1.612231GHz"],
5: ["OH1665", "1.6654018GHz"],
6: ["OH1667", "1.667359GHz"],
7: ["OH1720", "1.72053GHz"],
9: ["H152alp", "1.85425GHz"],
8: ["H153alp", "1.81825GHz"],
1: ["H166alp", "1.42473GHz"],
4: ["H158alp", "1.65154GHz"],
2: ["H164alp", "1.47734GHz"]}
|
Add line SPW setup info for 16B
# Line SPW setup for 16B projects
linespw_dict = {0: ["HI", "1.420405752GHz"],
3: ["OH1612", "1.612231GHz"],
5: ["OH1665", "1.6654018GHz"],
6: ["OH1667", "1.667359GHz"],
7: ["OH1720", "1.72053GHz"],
9: ["H152alp", "1.85425GHz"],
8: ["H153alp", "1.81825GHz"],
1: ["H166alp", "1.42473GHz"],
4: ["H158alp", "1.65154GHz"],
2: ["H164alp", "1.47734GHz"]}
|
<commit_before><commit_msg>Add line SPW setup info for 16B<commit_after>
# Line SPW setup for 16B projects
linespw_dict = {0: ["HI", "1.420405752GHz"],
3: ["OH1612", "1.612231GHz"],
5: ["OH1665", "1.6654018GHz"],
6: ["OH1667", "1.667359GHz"],
7: ["OH1720", "1.72053GHz"],
9: ["H152alp", "1.85425GHz"],
8: ["H153alp", "1.81825GHz"],
1: ["H166alp", "1.42473GHz"],
4: ["H158alp", "1.65154GHz"],
2: ["H164alp", "1.47734GHz"]}
|
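The dict keys are the SPW indices and each value pairs a line label with its rest frequency, so a reverse lookup by line name is a one-liner. A tiny illustrative sketch based on the dict above:
# Invert the setup dict: line name -> (spw index, rest frequency)
line_lookup = {name: (spw, freq) for spw, (name, freq) in linespw_dict.items()}
print(line_lookup['HI'])      # (0, '1.420405752GHz')
print(line_lookup['OH1667'])  # (6, '1.667359GHz')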
|
ed9375ef9ed0c7d6b98c827db2db0f59369eedb6
|
make-cbr.py
|
make-cbr.py
|
#!/usr/bin/env python
from __future__ import print_function
from path import Path
import sys
import subprocess
import re
for d in sys.argv[1:]:
filename = re.sub('\.?(/Check me|pdf|rar)$', '', d) + ".cbr"
dir = Path(d)
jpgs = dir.files('*.jpg') + dir.files('*.jpeg')
if len(jpgs) < 10:
        print('not enough jpegs found to make a cbr: ' + d, file=sys.stderr)
continue
command = ['/usr/local/bin/rar', 'a', filename]
try:
subprocess.check_call(command + jpgs)
except subprocess.CalledProcessError:
print('"{}" returned non-zero exit status'.format(command), file=sys.stderr)
print(d)
|
Add script to make cbr archive out of jpegs
|
Add script to make cbr archive out of jpegs
The cbr comic archive format is just jpegs compressed with rar.
This script will automate the process of creating one from a folder of
jpegs.
|
Python
|
bsd-3-clause
|
FreekKalter/linux-scripts,FreekKalter/linux-scripts,FreekKalter/linux-scripts,FreekKalter/linux-scripts
|
Add script to make cbr archive out of jpegs
The cbr comic archive format is just jpegs compressed with rar.
This script will automate the process of creating one from a folder of
jpegs.
|
#!/usr/bin/env python
from __future__ import print_function
from path import Path
import sys
import subprocess
import re
for d in sys.argv[1:]:
filename = re.sub('\.?(/Check me|pdf|rar)$', '', d) + ".cbr"
dir = Path(d)
jpgs = dir.files('*.jpg') + dir.files('*.jpeg')
if len(jpgs) < 10:
        print('not enough jpegs found to make a cbr: ' + d, file=sys.stderr)
continue
command = ['/usr/local/bin/rar', 'a', filename]
try:
subprocess.check_call(command + jpgs)
except subprocess.CalledProcessError:
print('"{}" returned non-zero exit status'.format(command), file=sys.stderr)
print(d)
|
<commit_before><commit_msg>Add script to make cbr archive out of jpegs
The cbr comic archive format is just jpegs compressed with rar.
This script will automate the process of creating one from a folder of
jpegs.<commit_after>
|
#!/usr/bin/env python
from __future__ import print_function
from path import Path
import sys
import subprocess
import re
for d in sys.argv[1:]:
filename = re.sub('\.?(/Check me|pdf|rar)$', '', d) + ".cbr"
dir = Path(d)
jpgs = dir.files('*.jpg') + dir.files('*.jpeg')
if len(jpgs) < 10:
        print('not enough jpegs found to make a cbr: ' + d, file=sys.stderr)
continue
command = ['/usr/local/bin/rar', 'a', filename]
try:
subprocess.check_call(command + jpgs)
except subprocess.CalledProcessError:
print('"{}" returned non-zero exit status'.format(command), file=sys.stderr)
print(d)
|
Add script to make cbr archive out of jpegs
The cbr comic archive format is just jpegs compressed with rar.
This script will automate the process of creating one from a folder of
jpegs.#!/usr/bin/env python
from __future__ import print_function
from path import Path
import sys
import subprocess
import re
for d in sys.argv[1:]:
filename = re.sub('\.?(/Check me|pdf|rar)$', '', d) + ".cbr"
dir = Path(d)
jpgs = dir.files('*.jpg') + dir.files('*.jpeg')
if len(jpgs) < 10:
        print('not enough jpegs found to make a cbr: ' + d, file=sys.stderr)
continue
command = ['/usr/local/bin/rar', 'a', filename]
try:
subprocess.check_call(command + jpgs)
except subprocess.CalledProcessError:
print('"{}" returned non-zero exit status'.format(command), file=sys.stderr)
print(d)
|
<commit_before><commit_msg>Add script to make cbr archive out of jpegs
The cbr comic archive format is just jpegs compressed with rar.
This script will automate the process of creating one from a folder of
jpegs.<commit_after>#!/usr/bin/env python
from __future__ import print_function
from path import Path
import sys
import subprocess
import re
for d in sys.argv[1:]:
filename = re.sub('\.?(/Check me|pdf|rar)$', '', d) + ".cbr"
dir = Path(d)
jpgs = dir.files('*.jpg') + dir.files('*.jpeg')
if len(jpgs) < 10:
        print('not enough jpegs found to make a cbr: ' + d, file=sys.stderr)
continue
command = ['/usr/local/bin/rar', 'a', filename]
try:
subprocess.check_call(command + jpgs)
except subprocess.CalledProcessError:
print('"{}" returned non-zero exit status'.format(command), file=sys.stderr)
print(d)
|
|
5be4fde44d7ce7cb7937f2dccdd097aa47faf0d7
|
DataWrangling/process_csv.py
|
DataWrangling/process_csv.py
|
# -*- coding: utf-8 -*-
'''
Transform csv files in dict structures and print in a pretty form
'''
import os
import pprint
import csv
# Set the directory for the data and the name of the file
DATADIR = '../Data/'
DATAFILE = 'beatles-diskography.csv'
def parse_csv(datafile):
data = []
# Open the file
with open(datafile, 'rb') as sd:
# Read data as a dictionary
r = csv.DictReader(sd)
for line in r:
data.append(line)
return data
if __name__ == '__main__':
# get the file for work
datafile = os.path.join(DATADIR, DATAFILE)
# return a dict
d = parse_csv(datafile)
# print data
pprint.pprint(d)
|
Add script to transform csv files in dict structure
|
feat: Add script to transform csv files in dict structure
Reads a csv file to transform in dict structure and then print it
in a friendly format
|
Python
|
mit
|
aguijarro/DataSciencePython
|
feat: Add script to transform csv files in dict structure
Reads a csv file to transform in dict structure and then print it
in a friendly format
|
# -*- coding: utf-8 -*-
'''
Transform csv files in dict structures and print in a pretty form
'''
import os
import pprint
import csv
# Set the directory for the data and the name of the file
DATADIR = '../Data/'
DATAFILE = 'beatles-diskography.csv'
def parse_csv(datafile):
data = []
# Open the file
with open(datafile, 'rb') as sd:
# Read data as a dictionary
r = csv.DictReader(sd)
for line in r:
data.append(line)
return data
if __name__ == '__main__':
# get the file for work
datafile = os.path.join(DATADIR, DATAFILE)
# return a dict
d = parse_csv(datafile)
# print data
pprint.pprint(d)
|
<commit_before><commit_msg>feat: Add script to transform csv files in dict structure
Reads a csv file to transform in dict structure and then print it
in a friendly format<commit_after>
|
# -*- coding: utf-8 -*-
'''
Transform csv files in dict structures and print in a pretty form
'''
import os
import pprint
import csv
# Set the directory for the data and the name of the file
DATADIR = '../Data/'
DATAFILE = 'beatles-diskography.csv'
def parse_csv(datafile):
data = []
# Open the file
with open(datafile, 'rb') as sd:
# Read data as a dictionary
r = csv.DictReader(sd)
for line in r:
data.append(line)
return data
if __name__ == '__main__':
# get the file for work
datafile = os.path.join(DATADIR, DATAFILE)
# return a dict
d = parse_csv(datafile)
# print data
pprint.pprint(d)
|
feat: Add script to transform csv files in dict structure
Reads a csv file to transform in dict structure and then print it
in a friendly format# -*- coding: utf-8 -*-
'''
Transform csv files in dict structures and print in a pretty form
'''
import os
import pprint
import csv
# Set the directory for the data and the name of the file
DATADIR = '../Data/'
DATAFILE = 'beatles-diskography.csv'
def parse_csv(datafile):
data = []
# Open the file
with open(datafile, 'rb') as sd:
# Read data as a dictionary
r = csv.DictReader(sd)
for line in r:
data.append(line)
return data
if __name__ == '__main__':
# get the file for work
datafile = os.path.join(DATADIR, DATAFILE)
# return a dict
d = parse_csv(datafile)
# print data
pprint.pprint(d)
|
<commit_before><commit_msg>feat: Add script to transform csv files in dict structure
Reads a csv file to transform in dict structure and then print it
in a friendly format<commit_after># -*- coding: utf-8 -*-
'''
Transform csv files in dict structures and print in a pretty form
'''
import os
import pprint
import csv
# Set the directory for the data and the name of the file
DATADIR = '../Data/'
DATAFILE = 'beatles-diskography.csv'
def parse_csv(datafile):
data = []
# Open the file
with open(datafile, 'rb') as sd:
# Read data as a dictionary
r = csv.DictReader(sd)
for line in r:
data.append(line)
return data
if __name__ == '__main__':
# get the file for work
datafile = os.path.join(DATADIR, DATAFILE)
# return a dict
d = parse_csv(datafile)
# print data
pprint.pprint(d)
|
|
e14ee15116ee2137d528d298ca38e26e4f02f09f
|
htpcfrontend.py
|
htpcfrontend.py
|
from flask import Flask, render_template
from settings import *
import jsonrpclib
app = Flask(__name__)
@app.route('/')
def index():
xbmc = jsonrpclib.Server(SERVER_ADDRESS)
episodes = xbmc.VideoLibrary.GetRecentlyAddedEpisodes()
recently_added_episodes = []
# tidy up filenames of recently added episodes
for episode in episodes['episodes'][:NUM_RECENT_EPISODES]:
filename = episode['file'].split('/').pop().replace('.', ' ')
recently_added_episodes.append(filename)
# currently playing
#currently_playing = xbmc.VideoPlaylist.GetItems(id=1)
#time = xbmc.VideoPlayer.GetTime()
return render_template('index.html',
recently_added_episodes = recently_added_episodes,
applications = APPLICATIONS
)
if __name__ == '__main__':
app.run(debug=True)
|
from flask import Flask, render_template
from settings import *
import jsonrpclib
app = Flask(__name__)
@app.route('/')
def index():
xbmc = jsonrpclib.Server(SERVER_ADDRESS)
episodes = xbmc.VideoLibrary.GetRecentlyAddedEpisodes()
recently_added_episodes = []
# tidy up filenames of recently added episodes
for episode in episodes['episodes'][:NUM_RECENT_EPISODES]:
filename = episode['file'].split('/').pop().replace('.', ' ')
recently_added_episodes.append(filename)
return render_template('index.html',
recently_added_episodes = recently_added_episodes,
applications = APPLICATIONS
)
if __name__ == '__main__':
app.run(debug=True)
|
Revert "retrieve currently playing info (commented out)"
|
Revert "retrieve currently playing info (commented out)"
This reverts commit 2c07f2110c844ce86af2fc0b818db196379d9310.
|
Python
|
mit
|
robweber/maraschino,insertnamehere1/maraschino,mrkipling/maraschino,gugahoi/maraschino,runjmc/maraschino,runjmc/maraschino,insertnamehere1/maraschino,gugahoi/maraschino,mboeru/maraschino,awagnon/maraschino,mrkipling/maraschino,insertnamehere1/maraschino,mboeru/maraschino,awagnon/maraschino,robweber/maraschino,awagnon/maraschino,insertnamehere1/maraschino,mrkipling/maraschino,runjmc/maraschino,insertnamehere1/maraschino,mboeru/maraschino,runjmc/maraschino,runjmc/maraschino,mboeru/maraschino,gugahoi/maraschino,awagnon/maraschino,robweber/maraschino,mboeru/maraschino,gugahoi/maraschino,mrkipling/maraschino,robweber/maraschino,awagnon/maraschino,robweber/maraschino,mrkipling/maraschino
|
from flask import Flask, render_template
from settings import *
import jsonrpclib
app = Flask(__name__)
@app.route('/')
def index():
xbmc = jsonrpclib.Server(SERVER_ADDRESS)
episodes = xbmc.VideoLibrary.GetRecentlyAddedEpisodes()
recently_added_episodes = []
# tidy up filenames of recently added episodes
for episode in episodes['episodes'][:NUM_RECENT_EPISODES]:
filename = episode['file'].split('/').pop().replace('.', ' ')
recently_added_episodes.append(filename)
# currently playing
#currently_playing = xbmc.VideoPlaylist.GetItems(id=1)
#time = xbmc.VideoPlayer.GetTime()
return render_template('index.html',
recently_added_episodes = recently_added_episodes,
applications = APPLICATIONS
)
if __name__ == '__main__':
app.run(debug=True)
Revert "retrieve currently playing info (commented out)"
This reverts commit 2c07f2110c844ce86af2fc0b818db196379d9310.
|
from flask import Flask, render_template
from settings import *
import jsonrpclib
app = Flask(__name__)
@app.route('/')
def index():
xbmc = jsonrpclib.Server(SERVER_ADDRESS)
episodes = xbmc.VideoLibrary.GetRecentlyAddedEpisodes()
recently_added_episodes = []
# tidy up filenames of recently added episodes
for episode in episodes['episodes'][:NUM_RECENT_EPISODES]:
filename = episode['file'].split('/').pop().replace('.', ' ')
recently_added_episodes.append(filename)
return render_template('index.html',
recently_added_episodes = recently_added_episodes,
applications = APPLICATIONS
)
if __name__ == '__main__':
app.run(debug=True)
|
<commit_before>from flask import Flask, render_template
from settings import *
import jsonrpclib
app = Flask(__name__)
@app.route('/')
def index():
xbmc = jsonrpclib.Server(SERVER_ADDRESS)
episodes = xbmc.VideoLibrary.GetRecentlyAddedEpisodes()
recently_added_episodes = []
# tidy up filenames of recently added episodes
for episode in episodes['episodes'][:NUM_RECENT_EPISODES]:
filename = episode['file'].split('/').pop().replace('.', ' ')
recently_added_episodes.append(filename)
# currently playing
#currently_playing = xbmc.VideoPlaylist.GetItems(id=1)
#time = xbmc.VideoPlayer.GetTime()
return render_template('index.html',
recently_added_episodes = recently_added_episodes,
applications = APPLICATIONS
)
if __name__ == '__main__':
app.run(debug=True)
<commit_msg>Revert "retrieve currently playing info (commented out)"
This reverts commit 2c07f2110c844ce86af2fc0b818db196379d9310.<commit_after>
|
from flask import Flask, render_template
from settings import *
import jsonrpclib
app = Flask(__name__)
@app.route('/')
def index():
xbmc = jsonrpclib.Server(SERVER_ADDRESS)
episodes = xbmc.VideoLibrary.GetRecentlyAddedEpisodes()
recently_added_episodes = []
# tidy up filenames of recently added episodes
for episode in episodes['episodes'][:NUM_RECENT_EPISODES]:
filename = episode['file'].split('/').pop().replace('.', ' ')
recently_added_episodes.append(filename)
return render_template('index.html',
recently_added_episodes = recently_added_episodes,
applications = APPLICATIONS
)
if __name__ == '__main__':
app.run(debug=True)
|
from flask import Flask, render_template
from settings import *
import jsonrpclib
app = Flask(__name__)
@app.route('/')
def index():
xbmc = jsonrpclib.Server(SERVER_ADDRESS)
episodes = xbmc.VideoLibrary.GetRecentlyAddedEpisodes()
recently_added_episodes = []
# tidy up filenames of recently added episodes
for episode in episodes['episodes'][:NUM_RECENT_EPISODES]:
filename = episode['file'].split('/').pop().replace('.', ' ')
recently_added_episodes.append(filename)
# currently playing
#currently_playing = xbmc.VideoPlaylist.GetItems(id=1)
#time = xbmc.VideoPlayer.GetTime()
return render_template('index.html',
recently_added_episodes = recently_added_episodes,
applications = APPLICATIONS
)
if __name__ == '__main__':
app.run(debug=True)
Revert "retrieve currently playing info (commented out)"
This reverts commit 2c07f2110c844ce86af2fc0b818db196379d9310.from flask import Flask, render_template
from settings import *
import jsonrpclib
app = Flask(__name__)
@app.route('/')
def index():
xbmc = jsonrpclib.Server(SERVER_ADDRESS)
episodes = xbmc.VideoLibrary.GetRecentlyAddedEpisodes()
recently_added_episodes = []
# tidy up filenames of recently added episodes
for episode in episodes['episodes'][:NUM_RECENT_EPISODES]:
filename = episode['file'].split('/').pop().replace('.', ' ')
recently_added_episodes.append(filename)
return render_template('index.html',
recently_added_episodes = recently_added_episodes,
applications = APPLICATIONS
)
if __name__ == '__main__':
app.run(debug=True)
|
<commit_before>from flask import Flask, render_template
from settings import *
import jsonrpclib
app = Flask(__name__)
@app.route('/')
def index():
xbmc = jsonrpclib.Server(SERVER_ADDRESS)
episodes = xbmc.VideoLibrary.GetRecentlyAddedEpisodes()
recently_added_episodes = []
# tidy up filenames of recently added episodes
for episode in episodes['episodes'][:NUM_RECENT_EPISODES]:
filename = episode['file'].split('/').pop().replace('.', ' ')
recently_added_episodes.append(filename)
# currently playing
#currently_playing = xbmc.VideoPlaylist.GetItems(id=1)
#time = xbmc.VideoPlayer.GetTime()
return render_template('index.html',
recently_added_episodes = recently_added_episodes,
applications = APPLICATIONS
)
if __name__ == '__main__':
app.run(debug=True)
<commit_msg>Revert "retrieve currently playing info (commented out)"
This reverts commit 2c07f2110c844ce86af2fc0b818db196379d9310.<commit_after>from flask import Flask, render_template
from settings import *
import jsonrpclib
app = Flask(__name__)
@app.route('/')
def index():
xbmc = jsonrpclib.Server(SERVER_ADDRESS)
episodes = xbmc.VideoLibrary.GetRecentlyAddedEpisodes()
recently_added_episodes = []
# tidy up filenames of recently added episodes
for episode in episodes['episodes'][:NUM_RECENT_EPISODES]:
filename = episode['file'].split('/').pop().replace('.', ' ')
recently_added_episodes.append(filename)
return render_template('index.html',
recently_added_episodes = recently_added_episodes,
applications = APPLICATIONS
)
if __name__ == '__main__':
app.run(debug=True)
|
066c248ca31b13bcde7ace756c27cf4d8b46ef9d
|
tests/test_authors.py
|
tests/test_authors.py
|
from os.path import dirname, join
import subprocess
import unittest
from cvsgit.command.clone import Clone
from cvsgit.git import Git
from cvsgit.utils import Tempdir
from cvsgit.main import UnknownAuthorFullnames
class Test(unittest.TestCase):
def setUp(self):
self.tempdir = Tempdir(cwd=True)
self.tempdir.__enter__()
self.cvsdir = join(dirname(__file__), 'data', 'greek', 'tree')
self.gitdir = 'tree'
def tearDown(self):
self.tempdir.__exit__(None, None, None)
def cvs_clone(self, *args):
"""Clone CVS repo with default and additional arguments.
"""
args += (self.cvsdir, self.gitdir)
self.assertEquals(0, Clone().eval('--quiet', '--no-skip-latest', *args))
def git_authors(self):
"""Return author name and email addresses from git.
"""
return Git(self.gitdir).check_command('log', '--format=%an <%ae>',
stdout=subprocess.PIPE)
def test_clone_without_authors(self):
"""Clone without author mapping.
"""
self.cvs_clone()
self.assertEquals('uwe <uwe>', self.git_authors())
def test_clone_with_authors_name(self):
"""Clone with author fullname mapping.
"""
with open('authors', 'w') as authors:
authors.write('uwe Some Dude\n')
self.cvs_clone('--domain=example.com', '--authors=authors')
self.assertEquals('Some Dude <uwe@example.com>', self.git_authors())
def test_clone_with_authors_name_and_email(self):
"""Clone with author fullname and email mapping.
"""
with open('authors', 'w') as authors:
authors.write('uwe Some Dude <dude@example.com>\n')
self.cvs_clone('--authors=authors')
self.assertEquals('Some Dude <dude@example.com>', self.git_authors())
def test_clone_with_unknown_auhtor(self):
"""Clone with unknown author and --stop-on-unknown-author.
"""
with open('authors', 'w') as authors:
authors.write('nobody Non-existent User\n')
with self.assertRaises(UnknownAuthorFullnames):
self.cvs_clone('--authors=authors', '--stop-on-unknown-author')
|
Add test case for authors mapping
|
Add test case for authors mapping
|
Python
|
isc
|
ustuehler/git-cvs,ustuehler/git-cvs
|
Add test case for authors mapping
|
from os.path import dirname, join
import subprocess
import unittest
from cvsgit.command.clone import Clone
from cvsgit.git import Git
from cvsgit.utils import Tempdir
from cvsgit.main import UnknownAuthorFullnames
class Test(unittest.TestCase):
def setUp(self):
self.tempdir = Tempdir(cwd=True)
self.tempdir.__enter__()
self.cvsdir = join(dirname(__file__), 'data', 'greek', 'tree')
self.gitdir = 'tree'
def tearDown(self):
self.tempdir.__exit__(None, None, None)
def cvs_clone(self, *args):
"""Clone CVS repo with default and additional arguments.
"""
args += (self.cvsdir, self.gitdir)
self.assertEquals(0, Clone().eval('--quiet', '--no-skip-latest', *args))
def git_authors(self):
"""Return author name and email addresses from git.
"""
return Git(self.gitdir).check_command('log', '--format=%an <%ae>',
stdout=subprocess.PIPE)
def test_clone_without_authors(self):
"""Clone without author mapping.
"""
self.cvs_clone()
self.assertEquals('uwe <uwe>', self.git_authors())
def test_clone_with_authors_name(self):
"""Clone with author fullname mapping.
"""
with open('authors', 'w') as authors:
authors.write('uwe Some Dude\n')
self.cvs_clone('--domain=example.com', '--authors=authors')
self.assertEquals('Some Dude <uwe@example.com>', self.git_authors())
def test_clone_with_authors_name_and_email(self):
"""Clone with author fullname and email mapping.
"""
with open('authors', 'w') as authors:
authors.write('uwe Some Dude <dude@example.com>\n')
self.cvs_clone('--authors=authors')
self.assertEquals('Some Dude <dude@example.com>', self.git_authors())
def test_clone_with_unknown_auhtor(self):
"""Clone with unknown author and --stop-on-unknown-author.
"""
with open('authors', 'w') as authors:
authors.write('nobody Non-existent User\n')
with self.assertRaises(UnknownAuthorFullnames):
self.cvs_clone('--authors=authors', '--stop-on-unknown-author')
|
<commit_before><commit_msg>Add test case for authors mapping<commit_after>
|
from os.path import dirname, join
import subprocess
import unittest
from cvsgit.command.clone import Clone
from cvsgit.git import Git
from cvsgit.utils import Tempdir
from cvsgit.main import UnknownAuthorFullnames
class Test(unittest.TestCase):
def setUp(self):
self.tempdir = Tempdir(cwd=True)
self.tempdir.__enter__()
self.cvsdir = join(dirname(__file__), 'data', 'greek', 'tree')
self.gitdir = 'tree'
def tearDown(self):
self.tempdir.__exit__(None, None, None)
def cvs_clone(self, *args):
"""Clone CVS repo with default and additional arguments.
"""
args += (self.cvsdir, self.gitdir)
self.assertEquals(0, Clone().eval('--quiet', '--no-skip-latest', *args))
def git_authors(self):
"""Return author name and email addresses from git.
"""
return Git(self.gitdir).check_command('log', '--format=%an <%ae>',
stdout=subprocess.PIPE)
def test_clone_without_authors(self):
"""Clone without author mapping.
"""
self.cvs_clone()
self.assertEquals('uwe <uwe>', self.git_authors())
def test_clone_with_authors_name(self):
"""Clone with author fullname mapping.
"""
with open('authors', 'w') as authors:
authors.write('uwe Some Dude\n')
self.cvs_clone('--domain=example.com', '--authors=authors')
self.assertEquals('Some Dude <uwe@example.com>', self.git_authors())
def test_clone_with_authors_name_and_email(self):
"""Clone with author fullname and email mapping.
"""
with open('authors', 'w') as authors:
authors.write('uwe Some Dude <dude@example.com>\n')
self.cvs_clone('--authors=authors')
self.assertEquals('Some Dude <dude@example.com>', self.git_authors())
def test_clone_with_unknown_auhtor(self):
"""Clone with unknown author and --stop-on-unknown-author.
"""
with open('authors', 'w') as authors:
authors.write('nobody Non-existent User\n')
with self.assertRaises(UnknownAuthorFullnames):
self.cvs_clone('--authors=authors', '--stop-on-unknown-author')
|
Add test case for authors mappingfrom os.path import dirname, join
import subprocess
import unittest
from cvsgit.command.clone import Clone
from cvsgit.git import Git
from cvsgit.utils import Tempdir
from cvsgit.main import UnknownAuthorFullnames
class Test(unittest.TestCase):
def setUp(self):
self.tempdir = Tempdir(cwd=True)
self.tempdir.__enter__()
self.cvsdir = join(dirname(__file__), 'data', 'greek', 'tree')
self.gitdir = 'tree'
def tearDown(self):
self.tempdir.__exit__(None, None, None)
def cvs_clone(self, *args):
"""Clone CVS repo with default and additional arguments.
"""
args += (self.cvsdir, self.gitdir)
self.assertEquals(0, Clone().eval('--quiet', '--no-skip-latest', *args))
def git_authors(self):
"""Return author name and email addresses from git.
"""
return Git(self.gitdir).check_command('log', '--format=%an <%ae>',
stdout=subprocess.PIPE)
def test_clone_without_authors(self):
"""Clone without author mapping.
"""
self.cvs_clone()
self.assertEquals('uwe <uwe>', self.git_authors())
def test_clone_with_authors_name(self):
"""Clone with author fullname mapping.
"""
with open('authors', 'w') as authors:
authors.write('uwe Some Dude\n')
self.cvs_clone('--domain=example.com', '--authors=authors')
self.assertEquals('Some Dude <uwe@example.com>', self.git_authors())
def test_clone_with_authors_name_and_email(self):
"""Clone with author fullname and email mapping.
"""
with open('authors', 'w') as authors:
authors.write('uwe Some Dude <dude@example.com>\n')
self.cvs_clone('--authors=authors')
self.assertEquals('Some Dude <dude@example.com>', self.git_authors())
def test_clone_with_unknown_auhtor(self):
"""Clone with unknown author and --stop-on-unknown-author.
"""
with open('authors', 'w') as authors:
authors.write('nobody Non-existent User\n')
with self.assertRaises(UnknownAuthorFullnames):
self.cvs_clone('--authors=authors', '--stop-on-unknown-author')
|
<commit_before><commit_msg>Add test case for authors mapping<commit_after>from os.path import dirname, join
import subprocess
import unittest
from cvsgit.command.clone import Clone
from cvsgit.git import Git
from cvsgit.utils import Tempdir
from cvsgit.main import UnknownAuthorFullnames
class Test(unittest.TestCase):
def setUp(self):
self.tempdir = Tempdir(cwd=True)
self.tempdir.__enter__()
self.cvsdir = join(dirname(__file__), 'data', 'greek', 'tree')
self.gitdir = 'tree'
def tearDown(self):
self.tempdir.__exit__(None, None, None)
def cvs_clone(self, *args):
"""Clone CVS repo with default and additional arguments.
"""
args += (self.cvsdir, self.gitdir)
self.assertEquals(0, Clone().eval('--quiet', '--no-skip-latest', *args))
def git_authors(self):
"""Return author name and email addresses from git.
"""
return Git(self.gitdir).check_command('log', '--format=%an <%ae>',
stdout=subprocess.PIPE)
def test_clone_without_authors(self):
"""Clone without author mapping.
"""
self.cvs_clone()
self.assertEquals('uwe <uwe>', self.git_authors())
def test_clone_with_authors_name(self):
"""Clone with author fullname mapping.
"""
with open('authors', 'w') as authors:
authors.write('uwe Some Dude\n')
self.cvs_clone('--domain=example.com', '--authors=authors')
self.assertEquals('Some Dude <uwe@example.com>', self.git_authors())
def test_clone_with_authors_name_and_email(self):
"""Clone with author fullname and email mapping.
"""
with open('authors', 'w') as authors:
authors.write('uwe Some Dude <dude@example.com>\n')
self.cvs_clone('--authors=authors')
self.assertEquals('Some Dude <dude@example.com>', self.git_authors())
def test_clone_with_unknown_auhtor(self):
"""Clone with unknown author and --stop-on-unknown-author.
"""
with open('authors', 'w') as authors:
authors.write('nobody Non-existent User\n')
with self.assertRaises(UnknownAuthorFullnames):
self.cvs_clone('--authors=authors', '--stop-on-unknown-author')
|
|
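A note on the entry above: the tests imply a plain-text authors file with one CVS username per line, followed by a full name and, optionally, an email address in angle brackets. A small illustrative file (both entries are made-up examples) could look like this:

uwe Some Dude <dude@example.com>
guest Guest Account

When no email address is given, the tests expect it to be derived from the username plus the --domain option (uwe@example.com in the example above).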
982d57604c95ce7f3dd4a422cee82f5bec2b6553
|
tests/test_plotting.py
|
tests/test_plotting.py
|
"""Integration tests for plotting tools."""
from emdp import examples
from emdp.gridworld import GridWorldPlotter
from emdp import actions
import random
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
def test_plotting_integration():
mdp = examples.build_SB_example35()
trajectories = []
for _ in range(3): # 3 trajectories
trajectory = [mdp.reset()]
for _ in range(10): # 10 steps maximum
state, reward, done, info = mdp.step(random.sample([actions.LEFT, actions.RIGHT,
actions.DOWN, actions.UP], 1)[0])
trajectory.append(state)
trajectories.append(trajectory)
gwp = GridWorldPlotter(mdp.size,
mdp.has_absorbing_state) # alternatively you can use GridWorldPlotter.from_mdp(mdp)
fig = plt.figure(figsize=(10, 4))
ax = fig.add_subplot(121)
# trajectory
gwp.plot_trajectories(ax, trajectories)
gwp.plot_grid(ax)
# heatmap
ax = fig.add_subplot(122)
gwp.plot_heatmap(ax, trajectories)
gwp.plot_grid(ax)
|
Add integration test for plotting utilities
|
Add integration test for plotting utilities
|
Python
|
mit
|
zafarali/emdp
|
Add integration test for plotting utilities
|
"""Integration tests for plotting tools."""
from emdp import examples
from emdp.gridworld import GridWorldPlotter
from emdp import actions
import random
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
def test_plotting_integration():
mdp = examples.build_SB_example35()
trajectories = []
for _ in range(3): # 3 trajectories
trajectory = [mdp.reset()]
for _ in range(10): # 10 steps maximum
state, reward, done, info = mdp.step(random.sample([actions.LEFT, actions.RIGHT,
actions.DOWN, actions.UP], 1)[0])
trajectory.append(state)
trajectories.append(trajectory)
gwp = GridWorldPlotter(mdp.size,
mdp.has_absorbing_state) # alternatively you can use GridWorldPlotter.from_mdp(mdp)
fig = plt.figure(figsize=(10, 4))
ax = fig.add_subplot(121)
# trajectory
gwp.plot_trajectories(ax, trajectories)
gwp.plot_grid(ax)
# heatmap
ax = fig.add_subplot(122)
gwp.plot_heatmap(ax, trajectories)
gwp.plot_grid(ax)
|
<commit_before><commit_msg>Add integration test for plotting utilities<commit_after>
|
"""Integration tests for plotting tools."""
from emdp import examples
from emdp.gridworld import GridWorldPlotter
from emdp import actions
import random
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
def test_plotting_integration():
mdp = examples.build_SB_example35()
trajectories = []
for _ in range(3): # 3 trajectories
trajectory = [mdp.reset()]
for _ in range(10): # 10 steps maximum
state, reward, done, info = mdp.step(random.sample([actions.LEFT, actions.RIGHT,
actions.DOWN, actions.UP], 1)[0])
trajectory.append(state)
trajectories.append(trajectory)
gwp = GridWorldPlotter(mdp.size,
mdp.has_absorbing_state) # alternatively you can use GridWorldPlotter.from_mdp(mdp)
fig = plt.figure(figsize=(10, 4))
ax = fig.add_subplot(121)
# trajectory
gwp.plot_trajectories(ax, trajectories)
gwp.plot_grid(ax)
# heatmap
ax = fig.add_subplot(122)
gwp.plot_heatmap(ax, trajectories)
gwp.plot_grid(ax)
|
Add integration test for plotting utilities"""Integration tests for plotting tools."""
from emdp import examples
from emdp.gridworld import GridWorldPlotter
from emdp import actions
import random
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
def test_plotting_integration():
mdp = examples.build_SB_example35()
trajectories = []
for _ in range(3): # 3 trajectories
trajectory = [mdp.reset()]
for _ in range(10): # 10 steps maximum
state, reward, done, info = mdp.step(random.sample([actions.LEFT, actions.RIGHT,
actions.DOWN, actions.UP], 1)[0])
trajectory.append(state)
trajectories.append(trajectory)
gwp = GridWorldPlotter(mdp.size,
mdp.has_absorbing_state) # alternatively you can use GridWorldPlotter.from_mdp(mdp)
fig = plt.figure(figsize=(10, 4))
ax = fig.add_subplot(121)
# trajectory
gwp.plot_trajectories(ax, trajectories)
gwp.plot_grid(ax)
# heatmap
ax = fig.add_subplot(122)
gwp.plot_heatmap(ax, trajectories)
gwp.plot_grid(ax)
|
<commit_before><commit_msg>Add integration test for plotting utilities<commit_after>"""Integration tests for plotting tools."""
from emdp import examples
from emdp.gridworld import GridWorldPlotter
from emdp import actions
import random
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
def test_plotting_integration():
mdp = examples.build_SB_example35()
trajectories = []
for _ in range(3): # 3 trajectories
trajectory = [mdp.reset()]
for _ in range(10): # 10 steps maximum
state, reward, done, info = mdp.step(random.sample([actions.LEFT, actions.RIGHT,
actions.DOWN, actions.UP], 1)[0])
trajectory.append(state)
trajectories.append(trajectory)
gwp = GridWorldPlotter(mdp.size,
mdp.has_absorbing_state) # alternatively you can use GridWorldPlotter.from_mdp(mdp)
fig = plt.figure(figsize=(10, 4))
ax = fig.add_subplot(121)
# trajectory
gwp.plot_trajectories(ax, trajectories)
gwp.plot_grid(ax)
# heatmap
ax = fig.add_subplot(122)
gwp.plot_heatmap(ax, trajectories)
gwp.plot_grid(ax)
|
|
74c67a6dc619d5da6dbff67c1679859c3ac26281
|
tests/test_tabulate.py
|
tests/test_tabulate.py
|
from pgcli.packages.tabulate import tabulate
from textwrap import dedent
def test_dont_strip_leading_whitespace():
data = [[' abc']]
headers = ['xyz']
tbl, _ = tabulate(data, headers, tablefmt='psql')
assert tbl == dedent('''
+---------+
| xyz |
|---------|
| abc |
+---------+ ''').strip()
|
Add a test written by darikg. Not actually executed, please confirm it.
|
Add a test written by darikg. Not actually executed, please confirm it.
|
Python
|
bsd-3-clause
|
dbcli/pgcli,koljonen/pgcli,dbcli/pgcli,darikg/pgcli,koljonen/pgcli,d33tah/pgcli,d33tah/pgcli,darikg/pgcli
|
Add a test written by darikg. Not actually executed, please confirm it.
|
from pgcli.packages.tabulate import tabulate
from textwrap import dedent
def test_dont_strip_leading_whitespace():
data = [[' abc']]
headers = ['xyz']
tbl, _ = tabulate(data, headers, tablefmt='psql')
assert tbl == dedent('''
+---------+
| xyz |
|---------|
| abc |
+---------+ ''').strip()
|
<commit_before><commit_msg>Add a test written by darikg. Not actually executed, please confirm it.<commit_after>
|
from pgcli.packages.tabulate import tabulate
from textwrap import dedent
def test_dont_strip_leading_whitespace():
data = [[' abc']]
headers = ['xyz']
tbl, _ = tabulate(data, headers, tablefmt='psql')
assert tbl == dedent('''
+---------+
| xyz |
|---------|
| abc |
+---------+ ''').strip()
|
Add a test written by darikg. Not actually executed, please confirm it.from pgcli.packages.tabulate import tabulate
from textwrap import dedent
def test_dont_strip_leading_whitespace():
data = [[' abc']]
headers = ['xyz']
tbl, _ = tabulate(data, headers, tablefmt='psql')
assert tbl == dedent('''
+---------+
| xyz |
|---------|
| abc |
+---------+ ''').strip()
|
<commit_before><commit_msg>Add a test written by darikg. Not actually executed, please confirm it.<commit_after>from pgcli.packages.tabulate import tabulate
from textwrap import dedent
def test_dont_strip_leading_whitespace():
data = [[' abc']]
headers = ['xyz']
tbl, _ = tabulate(data, headers, tablefmt='psql')
assert tbl == dedent('''
+---------+
| xyz |
|---------|
| abc |
+---------+ ''').strip()
|
|
23003c2ad9c69a198e2b84a3a1c59ab13b165eb0
|
send_sms.py
|
send_sms.py
|
from twilio.rest import TwilioRestClient
# Find these values at https://twilio.com/user/account
account_sid = "AC00b0db4128bdf9e869bed9ec08e8xxxx" //check your account
auth_token = "9887b585dd9f708da2a154f33cd4xxxx" //check your account
client = TwilioRestClient(account_sid, auth_token)
message = client.messages.create(to="+xxx", from_="+twilio_number",
body="Hello there!")
|
Send sms to phones via twilio api
|
Send sms to phones via twilio api
First install the twilio package (pip install twilio), then run the script above to send an SMS, updating the account SID and auth token to match your own account.
|
Python
|
mit
|
Naveenkhasyap/python_scripts,Naveenkhasyap/python_scripts
|
Send sms to phones via twilio api
First install the twilio package (pip install twilio), then run the script above to send an SMS, updating the account SID and auth token to match your own account.
|
from twilio.rest import TwilioRestClient
# Find these values at https://twilio.com/user/account
account_sid = "AC00b0db4128bdf9e869bed9ec08e8xxxx" //check your account
auth_token = "9887b585dd9f708da2a154f33cd4xxxx" //check your account
client = TwilioRestClient(account_sid, auth_token)
message = client.messages.create(to="+xxx", from_="+twilio_number",
body="Hello there!")
|
<commit_before><commit_msg>Send sms to phones via twilio api
First install the twilio package (pip install twilio), then run the script above to send an SMS, updating the account SID and auth token to match your own account.<commit_after>
|
from twilio.rest import TwilioRestClient
# Find these values at https://twilio.com/user/account
account_sid = "AC00b0db4128bdf9e869bed9ec08e8xxxx" //check your account
auth_token = "9887b585dd9f708da2a154f33cd4xxxx" //check your account
client = TwilioRestClient(account_sid, auth_token)
message = client.messages.create(to="+xxx", from_="+twilio_number",
body="Hello there!")
|
Send sms to phones via twilio api
First install the twilio package (pip install twilio), then run the script above to send an SMS, updating the account SID and auth token to match your own account.from twilio.rest import TwilioRestClient
# Find these values at https://twilio.com/user/account
account_sid = "AC00b0db4128bdf9e869bed9ec08e8xxxx" //check your account
auth_token = "9887b585dd9f708da2a154f33cd4xxxx" //check your account
client = TwilioRestClient(account_sid, auth_token)
message = client.messages.create(to="+xxx", from_="+twilio_number",
body="Hello there!")
|
<commit_before><commit_msg>Send sms to phones via twilio api
First install the twilio package (pip install twilio), then run the script above to send an SMS, updating the account SID and auth token to match your own account.<commit_after>from twilio.rest import TwilioRestClient
# Find these values at https://twilio.com/user/account
account_sid = "AC00b0db4128bdf9e869bed9ec08e8xxxx" //check your account
auth_token = "9887b585dd9f708da2a154f33cd4xxxx" //check your account
client = TwilioRestClient(account_sid, auth_token)
message = client.messages.create(to="+xxx", from_="+twilio_number",
body="Hello there!")
|
|
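A short aside on the script above: TwilioRestClient belongs to the older 5.x line of the twilio package. On 6.x and later releases the same send goes through the Client class; a minimal sketch with placeholder credentials and phone numbers (all of them stand-ins, not real values) would be:

from twilio.rest import Client

client = Client("ACxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", "your_auth_token")
message = client.messages.create(to="+15551234567",
                                 from_="+15557654321",
                                 body="Hello there!")
print(message.sid)  # server-assigned message identifier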
f75d8e315a290bb1b5b78cacfbd52191abef0fdf
|
javelin/structure.py
|
javelin/structure.py
|
import numpy as np
from pandas import DataFrame
from javelin.unitcell import UnitCell
class Structure(object):
def __init__(self):
self.unitcell = UnitCell()
self.atoms = DataFrame(columns=['i', 'j', 'k', 'site',
'Z', 'symbol',
'rel_x', 'rel_y', 'rel_z',
'x', 'y', 'z']).set_index(['i', 'j', 'k', 'site'])
self.molecules = {}
@property
def number_of_atoms(self):
return len(self.atoms)
def get_atom_symbols(self):
return self.atoms.symbol.unique()
def get_atom_Zs(self):
return self.atoms.Z.unique()
def get_atom_count(self):
return self.atoms.symbol.value_counts()
def get_atomic_numbers(self):
return self.atoms.Z.values
def get_chemical_symbols(self):
return self.atoms.symbol.values
def get_scaled_positions(self):
return np.array([self.atoms.x.values,
self.atoms.y.values,
self.atoms.z.values]).T
def get_positions(self):
return np.array([self.atoms.x.values * self.unitcell.a,
self.atoms.y.values * self.unitcell.b,
self.atoms.z.values * self.unitcell.c]).T
def add_atom(self, i=0, j=0, k=0, site=0, Z=None, symbol='', position=None):
Z, symbol = get_atomic_number_symbol(Z, symbol)
if position is None:
raise ValueError("position not provided")
position_x = position[0] + i
position_y = position[1] + j
position_z = position[2] + k
self.atoms.loc[i, j, k, site] = [Z, symbol,
position[0], position[1], position[2],
position_x, position_y, position_z]
def get_atomic_number_symbol(Z=None, symbol=''):
import periodictable
if symbol is '':
if Z is None:
raise ValueError("symbol and/or Z number not given")
else:
symbol = periodictable.elements[Z].symbol
else:
symbol = symbol.capitalize()
z = periodictable.elements.symbol(symbol).number
if Z is None:
Z = z
elif Z is not z:
raise ValueError("symbol and Z don't match")
return (Z, symbol)
|
Add Structure class using pandas DataFrame
|
Add Structure class using pandas DataFrame
|
Python
|
mit
|
rosswhitfield/javelin
|
Add Structure class using pandas DataFrame
|
import numpy as np
from pandas import DataFrame
from javelin.unitcell import UnitCell
class Structure(object):
def __init__(self):
self.unitcell = UnitCell()
self.atoms = DataFrame(columns=['i', 'j', 'k', 'site',
'Z', 'symbol',
'rel_x', 'rel_y', 'rel_z',
'x', 'y', 'z']).set_index(['i', 'j', 'k', 'site'])
self.molecules = {}
@property
def number_of_atoms(self):
return len(self.atoms)
def get_atom_symbols(self):
return self.atoms.symbol.unique()
def get_atom_Zs(self):
return self.atoms.Z.unique()
def get_atom_count(self):
return self.atoms.symbol.value_counts()
def get_atomic_numbers(self):
return self.atoms.Z.values
def get_chemical_symbols(self):
return self.atoms.symbol.values
def get_scaled_positions(self):
return np.array([self.atoms.x.values,
self.atoms.y.values,
self.atoms.z.values]).T
def get_positions(self):
return np.array([self.atoms.x.values * self.unitcell.a,
self.atoms.y.values * self.unitcell.b,
self.atoms.z.values * self.unitcell.c]).T
def add_atom(self, i=0, j=0, k=0, site=0, Z=None, symbol='', position=None):
Z, symbol = get_atomic_number_symbol(Z, symbol)
if position is None:
raise ValueError("position not provided")
position_x = position[0] + i
position_y = position[1] + j
position_z = position[2] + k
self.atoms.loc[i, j, k, site] = [Z, symbol,
position[0], position[1], position[2],
position_x, position_y, position_z]
def get_atomic_number_symbol(Z=None, symbol=''):
import periodictable
if symbol is '':
if Z is None:
raise ValueError("symbol and/or Z number not given")
else:
symbol = periodictable.elements[Z].symbol
else:
symbol = symbol.capitalize()
z = periodictable.elements.symbol(symbol).number
if Z is None:
Z = z
elif Z is not z:
raise ValueError("symbol and Z don't match")
return (Z, symbol)
|
<commit_before><commit_msg>Add Structure class using pandas DataFrame<commit_after>
|
import numpy as np
from pandas import DataFrame
from javelin.unitcell import UnitCell
class Structure(object):
def __init__(self):
self.unitcell = UnitCell()
self.atoms = DataFrame(columns=['i', 'j', 'k', 'site',
'Z', 'symbol',
'rel_x', 'rel_y', 'rel_z',
'x', 'y', 'z']).set_index(['i', 'j', 'k', 'site'])
self.molecules = {}
@property
def number_of_atoms(self):
return len(self.atoms)
def get_atom_symbols(self):
return self.atoms.symbol.unique()
def get_atom_Zs(self):
return self.atoms.Z.unique()
def get_atom_count(self):
return self.atoms.symbol.value_counts()
def get_atomic_numbers(self):
return self.atoms.Z.values
def get_chemical_symbols(self):
return self.atoms.symbol.values
def get_scaled_positions(self):
return np.array([self.atoms.x.values,
self.atoms.y.values,
self.atoms.z.values]).T
def get_positions(self):
return np.array([self.atoms.x.values * self.unitcell.a,
self.atoms.y.values * self.unitcell.b,
self.atoms.z.values * self.unitcell.c]).T
def add_atom(self, i=0, j=0, k=0, site=0, Z=None, symbol='', position=None):
Z, symbol = get_atomic_number_symbol(Z, symbol)
if position is None:
raise ValueError("position not provided")
position_x = position[0] + i
position_y = position[1] + j
position_z = position[2] + k
self.atoms.loc[i, j, k, site] = [Z, symbol,
position[0], position[1], position[2],
position_x, position_y, position_z]
def get_atomic_number_symbol(Z=None, symbol=''):
import periodictable
if symbol is '':
if Z is None:
raise ValueError("symbol and/or Z number not given")
else:
symbol = periodictable.elements[Z].symbol
else:
symbol = symbol.capitalize()
z = periodictable.elements.symbol(symbol).number
if Z is None:
Z = z
elif Z is not z:
raise ValueError("symbol and Z don't match")
return (Z, symbol)
|
Add Structure class using pandas DataFrameimport numpy as np
from pandas import DataFrame
from javelin.unitcell import UnitCell
class Structure(object):
def __init__(self):
self.unitcell = UnitCell()
self.atoms = DataFrame(columns=['i', 'j', 'k', 'site',
'Z', 'symbol',
'rel_x', 'rel_y', 'rel_z',
'x', 'y', 'z']).set_index(['i', 'j', 'k', 'site'])
self.molecules = {}
@property
def number_of_atoms(self):
return len(self.atoms)
def get_atom_symbols(self):
return self.atoms.symbol.unique()
def get_atom_Zs(self):
return self.atoms.Z.unique()
def get_atom_count(self):
return self.atoms.symbol.value_counts()
def get_atomic_numbers(self):
return self.atoms.Z.values
def get_chemical_symbols(self):
return self.atoms.symbol.values
def get_scaled_positions(self):
return np.array([self.atoms.x.values,
self.atoms.y.values,
self.atoms.z.values]).T
def get_positions(self):
return np.array([self.atoms.x.values * self.unitcell.a,
self.atoms.y.values * self.unitcell.b,
self.atoms.z.values * self.unitcell.c]).T
def add_atom(self, i=0, j=0, k=0, site=0, Z=None, symbol='', position=None):
Z, symbol = get_atomic_number_symbol(Z, symbol)
if position is None:
raise ValueError("position not provided")
position_x = position[0] + i
position_y = position[1] + j
position_z = position[2] + k
self.atoms.loc[i, j, k, site] = [Z, symbol,
position[0], position[1], position[2],
position_x, position_y, position_z]
def get_atomic_number_symbol(Z=None, symbol=''):
import periodictable
if symbol is '':
if Z is None:
raise ValueError("symbol and/or Z number not given")
else:
symbol = periodictable.elements[Z].symbol
else:
symbol = symbol.capitalize()
z = periodictable.elements.symbol(symbol).number
if Z is None:
Z = z
elif Z is not z:
raise ValueError("symbol and Z don't match")
return (Z, symbol)
|
<commit_before><commit_msg>Add Structure class using pandas DataFrame<commit_after>import numpy as np
from pandas import DataFrame
from javelin.unitcell import UnitCell
class Structure(object):
def __init__(self):
self.unitcell = UnitCell()
self.atoms = DataFrame(columns=['i', 'j', 'k', 'site',
'Z', 'symbol',
'rel_x', 'rel_y', 'rel_z',
'x', 'y', 'z']).set_index(['i', 'j', 'k', 'site'])
self.molecules = {}
@property
def number_of_atoms(self):
return len(self.atoms)
def get_atom_symbols(self):
return self.atoms.symbol.unique()
def get_atom_Zs(self):
return self.atoms.Z.unique()
def get_atom_count(self):
return self.atoms.symbol.value_counts()
def get_atomic_numbers(self):
return self.atoms.Z.values
def get_chemical_symbols(self):
return self.atoms.symbol.values
def get_scaled_positions(self):
return np.array([self.atoms.x.values,
self.atoms.y.values,
self.atoms.z.values]).T
def get_positions(self):
return np.array([self.atoms.x.values * self.unitcell.a,
self.atoms.y.values * self.unitcell.b,
self.atoms.z.values * self.unitcell.c]).T
def add_atom(self, i=0, j=0, k=0, site=0, Z=None, symbol='', position=None):
Z, symbol = get_atomic_number_symbol(Z, symbol)
if position is None:
raise ValueError("position not provided")
position_x = position[0] + i
position_y = position[1] + j
position_z = position[2] + k
self.atoms.loc[i, j, k, site] = [Z, symbol,
position[0], position[1], position[2],
position_x, position_y, position_z]
def get_atomic_number_symbol(Z=None, symbol=''):
import periodictable
if symbol is '':
if Z is None:
raise ValueError("symbol and/or Z number not given")
else:
symbol = periodictable.elements[Z].symbol
else:
symbol = symbol.capitalize()
z = periodictable.elements.symbol(symbol).number
if Z is None:
Z = z
elif Z is not z:
raise ValueError("symbol and Z don't match")
return (Z, symbol)
|
|
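A brief usage sketch for the Structure class above, assuming pandas and periodictable are installed and that the default UnitCell() is acceptable; the coordinates are arbitrary illustration values:

s = Structure()
s.add_atom(i=0, j=0, k=0, site=0, symbol='Na', position=(0.0, 0.0, 0.0))
s.add_atom(i=0, j=0, k=0, site=1, symbol='Cl', position=(0.5, 0.5, 0.5))
print(s.number_of_atoms)         # 2
print(s.get_chemical_symbols())  # array of symbols, e.g. ['Na' 'Cl']

One small caveat: get_atomic_number_symbol compares values with "is" (symbol is '', Z is not z), which only works by accident of interning and small-integer caching; == and != would be the safer comparisons.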
dec17e4d8eb88610fbe81aeef84ea41b76b0e398
|
ListItems.py
|
ListItems.py
|
import os
import json
import re
###############################################
# Run this from the root of the assets folder #
###############################################
# Some code from http://www.lifl.fr/~riquetd/parse-a-json-file-with-comments.html
# "Parse a JSON file with comments"
comment_re = re.compile(
'(^)?[^\S\n]*/(?:\*(.*?)\*/[^\S\n]*|/[^\n]*)($)?',
re.DOTALL | re.MULTILINE
)
def parse_json(content):
""" Parse a JSON file
First remove comments and then use the json module package
"""
## Looking for comments
match = comment_re.search(content)
while match:
# single line comment
content = content[:match.start()] + content[match.end():]
match = comment_re.search(content)
# Replace -. with -0.
content = content.replace("-.", "-0.")
# Return json file
return json.loads(content)
def getPath(root, file):
path = os.path.join(root, file)
return path[2:].replace("\\", "/")
imageKeys = ["inventoryIcon", "image"]
def getImageFromData(data):
# Not all items just have an "inventoryIcon"
for key in imageKeys:
if key in data:
return data[key]
return None
def getItemDetails(root, filename):
itemPath = getPath(root, filename)
#print(itemPath)
fh = open(itemPath, "r")
data = parse_json(fh.read())
# It will either have objectName or itemName
try:
itemName = data["itemName"]
except KeyError:
itemName = data["objectName"]
# Not all items have a static inventory icon
image = getImageFromData(data)
if image is not None:
iconPath = getPath(root, image)
else:
print("No image for: " + itemPath)
iconPath = None
return itemName, { "itemPath": itemPath, "iconPath": iconPath }
def getAllItems(itemType):
itemDict = {}
extension = "." + itemType
for root, dirs, files in os.walk("."):
for file in files:
if file.endswith(extension):
itemName, details = getItemDetails(root, file)
itemDict[itemName] = details
return itemDict
allItems = {}
# "generatedsword", "generatedgun", "generatedshield", "codexitem"
itemTypes = [
"item",
"matitem", "miningtool", "flashlight", "wiretool", "beamaxe",
"tillingtool", "painttool", "gun", "sword", "harvestingtool",
"head", "chest", "legs", "back", "coinitem", "consumable",
"blueprint", "techitem", "instrument", "grapplinghook",
"thrownitem", "celestial", "object"
]
for itemType in itemTypes:
allItems[itemType] = getAllItems(itemType)
outFile = open("items.json", "w")
json.dump(allItems, outFile)
print("All items written to items.json")
|
Add a python script to list all items
|
Add a python script to list all items
Saves the item name, item path, and image path into "items.json"
|
Python
|
mit
|
McSimp/starbound-research
|
Add a python script to list all items
Saves the item name, item path, and image path into "items.json"
|
import os
import json
import re
###############################################
# Run this from the root of the assets folder #
###############################################
# Some code from http://www.lifl.fr/~riquetd/parse-a-json-file-with-comments.html
# "Parse a JSON file with comments"
comment_re = re.compile(
'(^)?[^\S\n]*/(?:\*(.*?)\*/[^\S\n]*|/[^\n]*)($)?',
re.DOTALL | re.MULTILINE
)
def parse_json(content):
""" Parse a JSON file
First remove comments and then use the json module package
"""
## Looking for comments
match = comment_re.search(content)
while match:
# single line comment
content = content[:match.start()] + content[match.end():]
match = comment_re.search(content)
# Replace -. with -0.
content = content.replace("-.", "-0.")
# Return json file
return json.loads(content)
def getPath(root, file):
path = os.path.join(root, file)
return path[2:].replace("\\", "/")
imageKeys = ["inventoryIcon", "image"]
def getImageFromData(data):
# Not all items just have an "inventoryIcon"
for key in imageKeys:
if key in data:
return data[key]
return None
def getItemDetails(root, filename):
itemPath = getPath(root, filename)
#print(itemPath)
fh = open(itemPath, "r")
data = parse_json(fh.read())
# It will either have objectName or itemName
try:
itemName = data["itemName"]
except KeyError:
itemName = data["objectName"]
# Not all items have a static inventory icon
image = getImageFromData(data)
if image is not None:
iconPath = getPath(root, image)
else:
print("No image for: " + itemPath)
iconPath = None
return itemName, { "itemPath": itemPath, "iconPath": iconPath }
def getAllItems(itemType):
itemDict = {}
extension = "." + itemType
for root, dirs, files in os.walk("."):
for file in files:
if file.endswith(extension):
itemName, details = getItemDetails(root, file)
itemDict[itemName] = details
return itemDict
allItems = {}
# "generatedsword", "generatedgun", "generatedshield", "codexitem"
itemTypes = [
"item",
"matitem", "miningtool", "flashlight", "wiretool", "beamaxe",
"tillingtool", "painttool", "gun", "sword", "harvestingtool",
"head", "chest", "legs", "back", "coinitem", "consumable",
"blueprint", "techitem", "instrument", "grapplinghook",
"thrownitem", "celestial", "object"
]
for itemType in itemTypes:
allItems[itemType] = getAllItems(itemType)
outFile = open("items.json", "w")
json.dump(allItems, outFile)
print("All items written to items.json")
|
<commit_before><commit_msg>Add a python script to list all items
Saves the item name, item path, and image path into "items.json"<commit_after>
|
import os
import json
import re
###############################################
# Run this from the root of the assets folder #
###############################################
# Some code from http://www.lifl.fr/~riquetd/parse-a-json-file-with-comments.html
# "Parse a JSON file with comments"
comment_re = re.compile(
'(^)?[^\S\n]*/(?:\*(.*?)\*/[^\S\n]*|/[^\n]*)($)?',
re.DOTALL | re.MULTILINE
)
def parse_json(content):
""" Parse a JSON file
First remove comments and then use the json module package
"""
## Looking for comments
match = comment_re.search(content)
while match:
# single line comment
content = content[:match.start()] + content[match.end():]
match = comment_re.search(content)
# Replace -. with -0.
content = content.replace("-.", "-0.")
# Return json file
return json.loads(content)
def getPath(root, file):
path = os.path.join(root, file)
return path[2:].replace("\\", "/")
imageKeys = ["inventoryIcon", "image"]
def getImageFromData(data):
# Not all items just have an "inventoryIcon"
for key in imageKeys:
if key in data:
return data[key]
return None
def getItemDetails(root, filename):
itemPath = getPath(root, filename)
#print(itemPath)
fh = open(itemPath, "r")
data = parse_json(fh.read())
# It will either have objectName or itemName
try:
itemName = data["itemName"]
except KeyError:
itemName = data["objectName"]
# Not all items have a static inventory icon
image = getImageFromData(data)
if image is not None:
iconPath = getPath(root, image)
else:
print("No image for: " + itemPath)
iconPath = None
return itemName, { "itemPath": itemPath, "iconPath": iconPath }
def getAllItems(itemType):
itemDict = {}
extension = "." + itemType
for root, dirs, files in os.walk("."):
for file in files:
if file.endswith(extension):
itemName, details = getItemDetails(root, file)
itemDict[itemName] = details
return itemDict
allItems = {}
# "generatedsword", "generatedgun", "generatedshield", "codexitem"
itemTypes = [
"item",
"matitem", "miningtool", "flashlight", "wiretool", "beamaxe",
"tillingtool", "painttool", "gun", "sword", "harvestingtool",
"head", "chest", "legs", "back", "coinitem", "consumable",
"blueprint", "techitem", "instrument", "grapplinghook",
"thrownitem", "celestial", "object"
]
for itemType in itemTypes:
allItems[itemType] = getAllItems(itemType)
outFile = open("items.json", "w")
json.dump(allItems, outFile)
print("All items written to items.json")
|
Add a python script to list all items
Saves the item name, item path, and image path into "items.json"import os
import json
import re
###############################################
# Run this from the root of the assets folder #
###############################################
# Some code from http://www.lifl.fr/~riquetd/parse-a-json-file-with-comments.html
# "Parse a JSON file with comments"
comment_re = re.compile(
'(^)?[^\S\n]*/(?:\*(.*?)\*/[^\S\n]*|/[^\n]*)($)?',
re.DOTALL | re.MULTILINE
)
def parse_json(content):
""" Parse a JSON file
First remove comments and then use the json module package
"""
## Looking for comments
match = comment_re.search(content)
while match:
# single line comment
content = content[:match.start()] + content[match.end():]
match = comment_re.search(content)
# Replace -. with -0.
content = content.replace("-.", "-0.")
# Return json file
return json.loads(content)
def getPath(root, file):
path = os.path.join(root, file)
return path[2:].replace("\\", "/")
imageKeys = ["inventoryIcon", "image"]
def getImageFromData(data):
# Not all items just have an "inventoryIcon"
for key in imageKeys:
if key in data:
return data[key]
return None
def getItemDetails(root, filename):
itemPath = getPath(root, filename)
#print(itemPath)
fh = open(itemPath, "r")
data = parse_json(fh.read())
# It will either have objectName or itemName
try:
itemName = data["itemName"]
except KeyError:
itemName = data["objectName"]
# Not all items have a static inventory icon
image = getImageFromData(data)
if image is not None:
iconPath = getPath(root, image)
else:
print("No image for: " + itemPath)
iconPath = None
return itemName, { "itemPath": itemPath, "iconPath": iconPath }
def getAllItems(itemType):
itemDict = {}
extension = "." + itemType
for root, dirs, files in os.walk("."):
for file in files:
if file.endswith(extension):
itemName, details = getItemDetails(root, file)
itemDict[itemName] = details
return itemDict
allItems = {}
# "generatedsword", "generatedgun", "generatedshield", "codexitem"
itemTypes = [
"item",
"matitem", "miningtool", "flashlight", "wiretool", "beamaxe",
"tillingtool", "painttool", "gun", "sword", "harvestingtool",
"head", "chest", "legs", "back", "coinitem", "consumable",
"blueprint", "techitem", "instrument", "grapplinghook",
"thrownitem", "celestial", "object"
]
for itemType in itemTypes:
allItems[itemType] = getAllItems(itemType)
outFile = open("items.json", "w")
json.dump(allItems, outFile)
print("All items written to items.json")
|
<commit_before><commit_msg>Add a python script to list all items
Saves the item name, item path, and image path into "items.json"<commit_after>import os
import json
import re
###############################################
# Run this from the root of the assets folder #
###############################################
# Some code from http://www.lifl.fr/~riquetd/parse-a-json-file-with-comments.html
# "Parse a JSON file with comments"
comment_re = re.compile(
'(^)?[^\S\n]*/(?:\*(.*?)\*/[^\S\n]*|/[^\n]*)($)?',
re.DOTALL | re.MULTILINE
)
def parse_json(content):
""" Parse a JSON file
First remove comments and then use the json module package
"""
## Looking for comments
match = comment_re.search(content)
while match:
# single line comment
content = content[:match.start()] + content[match.end():]
match = comment_re.search(content)
# Replace -. with -0.
content = content.replace("-.", "-0.")
# Return json file
return json.loads(content)
def getPath(root, file):
path = os.path.join(root, file)
return path[2:].replace("\\", "/")
imageKeys = ["inventoryIcon", "image"]
def getImageFromData(data):
# Not all items just have an "inventoryIcon"
for key in imageKeys:
if key in data:
return data[key]
return None
def getItemDetails(root, filename):
itemPath = getPath(root, filename)
#print(itemPath)
fh = open(itemPath, "r")
data = parse_json(fh.read())
# It will either have objectName or itemName
try:
itemName = data["itemName"]
except KeyError:
itemName = data["objectName"]
# Not all items have a static inventory icon
image = getImageFromData(data)
if image is not None:
iconPath = getPath(root, image)
else:
print("No image for: " + itemPath)
iconPath = None
return itemName, { "itemPath": itemPath, "iconPath": iconPath }
def getAllItems(itemType):
itemDict = {}
extension = "." + itemType
for root, dirs, files in os.walk("."):
for file in files:
if file.endswith(extension):
itemName, details = getItemDetails(root, file)
itemDict[itemName] = details
return itemDict
allItems = {}
# "generatedsword", "generatedgun", "generatedshield", "codexitem"
itemTypes = [
"item",
"matitem", "miningtool", "flashlight", "wiretool", "beamaxe",
"tillingtool", "painttool", "gun", "sword", "harvestingtool",
"head", "chest", "legs", "back", "coinitem", "consumable",
"blueprint", "techitem", "instrument", "grapplinghook",
"thrownitem", "celestial", "object"
]
for itemType in itemTypes:
allItems[itemType] = getAllItems(itemType)
outFile = open("items.json", "w")
json.dump(allItems, outFile)
print("All items written to items.json")
|
|
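An aside on the parse_json helper above: it is generic enough to try on any JSON-with-comments text. A tiny self-contained check (the sample string is invented for illustration):

sample = '{"itemName": "torch", /* crafting */ "price": -.5 // cheap\n}'
print(parse_json(sample))  # {'itemName': 'torch', 'price': -0.5}

One caveat worth keeping in mind: the comment regex also strips everything after "//" inside string values, so any path or URL containing "://" in an asset file would be mangled.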
83a2a04ec5b416e68588142ececb055d646a5449
|
nose2/tests/functional/__init__.py
|
nose2/tests/functional/__init__.py
|
import os
SUPPORT = os.path.abspath(os.path.join(os.path.dirname(__file__), 'support'))
def support_file(*path_parts):
return os.path.join(SUPPORT, *path_parts)
|
import os
import subprocess
SUPPORT = os.path.abspath(os.path.join(os.path.dirname(__file__), 'support'))
def support_file(*path_parts):
return os.path.join(SUPPORT, *path_parts)
def run_nose2(*nose2_args, **popen_args):
if 'cwd' in popen_args:
cwd = popen_args.pop('cwd')
if not os.path.isabs(cwd):
popen_args['cwd'] = support_file(cwd)
process = subprocess.Popen(
['python', '-m', 'nose2.__main__'] + list(nose2_args),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
**popen_args)
output, err = process.communicate()
retcode = process.poll()
return retcode, output, err
|
Add utility function for executing test runs
|
Add utility function for executing test runs
|
Python
|
bsd-2-clause
|
ezigman/nose2,ojengwa/nose2,leth/nose2,leth/nose2,little-dude/nose2,little-dude/nose2,ptthiem/nose2,ojengwa/nose2,ptthiem/nose2,ezigman/nose2
|
import os
SUPPORT = os.path.abspath(os.path.join(os.path.dirname(__file__), 'support'))
def support_file(*path_parts):
return os.path.join(SUPPORT, *path_parts)
Add utility function for executing test runs
|
import os
import subprocess
SUPPORT = os.path.abspath(os.path.join(os.path.dirname(__file__), 'support'))
def support_file(*path_parts):
return os.path.join(SUPPORT, *path_parts)
def run_nose2(*nose2_args, **popen_args):
if 'cwd' in popen_args:
cwd = popen_args.pop('cwd')
if not os.path.isabs(cwd):
popen_args['cwd'] = support_file(cwd)
process = subprocess.Popen(
['python', '-m', 'nose2.__main__'] + list(nose2_args),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
**popen_args)
output, err = process.communicate()
retcode = process.poll()
return retcode, output, err
|
<commit_before>import os
SUPPORT = os.path.abspath(os.path.join(os.path.dirname(__file__), 'support'))
def support_file(*path_parts):
return os.path.join(SUPPORT, *path_parts)
<commit_msg>Add utility function for executing test runs<commit_after>
|
import os
import subprocess
SUPPORT = os.path.abspath(os.path.join(os.path.dirname(__file__), 'support'))
def support_file(*path_parts):
return os.path.join(SUPPORT, *path_parts)
def run_nose2(*nose2_args, **popen_args):
if 'cwd' in popen_args:
cwd = popen_args.pop('cwd')
if not os.path.isabs(cwd):
popen_args['cwd'] = support_file(cwd)
process = subprocess.Popen(
['python', '-m', 'nose2.__main__'] + list(nose2_args),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
**popen_args)
output, err = process.communicate()
retcode = process.poll()
return retcode, output, err
|
import os
SUPPORT = os.path.abspath(os.path.join(os.path.dirname(__file__), 'support'))
def support_file(*path_parts):
return os.path.join(SUPPORT, *path_parts)
Add utility function for executing test runsimport os
import subprocess
SUPPORT = os.path.abspath(os.path.join(os.path.dirname(__file__), 'support'))
def support_file(*path_parts):
return os.path.join(SUPPORT, *path_parts)
def run_nose2(*nose2_args, **popen_args):
if 'cwd' in popen_args:
cwd = popen_args.pop('cwd')
if not os.path.isabs(cwd):
popen_args['cwd'] = support_file(cwd)
process = subprocess.Popen(
['python', '-m', 'nose2.__main__'] + list(nose2_args),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
**popen_args)
output, err = process.communicate()
retcode = process.poll()
return retcode, output, err
|
<commit_before>import os
SUPPORT = os.path.abspath(os.path.join(os.path.dirname(__file__), 'support'))
def support_file(*path_parts):
return os.path.join(SUPPORT, *path_parts)
<commit_msg>Add utility function for executing test runs<commit_after>import os
import subprocess
SUPPORT = os.path.abspath(os.path.join(os.path.dirname(__file__), 'support'))
def support_file(*path_parts):
return os.path.join(SUPPORT, *path_parts)
def run_nose2(*nose2_args, **popen_args):
if 'cwd' in popen_args:
cwd = popen_args.pop('cwd')
if not os.path.isabs(cwd):
popen_args['cwd'] = support_file(cwd)
process = subprocess.Popen(
['python', '-m', 'nose2.__main__'] + list(nose2_args),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
**popen_args)
output, err = process.communicate()
retcode = process.poll()
return retcode, output, err
|
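A quick illustration of how the run_nose2 helper above is intended to be called from a functional test; the scenario directory name is hypothetical and would live under the support tree:

from nose2.tests.functional import run_nose2

def test_scenario_passes():
    retcode, output, err = run_nose2('--verbose', cwd='scenario/one_test')
    assert retcode == 0, err

Because a relative cwd is resolved through support_file(), tests can name scenario directories without caring where the suite is checked out.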
21bdda2f1a001dde6ed2aea34d3dccd7e63a5a37
|
src_py/writeGPIO.py
|
src_py/writeGPIO.py
|
import RPi.GPIO as GPIO
from time import sleep
GPIO.setmode(GPIO.BCM)
GPIO.setup(9, GPIO.OUT)
GPIO.setup(11, GPIO.OUT)
while 1:
GPIO.output(9, True)
sleep(0.25)
GPIO.output(9, False)
sleep(0.25)
GPIO.output(11, True)
sleep(0.25)
GPIO.output(11, False)
sleep(0.25)
#GPIO.output(11, True)
#sleep(0.25)
#GPIO.output(11, False)
#sleep(0.25)
#GPIO.output(11, True)
#sleep(0.25)
#GPIO.output(11, False)
#sleep(0.25)
GPIO.cleanup()
|
Write in gpio on raspberry
|
Write in gpio on raspberry
|
Python
|
mit
|
nich2000/ncs,nich2000/ncs,nich2000/ncs,nich2000/ncs,nich2000/ncs,nich2000/ncs,nich2000/ncs
|
Write in gpio on raspberry
|
import RPi.GPIO as GPIO
from time import sleep
GPIO.setmode(GPIO.BCM)
GPIO.setup(9, GPIO.OUT)
GPIO.setup(11, GPIO.OUT)
while 1:
GPIO.output(9, True)
sleep(0.25)
GPIO.output(9, False)
sleep(0.25)
GPIO.output(11, True)
sleep(0.25)
GPIO.output(11, False)
sleep(0.25)
#GPIO.output(11, True)
#sleep(0.25)
#GPIO.output(11, False)
#sleep(0.25)
#GPIO.output(11, True)
#sleep(0.25)
#GPIO.output(11, False)
#sleep(0.25)
GPIO.cleanup()
|
<commit_before><commit_msg>Write in gpio on raspberry<commit_after>
|
import RPi.GPIO as GPIO
from time import sleep
GPIO.setmode(GPIO.BCM)
GPIO.setup(9, GPIO.OUT)
GPIO.setup(11, GPIO.OUT)
while 1:
GPIO.output(9, True)
sleep(0.25)
GPIO.output(9, False)
sleep(0.25)
GPIO.output(11, True)
sleep(0.25)
GPIO.output(11, False)
sleep(0.25)
#GPIO.output(11, True)
#sleep(0.25)
#GPIO.output(11, False)
#sleep(0.25)
#GPIO.output(11, True)
#sleep(0.25)
#GPIO.output(11, False)
#sleep(0.25)
GPIO.cleanup()
|
Write in gpio on raspberryimport RPi.GPIO as GPIO
from time import sleep
GPIO.setmode(GPIO.BCM)
GPIO.setup(9, GPIO.OUT)
GPIO.setup(11, GPIO.OUT)
while 1:
GPIO.output(9, True)
sleep(0.25)
GPIO.output(9, False)
sleep(0.25)
GPIO.output(11, True)
sleep(0.25)
GPIO.output(11, False)
sleep(0.25)
#GPIO.output(11, True)
#sleep(0.25)
#GPIO.output(11, False)
#sleep(0.25)
#GPIO.output(11, True)
#sleep(0.25)
#GPIO.output(11, False)
#sleep(0.25)
GPIO.cleanup()
|
<commit_before><commit_msg>Write in gpio on raspberry<commit_after>import RPi.GPIO as GPIO
from time import sleep
GPIO.setmode(GPIO.BCM)
GPIO.setup(9, GPIO.OUT)
GPIO.setup(11, GPIO.OUT)
while 1:
GPIO.output(9, True)
sleep(0.25)
GPIO.output(9, False)
sleep(0.25)
GPIO.output(11, True)
sleep(0.25)
GPIO.output(11, False)
sleep(0.25)
#GPIO.output(11, True)
#sleep(0.25)
#GPIO.output(11, False)
#sleep(0.25)
#GPIO.output(11, True)
#sleep(0.25)
#GPIO.output(11, False)
#sleep(0.25)
GPIO.cleanup()
|
|
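One caveat about the blink script above: the loop never exits, so the trailing GPIO.cleanup() is unreachable and the pins stay configured when the process is killed. A common arrangement, sketched with the same BCM pin numbers, is to catch Ctrl-C and clean up in a finally block:

try:
    while True:
        GPIO.output(9, True)
        sleep(0.25)
        GPIO.output(9, False)
        sleep(0.25)
except KeyboardInterrupt:
    pass
finally:
    GPIO.cleanup()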
89e8b3ef400c6025161c3af819c15ad8c7b74425
|
pyramid_authsanity/sources.py
|
pyramid_authsanity/sources.py
|
from zope.interface import implementer
from .interfaces import (
IAuthSourceService,
)
@implementer(IAuthSourceService)
class SessionAuthSource(object):
""" An authentication source that uses the current session """
vary = ()
value_key = 'sanity.value'
def __init__(self, context, request):
self.request = request
self.session = request.session
return self
def get_value(self):
return self.session.get(value_key, [None, None])
def headers_remember(self, value):
self.session[value_key] = value
return []
def headers_forget(self):
if value_key in self.session:
del self.session[value_key]
return []
|
Add a session based source
|
Add a session based source
This will pull the auth information from the current session.
|
Python
|
isc
|
usingnamespace/pyramid_authsanity
|
Add a session based source
This will pull the auth information from the current session.
|
from zope.interface import implementer
from .interfaces import (
IAuthSourceService,
)
@implementer(IAuthSourceService)
class SessionAuthSource(object):
""" An authentication source that uses the current session """
vary = ()
value_key = 'sanity.value'
def __init__(self, context, request):
self.request = request
self.session = request.session
return self
def get_value(self):
return self.session.get(value_key, [None, None])
def headers_remember(self, value):
self.session[value_key] = value
return []
def headers_forget(self):
if value_key in self.session:
del self.session[value_key]
return []
|
<commit_before><commit_msg>Add a session based source
This will pull the auth information from the current session.<commit_after>
|
from zope.interface import implementer
from .interfaces import (
IAuthSourceService,
)
@implementer(IAuthSourceService)
class SessionAuthSource(object):
""" An authentication source that uses the current session """
vary = ()
value_key = 'sanity.value'
def __init__(self, context, request):
self.request = request
self.session = request.session
return self
def get_value(self):
return self.session.get(value_key, [None, None])
def headers_remember(self, value):
self.session[value_key] = value
return []
def headers_forget(self):
if value_key in self.session:
del self.session[value_key]
return []
|
Add a session based source
This will pull the auth information from the current session.from zope.interface import implementer
from .interfaces import (
IAuthSourceService,
)
@implementer(IAuthSourceService)
class SessionAuthSource(object):
""" An authentication source that uses the current session """
vary = ()
value_key = 'sanity.value'
def __init__(self, context, request):
self.request = request
self.session = request.session
return self
def get_value(self):
return self.session.get(value_key, [None, None])
def headers_remember(self, value):
self.session[value_key] = value
return []
def headers_forget(self):
if value_key in self.session:
del self.session[value_key]
return []
|
<commit_before><commit_msg>Add a session based source
This will pull the auth information from the current session.<commit_after>from zope.interface import implementer
from .interfaces import (
IAuthSourceService,
)
@implementer(IAuthSourceService)
class SessionAuthSource(object):
""" An authentication source that uses the current session """
vary = ()
value_key = 'sanity.value'
def __init__(self, context, request):
self.request = request
self.session = request.session
return self
def get_value(self):
return self.session.get(value_key, [None, None])
def headers_remember(self, value):
self.session[value_key] = value
return []
def headers_forget(self):
if value_key in self.session:
del self.session[value_key]
return []
|
|
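An editorial note on the source above: as recorded, get_value, headers_remember and headers_forget refer to value_key without the self. prefix, and __init__ returns self, both of which fail at runtime (NameError and TypeError respectively). A corrected sketch that keeps the recorded interface:

@implementer(IAuthSourceService)
class SessionAuthSource(object):
    """ An authentication source that uses the current session """
    vary = ()
    value_key = 'sanity.value'

    def __init__(self, context, request):
        self.request = request
        self.session = request.session

    def get_value(self):
        return self.session.get(self.value_key, [None, None])

    def headers_remember(self, value):
        self.session[self.value_key] = value
        return []

    def headers_forget(self):
        self.session.pop(self.value_key, None)
        return []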
1adbe7b5974403ac58963679d2b945cf94c36826
|
test/tests_and_pull.py
|
test/tests_and_pull.py
|
#!/usr/bin/env python
import json, argparse
import httplib, subprocess, os
def execute(args):
IP = args.agent.split(':')[0]
if len(args.agent.split(':')) == 2:
port = args.agent.split(':')[1]
else:
port = 80
agent = 'http://' + args.agent
with open(args.config, 'r') as f:
config = f.read()
print agent
headers = {"Content-type": "application/json"}
conn = httplib.HTTPConnection(IP, port)
conn.request("POST", "", config, headers)
response = conn.getresponse()
if not args.harp:
print response.status, response.reason
data = response.read()
if response.status != 200:
print data
return
responseJson = json.loads(data)
if not args.noresult and not args.harp:
os.makedirs(responseJson['job-id'])
if args.analyze and args.final:
for f in responseJson['final-hars']:
fUrl = agent+f
p = subprocess.Popen(['wget', '-nv', fUrl], cwd=responseJson['job-id'])
rc = p.wait()
if rc != 0:
print 'Download %s failed' % fUrl
for f in responseJson['files']:
fUrl = agent+f
p = subprocess.Popen(['wget', '-nv', fUrl], cwd=responseJson['job-id'])
rc = p.wait()
if rc != 0:
print 'Download %s failed' % fUrl
if args.analyze and args.harp:
for f in responseJson['final-hars']:
fUrl = agent+f+'p'
print fUrl
def main():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter,\
description='A handy tool to perform remote test and retrieve result files')
parser.add_argument('agent', help='IPaddress:PORT of agent, no "http"')
parser.add_argument('config', help='Path to the config file')
parser.add_argument('-a', '--analyze', action='store_true', default=False, help='perform TCP analyze on remote test agent as well')
parser.add_argument('-n', '--noresult', action='store_true', default=False, help='don not try to download result file at all')
parser.add_argument('-f', '--final', action='store_true', default=False, help='only download the final har, if "--analyze" is used')
parser.add_argument('-p', '--harp', action='store_true', default=False, help='just print out the urls of the final har in jsonp format, if "--analyze" is used')
args = parser.parse_args()
execute(args)
if __name__ == '__main__':
main()
|
Add a handy CLI test client
|
Add a handy CLI test client
|
Python
|
apache-2.0
|
eaufavor/chrome-webpage-profiler-webui,eaufavor/chrome-webpage-profiler-webui,eaufavor/chrome-webpage-profiler-webui,eaufavor/chrome-webpage-profiler-webui
|
Add a handy CLI test client
|
#!/usr/bin/env python
import json, argparse
import httplib, subprocess, os
def execute(args):
IP = args.agent.split(':')[0]
if len(args.agent.split(':')) == 2:
port = args.agent.split(':')[1]
else:
port = 80
agent = 'http://' + args.agent
with open(args.config, 'r') as f:
config = f.read()
print agent
headers = {"Content-type": "application/json"}
conn = httplib.HTTPConnection(IP, port)
conn.request("POST", "", config, headers)
response = conn.getresponse()
if not args.harp:
print response.status, response.reason
data = response.read()
if response.status != 200:
print data
return
responseJson = json.loads(data)
if not args.noresult and not args.harp:
os.makedirs(responseJson['job-id'])
if args.analyze and args.final:
for f in responseJson['final-hars']:
fUrl = agent+f
p = subprocess.Popen(['wget', '-nv', fUrl], cwd=responseJson['job-id'])
rc = p.wait()
if rc != 0:
print 'Download %s failed' % fUrl
for f in responseJson['files']:
fUrl = agent+f
p = subprocess.Popen(['wget', '-nv', fUrl], cwd=responseJson['job-id'])
rc = p.wait()
if rc != 0:
print 'Download %s failed' % fUrl
if args.analyze and args.harp:
for f in responseJson['final-hars']:
fUrl = agent+f+'p'
print fUrl
def main():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter,\
description='A handy tool to perform remote test and retrieve result files')
parser.add_argument('agent', help='IPaddress:PORT of agent, no "http"')
parser.add_argument('config', help='Path to the config file')
parser.add_argument('-a', '--analyze', action='store_true', default=False, help='perform TCP analyze on remote test agent as well')
parser.add_argument('-n', '--noresult', action='store_true', default=False, help='don not try to download result file at all')
parser.add_argument('-f', '--final', action='store_true', default=False, help='only download the final har, if "--analyze" is used')
parser.add_argument('-p', '--harp', action='store_true', default=False, help='just print out the urls of the final har in jsonp format, if "--analyze" is used')
args = parser.parse_args()
execute(args)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a handy CLI test client<commit_after>
|
#!/usr/bin/env python
import json, argparse
import httplib, subprocess, os
def execute(args):
IP = args.agent.split(':')[0]
if len(args.agent.split(':')) == 2:
port = args.agent.split(':')[1]
else:
port = 80
agent = 'http://' + args.agent
with open(args.config, 'r') as f:
config = f.read()
print agent
headers = {"Content-type": "application/json"}
conn = httplib.HTTPConnection(IP, port)
conn.request("POST", "", config, headers)
response = conn.getresponse()
if not args.harp:
print response.status, response.reason
data = response.read()
if response.status != 200:
print data
return
responseJson = json.loads(data)
if not args.noresult and not args.harp:
os.makedirs(responseJson['job-id'])
if args.analyze and args.final:
for f in responseJson['final-hars']:
fUrl = agent+f
p = subprocess.Popen(['wget', '-nv', fUrl], cwd=responseJson['job-id'])
rc = p.wait()
if rc != 0:
print 'Download %s failed' % fUrl
for f in responseJson['files']:
fUrl = agent+f
p = subprocess.Popen(['wget', '-nv', fUrl], cwd=responseJson['job-id'])
rc = p.wait()
if rc != 0:
print 'Download %s failed' % fUrl
if args.analyze and args.harp:
for f in responseJson['final-hars']:
fUrl = agent+f+'p'
print fUrl
def main():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter,\
description='A handy tool to perform remote test and retrieve result files')
parser.add_argument('agent', help='IPaddress:PORT of agent, no "http"')
parser.add_argument('config', help='Path to the config file')
parser.add_argument('-a', '--analyze', action='store_true', default=False, help='perform TCP analyze on remote test agent as well')
parser.add_argument('-n', '--noresult', action='store_true', default=False, help='don not try to download result file at all')
parser.add_argument('-f', '--final', action='store_true', default=False, help='only download the final har, if "--analyze" is used')
parser.add_argument('-p', '--harp', action='store_true', default=False, help='just print out the urls of the final har in jsonp format, if "--analyze" is used')
args = parser.parse_args()
execute(args)
if __name__ == '__main__':
main()
|
Add a handy CLI test client#!/usr/bin/env python
import json, argparse
import httplib, subprocess, os
def execute(args):
IP = args.agent.split(':')[0]
if len(args.agent.split(':')) == 2:
port = args.agent.split(':')[1]
else:
port = 80
agent = 'http://' + args.agent
with open(args.config, 'r') as f:
config = f.read()
print agent
headers = {"Content-type": "application/json"}
conn = httplib.HTTPConnection(IP, port)
conn.request("POST", "", config, headers)
response = conn.getresponse()
if not args.harp:
print response.status, response.reason
data = response.read()
if response.status != 200:
print data
return
responseJson = json.loads(data)
if not args.noresult and not args.harp:
os.makedirs(responseJson['job-id'])
if args.analyze and args.final:
for f in responseJson['final-hars']:
fUrl = agent+f
p = subprocess.Popen(['wget', '-nv', fUrl], cwd=responseJson['job-id'])
rc = p.wait()
if rc != 0:
print 'Download %s failed' % fUrl
for f in responseJson['files']:
fUrl = agent+f
p = subprocess.Popen(['wget', '-nv', fUrl], cwd=responseJson['job-id'])
rc = p.wait()
if rc != 0:
print 'Download %s failed' % fUrl
if args.analyze and args.harp:
for f in responseJson['final-hars']:
fUrl = agent+f+'p'
print fUrl
def main():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter,\
description='A handy tool to perform remote test and retrieve result files')
parser.add_argument('agent', help='IPaddress:PORT of agent, no "http"')
parser.add_argument('config', help='Path to the config file')
parser.add_argument('-a', '--analyze', action='store_true', default=False, help='perform TCP analyze on remote test agent as well')
parser.add_argument('-n', '--noresult', action='store_true', default=False, help='don not try to download result file at all')
parser.add_argument('-f', '--final', action='store_true', default=False, help='only download the final har, if "--analyze" is used')
parser.add_argument('-p', '--harp', action='store_true', default=False, help='just print out the urls of the final har in jsonp format, if "--analyze" is used')
args = parser.parse_args()
execute(args)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a handy CLI test client<commit_after>#!/usr/bin/env python
import json, argparse
import httplib, subprocess, os
def execute(args):
IP = args.agent.split(':')[0]
if len(args.agent.split(':')) == 2:
port = args.agent.split(':')[1]
else:
port = 80
agent = 'http://' + args.agent
with open(args.config, 'r') as f:
config = f.read()
print agent
headers = {"Content-type": "application/json"}
conn = httplib.HTTPConnection(IP, port)
conn.request("POST", "", config, headers)
response = conn.getresponse()
if not args.harp:
print response.status, response.reason
data = response.read()
if response.status != 200:
print data
return
responseJson = json.loads(data)
if not args.noresult and not args.harp:
os.makedirs(responseJson['job-id'])
if args.analyze and args.final:
for f in responseJson['final-hars']:
fUrl = agent+f
p = subprocess.Popen(['wget', '-nv', fUrl], cwd=responseJson['job-id'])
rc = p.wait()
if rc != 0:
print 'Download %s failed' % fUrl
for f in responseJson['files']:
fUrl = agent+f
p = subprocess.Popen(['wget', '-nv', fUrl], cwd=responseJson['job-id'])
rc = p.wait()
if rc != 0:
print 'Download %s failed' % fUrl
if args.analyze and args.harp:
for f in responseJson['final-hars']:
fUrl = agent+f+'p'
print fUrl
def main():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter,\
description='A handy tool to perform remote test and retrieve result files')
parser.add_argument('agent', help='IPaddress:PORT of agent, no "http"')
parser.add_argument('config', help='Path to the config file')
parser.add_argument('-a', '--analyze', action='store_true', default=False, help='perform TCP analyze on remote test agent as well')
parser.add_argument('-n', '--noresult', action='store_true', default=False, help='don not try to download result file at all')
parser.add_argument('-f', '--final', action='store_true', default=False, help='only download the final har, if "--analyze" is used')
parser.add_argument('-p', '--harp', action='store_true', default=False, help='just print out the urls of the final har in jsonp format, if "--analyze" is used')
args = parser.parse_args()
execute(args)
if __name__ == '__main__':
main()
|
|
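A compact sketch of the request/response round trip the client in the record above drives, using the same httplib calls (Python 2). The agent address and config payload are placeholders, and the JSON keys are taken from the script itself rather than from any documented API.
import json
import httplib

config = json.dumps({"url": "http://example.com"})  # placeholder config body
headers = {"Content-type": "application/json"}
conn = httplib.HTTPConnection("192.0.2.10", 8000)   # illustrative agent address
conn.request("POST", "", config, headers)
response = conn.getresponse()
if response.status == 200:
    job = json.loads(response.read())
    # the script then wgets every path in job['files'] (plus job['final-hars']
    # when --analyze is set) into a directory named after job['job-id']
    print job['job-id']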
0774f8d159e9f749b89416bbdf0fe394e8083f6a
|
tools/usbd_vcp_test.py
|
tools/usbd_vcp_test.py
|
#!/usr/bin/env python2.7
import sys, serial, struct
port = '/dev/ttyACM0'
sp = serial.Serial(port, baudrate=115200, bytesize=serial.EIGHTBITS, parity=serial.PARITY_NONE,
xonxoff=False, rtscts=False, stopbits=serial.STOPBITS_ONE, timeout=None, dsrdtr=True)
sp.setDTR(True) # dsrdtr is ignored on Windows.
sp.write("snap")
sp.flush()
size = struct.unpack('<L', sp.read(4))[0]
img = sp.read(size)
sp.close()
with open("img.jpg", "w") as f:
f.write(img)
|
Add USB VCP test script.
|
Add USB VCP test script.
|
Python
|
mit
|
kwagyeman/openmv,kwagyeman/openmv,iabdalkader/openmv,iabdalkader/openmv,kwagyeman/openmv,openmv/openmv,iabdalkader/openmv,openmv/openmv,openmv/openmv,kwagyeman/openmv,openmv/openmv,iabdalkader/openmv
|
Add USB VCP test script.
|
#!/usr/bin/env python2.7
import sys, serial, struct
port = '/dev/ttyACM0'
sp = serial.Serial(port, baudrate=115200, bytesize=serial.EIGHTBITS, parity=serial.PARITY_NONE,
xonxoff=False, rtscts=False, stopbits=serial.STOPBITS_ONE, timeout=None, dsrdtr=True)
sp.setDTR(True) # dsrdtr is ignored on Windows.
sp.write("snap")
sp.flush()
size = struct.unpack('<L', sp.read(4))[0]
img = sp.read(size)
sp.close()
with open("img.jpg", "w") as f:
f.write(img)
|
<commit_before><commit_msg>Add USB VCP test script.<commit_after>
|
#!/usr/bin/env python2.7
import sys, serial, struct
port = '/dev/ttyACM0'
sp = serial.Serial(port, baudrate=115200, bytesize=serial.EIGHTBITS, parity=serial.PARITY_NONE,
xonxoff=False, rtscts=False, stopbits=serial.STOPBITS_ONE, timeout=None, dsrdtr=True)
sp.setDTR(True) # dsrdtr is ignored on Windows.
sp.write("snap")
sp.flush()
size = struct.unpack('<L', sp.read(4))[0]
img = sp.read(size)
sp.close()
with open("img.jpg", "w") as f:
f.write(img)
|
Add USB VCP test script.#!/usr/bin/env python2.7
import sys, serial, struct
port = '/dev/ttyACM0'
sp = serial.Serial(port, baudrate=115200, bytesize=serial.EIGHTBITS, parity=serial.PARITY_NONE,
xonxoff=False, rtscts=False, stopbits=serial.STOPBITS_ONE, timeout=None, dsrdtr=True)
sp.setDTR(True) # dsrdtr is ignored on Windows.
sp.write("snap")
sp.flush()
size = struct.unpack('<L', sp.read(4))[0]
img = sp.read(size)
sp.close()
with open("img.jpg", "w") as f:
f.write(img)
|
<commit_before><commit_msg>Add USB VCP test script.<commit_after>#!/usr/bin/env python2.7
import sys, serial, struct
port = '/dev/ttyACM0'
sp = serial.Serial(port, baudrate=115200, bytesize=serial.EIGHTBITS, parity=serial.PARITY_NONE,
xonxoff=False, rtscts=False, stopbits=serial.STOPBITS_ONE, timeout=None, dsrdtr=True)
sp.setDTR(True) # dsrdtr is ignored on Windows.
sp.write("snap")
sp.flush()
size = struct.unpack('<L', sp.read(4))[0]
img = sp.read(size)
sp.close()
with open("img.jpg", "w") as f:
f.write(img)
|
|
fcbb6f845cce5c5a4bb996cf394ecbe2d33fdbfa
|
tests/ep_canvas_test.py
|
tests/ep_canvas_test.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Test case for Energy Profle Canvas.
"""
import unittest
from catplot.ep_components.ep_canvas import EPCanvas
class EPCanvasTest(unittest.TestCase):
def setUp(self):
self.maxDiff = True
def test_construction_and_query(self):
""" Test we can construct ElementaryLine object correctly.
"""
canvas = EPCanvas(margin_ratio=0.2)
self.assertEqual(canvas.margin_ratio, 0.2)
self.assertListEqual(canvas.lines, [])
self.assertListEqual(canvas.shadow_lines, [])
self.assertTrue(canvas.figure)
self.assertTrue(canvas.axes)
# Check invalid reaction equation.
self.assertRaises(ValueError, EPCanvas, margin_ratio=-0.1)
if "__main__" == __name__:
suite = unittest.TestLoader().loadTestsFromTestCase(EPCanvasTest)
unittest.TextTestRunner(verbosity=2).run(suite)
|
Add test case for EPCanvas.
|
Add test case for EPCanvas.
|
Python
|
mit
|
PytLab/catplot
|
Add test case for EPCanvas.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Test case for Energy Profle Canvas.
"""
import unittest
from catplot.ep_components.ep_canvas import EPCanvas
class EPCanvasTest(unittest.TestCase):
def setUp(self):
self.maxDiff = True
def test_construction_and_query(self):
""" Test we can construct ElementaryLine object correctly.
"""
canvas = EPCanvas(margin_ratio=0.2)
self.assertEqual(canvas.margin_ratio, 0.2)
self.assertListEqual(canvas.lines, [])
self.assertListEqual(canvas.shadow_lines, [])
self.assertTrue(canvas.figure)
self.assertTrue(canvas.axes)
# Check invalid reaction equation.
self.assertRaises(ValueError, EPCanvas, margin_ratio=-0.1)
if "__main__" == __name__:
suite = unittest.TestLoader().loadTestsFromTestCase(EPCanvasTest)
unittest.TextTestRunner(verbosity=2).run(suite)
|
<commit_before><commit_msg>Add test case for EPCanvas.<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Test case for Energy Profle Canvas.
"""
import unittest
from catplot.ep_components.ep_canvas import EPCanvas
class EPCanvasTest(unittest.TestCase):
def setUp(self):
self.maxDiff = True
def test_construction_and_query(self):
""" Test we can construct ElementaryLine object correctly.
"""
canvas = EPCanvas(margin_ratio=0.2)
self.assertEqual(canvas.margin_ratio, 0.2)
self.assertListEqual(canvas.lines, [])
self.assertListEqual(canvas.shadow_lines, [])
self.assertTrue(canvas.figure)
self.assertTrue(canvas.axes)
# Check invalid reaction equation.
self.assertRaises(ValueError, EPCanvas, margin_ratio=-0.1)
if "__main__" == __name__:
suite = unittest.TestLoader().loadTestsFromTestCase(EPCanvasTest)
unittest.TextTestRunner(verbosity=2).run(suite)
|
Add test case for EPCanvas.#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Test case for Energy Profle Canvas.
"""
import unittest
from catplot.ep_components.ep_canvas import EPCanvas
class EPCanvasTest(unittest.TestCase):
def setUp(self):
self.maxDiff = True
def test_construction_and_query(self):
""" Test we can construct ElementaryLine object correctly.
"""
canvas = EPCanvas(margin_ratio=0.2)
self.assertEqual(canvas.margin_ratio, 0.2)
self.assertListEqual(canvas.lines, [])
self.assertListEqual(canvas.shadow_lines, [])
self.assertTrue(canvas.figure)
self.assertTrue(canvas.axes)
# Check invalid reaction equation.
self.assertRaises(ValueError, EPCanvas, margin_ratio=-0.1)
if "__main__" == __name__:
suite = unittest.TestLoader().loadTestsFromTestCase(EPCanvasTest)
unittest.TextTestRunner(verbosity=2).run(suite)
|
<commit_before><commit_msg>Add test case for EPCanvas.<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Test case for Energy Profle Canvas.
"""
import unittest
from catplot.ep_components.ep_canvas import EPCanvas
class EPCanvasTest(unittest.TestCase):
def setUp(self):
self.maxDiff = True
def test_construction_and_query(self):
""" Test we can construct ElementaryLine object correctly.
"""
canvas = EPCanvas(margin_ratio=0.2)
self.assertEqual(canvas.margin_ratio, 0.2)
self.assertListEqual(canvas.lines, [])
self.assertListEqual(canvas.shadow_lines, [])
self.assertTrue(canvas.figure)
self.assertTrue(canvas.axes)
# Check invalid reaction equation.
self.assertRaises(ValueError, EPCanvas, margin_ratio=-0.1)
if "__main__" == __name__:
suite = unittest.TestLoader().loadTestsFromTestCase(EPCanvasTest)
unittest.TextTestRunner(verbosity=2).run(suite)
|
|
437b9c92e59215b41464f54a4040bf1be9d41d2d
|
create_window.py
|
create_window.py
|
import pygame
pygame.init()
#-- SCREEN CHARACTERISTICS ------------------------->>>
background_color = (255,255,255)
(width, height) = (300, 200)
#-- RENDER SCREEN ---------------------------------->>>
screen = pygame.display.set_mode((width, height))
screen.fill(background_color)
pygame.display.flip()
#-- RUN LOOP --------------------------------------->>>
running = True
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
|
Add module capable of creating a window when run
|
Add module capable of creating a window when run
|
Python
|
mit
|
withtwoemms/pygame-explorations
|
Add module capable of creating a window when run
|
import pygame
pygame.init()
#-- SCREEN CHARACTERISTICS ------------------------->>>
background_color = (255,255,255)
(width, height) = (300, 200)
#-- RENDER SCREEN ---------------------------------->>>
screen = pygame.display.set_mode((width, height))
screen.fill(background_color)
pygame.display.flip()
#-- RUN LOOP --------------------------------------->>>
running = True
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
|
<commit_before><commit_msg>Add module capable of creating a window when run<commit_after>
|
import pygame
pygame.init()
#-- SCREEN CHARACTERISTICS ------------------------->>>
background_color = (255,255,255)
(width, height) = (300, 200)
#-- RENDER SCREEN ---------------------------------->>>
screen = pygame.display.set_mode((width, height))
screen.fill(background_color)
pygame.display.flip()
#-- RUN LOOP --------------------------------------->>>
running = True
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
|
Add module capable of creating a window when runimport pygame
pygame.init()
#-- SCREEN CHARACTERISTICS ------------------------->>>
background_color = (255,255,255)
(width, height) = (300, 200)
#-- RENDER SCREEN ---------------------------------->>>
screen = pygame.display.set_mode((width, height))
screen.fill(background_color)
pygame.display.flip()
#-- RUN LOOP --------------------------------------->>>
running = True
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
|
<commit_before><commit_msg>Add module capable of creating a window when run<commit_after>import pygame
pygame.init()
#-- SCREEN CHARACTERISTICS ------------------------->>>
background_color = (255,255,255)
(width, height) = (300, 200)
#-- RENDER SCREEN ---------------------------------->>>
screen = pygame.display.set_mode((width, height))
screen.fill(background_color)
pygame.display.flip()
#-- RUN LOOP --------------------------------------->>>
running = True
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
|
|
fdd8aecba5aea0faae5c17062af33d61f70f0e70
|
test/benchmark/decorate.py
|
test/benchmark/decorate.py
|
from __future__ import absolute_import
import functools
import black_magic.decorator
from test.benchmark import _common
class Functools(_common.Base):
def __init__(self):
self._decorator = functools.wraps(_common.wrap)
def __call__(self):
self._decorator(_common.func)
class BlackMagicDecorator(_common.Base):
def __init__(self):
self._decorator = black_magic.decorator.wraps(_common.wrap)
def __call__(self):
self._decorator(_common.func)
if __name__ == '__main__':
_common.main(Functools, BlackMagicDecorator)
|
Add some random benchmark module
|
Add some random benchmark module
This module existed for some time in my local working dir. TBH, I forgot
about its precise purpose, but it doesn't really hurt to add it right
now, so whatever…
|
Python
|
unlicense
|
coldfix/black-magic
|
Add some random benchmark module
This module existed for some time in my local working dir. TBH, I forgot
about its precise purpose, but it doesn't really hurt to add it right
now, so whatever…
|
from __future__ import absolute_import
import functools
import black_magic.decorator
from test.benchmark import _common
class Functools(_common.Base):
def __init__(self):
self._decorator = functools.wraps(_common.wrap)
def __call__(self):
self._decorator(_common.func)
class BlackMagicDecorator(_common.Base):
def __init__(self):
self._decorator = black_magic.decorator.wraps(_common.wrap)
def __call__(self):
self._decorator(_common.func)
if __name__ == '__main__':
_common.main(Functools, BlackMagicDecorator)
|
<commit_before><commit_msg>Add some random benchmark module
This module existed for some time in my local working dir. TBH, I forgot
about its precise purpose, but it doesn't really hurt to add it right
now, so whatever…<commit_after>
|
from __future__ import absolute_import
import functools
import black_magic.decorator
from test.benchmark import _common
class Functools(_common.Base):
def __init__(self):
self._decorator = functools.wraps(_common.wrap)
def __call__(self):
self._decorator(_common.func)
class BlackMagicDecorator(_common.Base):
def __init__(self):
self._decorator = black_magic.decorator.wraps(_common.wrap)
def __call__(self):
self._decorator(_common.func)
if __name__ == '__main__':
_common.main(Functools, BlackMagicDecorator)
|
Add some random benchmark module
This module existed for some time in my local working dir. TBH, I forgot
about its precise purpose, but it doesn't really hurt to add it right
now, so whatever…from __future__ import absolute_import
import functools
import black_magic.decorator
from test.benchmark import _common
class Functools(_common.Base):
def __init__(self):
self._decorator = functools.wraps(_common.wrap)
def __call__(self):
self._decorator(_common.func)
class BlackMagicDecorator(_common.Base):
def __init__(self):
self._decorator = black_magic.decorator.wraps(_common.wrap)
def __call__(self):
self._decorator(_common.func)
if __name__ == '__main__':
_common.main(Functools, BlackMagicDecorator)
|
<commit_before><commit_msg>Add some random benchmark module
This module existed for some time in my local working dir. TBH, I forgot
about its precise purpose, but it doesn't really hurt to add it right
now, so whatever…<commit_after>from __future__ import absolute_import
import functools
import black_magic.decorator
from test.benchmark import _common
class Functools(_common.Base):
def __init__(self):
self._decorator = functools.wraps(_common.wrap)
def __call__(self):
self._decorator(_common.func)
class BlackMagicDecorator(_common.Base):
def __init__(self):
self._decorator = black_magic.decorator.wraps(_common.wrap)
def __call__(self):
self._decorator(_common.func)
if __name__ == '__main__':
_common.main(Functools, BlackMagicDecorator)
|
|
49ae354a13c33cceffe616b0906819257432318c
|
RpiAir/client_sds011.py
|
RpiAir/client_sds011.py
|
import sys
from sds011 import Sds011Reader
from mqttsender import client
if len(sys.argv) > 1:
port = sys.argv[1]
else:
port = '/dev/ttyUSB0'
sds011 = Sds011Reader(port)
for pm25, pm10, ok in sds011.read_forever3():
print("PM 2.5: {} μg/m^3 PM 10: {} μg/m^3 CRC={}".format(pm25, pm10, "OK" if ok else "NOK"))
msg = '{},{},{}'.format(pm25, pm10, "OK" if ok else "NOK")
(rc, mid) = client.publish("sensor/sds011", msg, qos=0)
|
Add first sensor reader (SDS011) client script
|
Add first sensor reader (SDS011) client script
|
Python
|
mit
|
aapris/VekotinVerstas,aapris/VekotinVerstas
|
Add first sensor reader (SDS011) client script
|
import sys
from sds011 import Sds011Reader
from mqttsender import client
if len(sys.argv) > 1:
port = sys.argv[1]
else:
port = '/dev/ttyUSB0'
sds011 = Sds011Reader(port)
for pm25, pm10, ok in sds011.read_forever3():
print("PM 2.5: {} μg/m^3 PM 10: {} μg/m^3 CRC={}".format(pm25, pm10, "OK" if ok else "NOK"))
msg = '{},{},{}'.format(pm25, pm10, "OK" if ok else "NOK")
(rc, mid) = client.publish("sensor/sds011", msg, qos=0)
|
<commit_before><commit_msg>Add first sensor reader (SDS011) client script<commit_after>
|
import sys
from sds011 import Sds011Reader
from mqttsender import client
if len(sys.argv) > 1:
port = sys.argv[1]
else:
port = '/dev/ttyUSB0'
sds011 = Sds011Reader(port)
for pm25, pm10, ok in sds011.read_forever3():
print("PM 2.5: {} μg/m^3 PM 10: {} μg/m^3 CRC={}".format(pm25, pm10, "OK" if ok else "NOK"))
msg = '{},{},{}'.format(pm25, pm10, "OK" if ok else "NOK")
(rc, mid) = client.publish("sensor/sds011", msg, qos=0)
|
Add first sensor reader (SDS011) client scriptimport sys
from sds011 import Sds011Reader
from mqttsender import client
if len(sys.argv) > 1:
port = sys.argv[1]
else:
port = '/dev/ttyUSB0'
sds011 = Sds011Reader(port)
for pm25, pm10, ok in sds011.read_forever3():
print("PM 2.5: {} μg/m^3 PM 10: {} μg/m^3 CRC={}".format(pm25, pm10, "OK" if ok else "NOK"))
msg = '{},{},{}'.format(pm25, pm10, "OK" if ok else "NOK")
(rc, mid) = client.publish("sensor/sds011", msg, qos=0)
|
<commit_before><commit_msg>Add first sensor reader (SDS011) client script<commit_after>import sys
from sds011 import Sds011Reader
from mqttsender import client
if len(sys.argv) > 1:
port = sys.argv[1]
else:
port = '/dev/ttyUSB0'
sds011 = Sds011Reader(port)
for pm25, pm10, ok in sds011.read_forever3():
print("PM 2.5: {} μg/m^3 PM 10: {} μg/m^3 CRC={}".format(pm25, pm10, "OK" if ok else "NOK"))
msg = '{},{},{}'.format(pm25, pm10, "OK" if ok else "NOK")
(rc, mid) = client.publish("sensor/sds011", msg, qos=0)
|
|
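For the publisher in the record above, a matching subscriber might look like the sketch below. It assumes a broker reachable on localhost and uses the paho-mqtt 1.x client API; the mqttsender module is not shown in the record, so this is an illustration, not its actual counterpart.
import paho.mqtt.client as mqtt

def on_message(client, userdata, msg):
    # payload format mirrors the publisher above: "pm25,pm10,OK" or "pm25,pm10,NOK"
    pm25, pm10, crc = msg.payload.decode().split(',')
    print("PM2.5={} PM10={} CRC={}".format(pm25, pm10, crc))

client = mqtt.Client()
client.on_message = on_message
client.connect("localhost", 1883)   # broker address is an assumption
client.subscribe("sensor/sds011")
client.loop_forever()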
5063bed38d64843387a681f72734b3cc1e9d6394
|
tools/create_images_xml.py
|
tools/create_images_xml.py
|
#! /usr/bin/python3
import sys
import argparse
import xml_utils as u
import datetime
import pdb
import os
from argparse import RawTextHelpFormatter
from collections import defaultdict
##------------------------------------------------------------
## can be called with:
## find_bears *.xml dirs
##------------------------------------------------------------
def main (argv) :
parser = argparse.ArgumentParser(description='\nCreate xml from images in specified files and directories.\n \t example: create_images_xml -out imgs.xml abc.jpg bc/images ./bf/images\n',
formatter_class=RawTextHelpFormatter)
# parser.formatter.max_help_position = 50
parser.add_argument ('files', nargs='+')
parser.add_argument ('-out', '--output',
help='write output to specified file. Defaults to imgs.xml')
parser.add_argument ('-v', '--verbosity', type=int, default=1,
choices=[0, 1, 2, 3], help='print more messages')
# choices=[0, 1, 2, 3], help=argparse.SUPPRESS)
# help="increase output verbosity"
args = parser.parse_args()
# print "ls : ", args.ls
# print "files: ", args.files
if not args.output :
args.output = 'imgs.xml'
u.set_verbosity (args.verbosity)
u.create_imgs_xml (args.files, args.output)
if __name__ == "__main__":
main (sys.argv)
|
Create xml of all jpg/png from list of files & directories.
|
Create xml of all jpg/png from list of files & directories.
|
Python
|
mit
|
hypraptive/bearid,hypraptive/bearid,hypraptive/bearid
|
Create xml of all jpg/png from list of files & directories.
|
#! /usr/bin/python3
import sys
import argparse
import xml_utils as u
import datetime
import pdb
import os
from argparse import RawTextHelpFormatter
from collections import defaultdict
##------------------------------------------------------------
## can be called with:
## find_bears *.xml dirs
##------------------------------------------------------------
def main (argv) :
parser = argparse.ArgumentParser(description='\nCreate xml from images in specified files and directories.\n \t example: create_images_xml -out imgs.xml abc.jpg bc/images ./bf/images\n',
formatter_class=RawTextHelpFormatter)
# parser.formatter.max_help_position = 50
parser.add_argument ('files', nargs='+')
parser.add_argument ('-out', '--output',
help='write output to specified file. Defaults to imgs.xml')
parser.add_argument ('-v', '--verbosity', type=int, default=1,
choices=[0, 1, 2, 3], help='print more messages')
# choices=[0, 1, 2, 3], help=argparse.SUPPRESS)
# help="increase output verbosity"
args = parser.parse_args()
# print "ls : ", args.ls
# print "files: ", args.files
if not args.output :
args.output = 'imgs.xml'
u.set_verbosity (args.verbosity)
u.create_imgs_xml (args.files, args.output)
if __name__ == "__main__":
main (sys.argv)
|
<commit_before><commit_msg>Create xml of all jpg/png from list of files & directories.<commit_after>
|
#! /usr/bin/python3
import sys
import argparse
import xml_utils as u
import datetime
import pdb
import os
from argparse import RawTextHelpFormatter
from collections import defaultdict
##------------------------------------------------------------
## can be called with:
## find_bears *.xml dirs
##------------------------------------------------------------
def main (argv) :
parser = argparse.ArgumentParser(description='\nCreate xml from images in specified files and directories.\n \t example: create_images_xml -out imgs.xml abc.jpg bc/images ./bf/images\n',
formatter_class=RawTextHelpFormatter)
# parser.formatter.max_help_position = 50
parser.add_argument ('files', nargs='+')
parser.add_argument ('-out', '--output',
help='write output to specified file. Defaults to imgs.xml')
parser.add_argument ('-v', '--verbosity', type=int, default=1,
choices=[0, 1, 2, 3], help='print more messages')
# choices=[0, 1, 2, 3], help=argparse.SUPPRESS)
# help="increase output verbosity"
args = parser.parse_args()
# print "ls : ", args.ls
# print "files: ", args.files
if not args.output :
args.output = 'imgs.xml'
u.set_verbosity (args.verbosity)
u.create_imgs_xml (args.files, args.output)
if __name__ == "__main__":
main (sys.argv)
|
Create xml of all jpg/png from list of files & directories.#! /usr/bin/python3
import sys
import argparse
import xml_utils as u
import datetime
import pdb
import os
from argparse import RawTextHelpFormatter
from collections import defaultdict
##------------------------------------------------------------
## can be called with:
## find_bears *.xml dirs
##------------------------------------------------------------
def main (argv) :
parser = argparse.ArgumentParser(description='\nCreate xml from images in specified files and directories.\n \t example: create_images_xml -out imgs.xml abc.jpg bc/images ./bf/images\n',
formatter_class=RawTextHelpFormatter)
# parser.formatter.max_help_position = 50
parser.add_argument ('files', nargs='+')
parser.add_argument ('-out', '--output',
help='write output to specified file. Defaults to imgs.xml')
parser.add_argument ('-v', '--verbosity', type=int, default=1,
choices=[0, 1, 2, 3], help='print more messages')
# choices=[0, 1, 2, 3], help=argparse.SUPPRESS)
# help="increase output verbosity"
args = parser.parse_args()
# print "ls : ", args.ls
# print "files: ", args.files
if not args.output :
args.output = 'imgs.xml'
u.set_verbosity (args.verbosity)
u.create_imgs_xml (args.files, args.output)
if __name__ == "__main__":
main (sys.argv)
|
<commit_before><commit_msg>Create xml of all jpg/png from list of files & directories.<commit_after>#! /usr/bin/python3
import sys
import argparse
import xml_utils as u
import datetime
import pdb
import os
from argparse import RawTextHelpFormatter
from collections import defaultdict
##------------------------------------------------------------
## can be called with:
## find_bears *.xml dirs
##------------------------------------------------------------
def main (argv) :
parser = argparse.ArgumentParser(description='\nCreate xml from images in specified files and directories.\n \t example: create_images_xml -out imgs.xml abc.jpg bc/images ./bf/images\n',
formatter_class=RawTextHelpFormatter)
# parser.formatter.max_help_position = 50
parser.add_argument ('files', nargs='+')
parser.add_argument ('-out', '--output',
help='write output to specified file. Defaults to imgs.xml')
parser.add_argument ('-v', '--verbosity', type=int, default=1,
choices=[0, 1, 2, 3], help='print more messages')
# choices=[0, 1, 2, 3], help=argparse.SUPPRESS)
# help="increase output verbosity"
args = parser.parse_args()
# print "ls : ", args.ls
# print "files: ", args.files
if not args.output :
args.output = 'imgs.xml'
u.set_verbosity (args.verbosity)
u.create_imgs_xml (args.files, args.output)
if __name__ == "__main__":
main (sys.argv)
|
|
b07d9e5218ba1075ec5a2c8cf0d62c9c5ee0dd35
|
tests/debug_test.py
|
tests/debug_test.py
|
#! /usr/bin/env python
# -*- coding: iso-8859-1 -*-
# vi:ts=4:et
import pycurl
import unittest
from . import app
from . import runwsgi
setup_module, teardown_module = runwsgi.app_runner_setup((app.app, 8380))
class DebugTest(unittest.TestCase):
def setUp(self):
self.curl = pycurl.Curl()
self.debug_entries = []
def tearDown(self):
self.curl.close()
def debug_function(self, t, b):
self.debug_entries.append((t, b))
def test_perform_get_with_debug_function(self):
self.curl.setopt(pycurl.VERBOSE, 1)
self.curl.setopt(pycurl.DEBUGFUNCTION, self.debug_function)
self.curl.setopt(pycurl.URL, 'http://localhost:8380/success')
self.curl.perform()
# Some checks with no particular intent
self.check(0, 'About to connect')
self.check(0, 'Connected to localhost')
self.check(0, 'port 8380')
# request
self.check(2, 'GET /success HTTP/1.1')
# response
self.check(1, 'HTTP/1.0 200 OK')
self.check(1, 'Content-Length: 7')
# result
self.check(3, 'success')
def check(self, wanted_t, wanted_b):
for t, b in self.debug_entries:
if t == wanted_t and wanted_b in b:
return
assert False, "%d: %s not found in debug entries" % (wanted_t, wanted_b)
|
Debug test, ported from the old debug test
|
Debug test, ported from the old debug test
|
Python
|
lgpl-2.1
|
pycurl/pycurl,p/pycurl-archived,pycurl/pycurl,p/pycurl-archived,pycurl/pycurl,p/pycurl-archived
|
Debug test, ported from the old debug test
|
#! /usr/bin/env python
# -*- coding: iso-8859-1 -*-
# vi:ts=4:et
import pycurl
import unittest
from . import app
from . import runwsgi
setup_module, teardown_module = runwsgi.app_runner_setup((app.app, 8380))
class DebugTest(unittest.TestCase):
def setUp(self):
self.curl = pycurl.Curl()
self.debug_entries = []
def tearDown(self):
self.curl.close()
def debug_function(self, t, b):
self.debug_entries.append((t, b))
def test_perform_get_with_debug_function(self):
self.curl.setopt(pycurl.VERBOSE, 1)
self.curl.setopt(pycurl.DEBUGFUNCTION, self.debug_function)
self.curl.setopt(pycurl.URL, 'http://localhost:8380/success')
self.curl.perform()
# Some checks with no particular intent
self.check(0, 'About to connect')
self.check(0, 'Connected to localhost')
self.check(0, 'port 8380')
# request
self.check(2, 'GET /success HTTP/1.1')
# response
self.check(1, 'HTTP/1.0 200 OK')
self.check(1, 'Content-Length: 7')
# result
self.check(3, 'success')
def check(self, wanted_t, wanted_b):
for t, b in self.debug_entries:
if t == wanted_t and wanted_b in b:
return
assert False, "%d: %s not found in debug entries" % (wanted_t, wanted_b)
|
<commit_before><commit_msg>Debug test, ported from the old debug test<commit_after>
|
#! /usr/bin/env python
# -*- coding: iso-8859-1 -*-
# vi:ts=4:et
import pycurl
import unittest
from . import app
from . import runwsgi
setup_module, teardown_module = runwsgi.app_runner_setup((app.app, 8380))
class DebugTest(unittest.TestCase):
def setUp(self):
self.curl = pycurl.Curl()
self.debug_entries = []
def tearDown(self):
self.curl.close()
def debug_function(self, t, b):
self.debug_entries.append((t, b))
def test_perform_get_with_debug_function(self):
self.curl.setopt(pycurl.VERBOSE, 1)
self.curl.setopt(pycurl.DEBUGFUNCTION, self.debug_function)
self.curl.setopt(pycurl.URL, 'http://localhost:8380/success')
self.curl.perform()
# Some checks with no particular intent
self.check(0, 'About to connect')
self.check(0, 'Connected to localhost')
self.check(0, 'port 8380')
# request
self.check(2, 'GET /success HTTP/1.1')
# response
self.check(1, 'HTTP/1.0 200 OK')
self.check(1, 'Content-Length: 7')
# result
self.check(3, 'success')
def check(self, wanted_t, wanted_b):
for t, b in self.debug_entries:
if t == wanted_t and wanted_b in b:
return
assert False, "%d: %s not found in debug entries" % (wanted_t, wanted_b)
|
Debug test, ported from the old debug test#! /usr/bin/env python
# -*- coding: iso-8859-1 -*-
# vi:ts=4:et
import pycurl
import unittest
from . import app
from . import runwsgi
setup_module, teardown_module = runwsgi.app_runner_setup((app.app, 8380))
class DebugTest(unittest.TestCase):
def setUp(self):
self.curl = pycurl.Curl()
self.debug_entries = []
def tearDown(self):
self.curl.close()
def debug_function(self, t, b):
self.debug_entries.append((t, b))
def test_perform_get_with_debug_function(self):
self.curl.setopt(pycurl.VERBOSE, 1)
self.curl.setopt(pycurl.DEBUGFUNCTION, self.debug_function)
self.curl.setopt(pycurl.URL, 'http://localhost:8380/success')
self.curl.perform()
# Some checks with no particular intent
self.check(0, 'About to connect')
self.check(0, 'Connected to localhost')
self.check(0, 'port 8380')
# request
self.check(2, 'GET /success HTTP/1.1')
# response
self.check(1, 'HTTP/1.0 200 OK')
self.check(1, 'Content-Length: 7')
# result
self.check(3, 'success')
def check(self, wanted_t, wanted_b):
for t, b in self.debug_entries:
if t == wanted_t and wanted_b in b:
return
assert False, "%d: %s not found in debug entries" % (wanted_t, wanted_b)
|
<commit_before><commit_msg>Debug test, ported from the old debug test<commit_after>#! /usr/bin/env python
# -*- coding: iso-8859-1 -*-
# vi:ts=4:et
import pycurl
import unittest
from . import app
from . import runwsgi
setup_module, teardown_module = runwsgi.app_runner_setup((app.app, 8380))
class DebugTest(unittest.TestCase):
def setUp(self):
self.curl = pycurl.Curl()
self.debug_entries = []
def tearDown(self):
self.curl.close()
def debug_function(self, t, b):
self.debug_entries.append((t, b))
def test_perform_get_with_debug_function(self):
self.curl.setopt(pycurl.VERBOSE, 1)
self.curl.setopt(pycurl.DEBUGFUNCTION, self.debug_function)
self.curl.setopt(pycurl.URL, 'http://localhost:8380/success')
self.curl.perform()
# Some checks with no particular intent
self.check(0, 'About to connect')
self.check(0, 'Connected to localhost')
self.check(0, 'port 8380')
# request
self.check(2, 'GET /success HTTP/1.1')
# response
self.check(1, 'HTTP/1.0 200 OK')
self.check(1, 'Content-Length: 7')
# result
self.check(3, 'success')
def check(self, wanted_t, wanted_b):
for t, b in self.debug_entries:
if t == wanted_t and wanted_b in b:
return
assert False, "%d: %s not found in debug entries" % (wanted_t, wanted_b)
|
|
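The same DEBUGFUNCTION hook exercised by the test above works on any transfer; a minimal sketch follows. The URL is arbitrary, and the type codes in the comment are the ones the test's check() calls rely on.
import pycurl

entries = []

def debug(msg_type, data):
    # 0 = informational text, 1 = header in, 2 = header out, 3 = body data in
    entries.append((msg_type, data))

c = pycurl.Curl()
c.setopt(pycurl.URL, 'http://example.com/')
c.setopt(pycurl.VERBOSE, 1)
c.setopt(pycurl.DEBUGFUNCTION, debug)
c.setopt(pycurl.WRITEFUNCTION, lambda chunk: None)  # discard the response body
c.perform()
c.close()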
946b693e52fca4e55f0d4dd9c07edd609f26297f
|
tests/test_score.py
|
tests/test_score.py
|
from toolshed.importer import create_project
from toolshed.updater import update_projects_score
def test_oauth_rankings():
flask_dance = create_project(pypi_url="https://pypi.python.org/pypi/Flask-Dance", github_url="https://github.com/singingwolfboy/flask-dance")
flask_oauth = create_project(pypi_url="https://pypi.python.org/pypi/Flask-OAuth", github_url="https://github.com/mitsuhiko/flask-oauth")
flask_oauthlib = create_project(pypi_url="https://pypi.python.org/pypi/Flask-OAuthlib", github_url="https://github.com/lepture/flask-oauthlib")
project_list = [flask_dance, flask_oauth, flask_oauthlib]
update_projects_score(project_list)
assert flask_oauthlib.score > flask_oauth.score and flask_oauth.score > flask_dance.score
|
Put in sanity-check for project scores.
|
Put in sanity-check for project scores.
|
Python
|
mit
|
PythonClutch/python-clutch,PythonClutch/python-clutch,PythonClutch/python-clutch
|
Put in sanity-check for project scores.
|
from toolshed.importer import create_project
from toolshed.updater import update_projects_score
def test_oauth_rankings():
flask_dance = create_project(pypi_url="https://pypi.python.org/pypi/Flask-Dance", github_url="https://github.com/singingwolfboy/flask-dance")
flask_oauth = create_project(pypi_url="https://pypi.python.org/pypi/Flask-OAuth", github_url="https://github.com/mitsuhiko/flask-oauth")
flask_oauthlib = create_project(pypi_url="https://pypi.python.org/pypi/Flask-OAuthlib", github_url="https://github.com/lepture/flask-oauthlib")
project_list = [flask_dance, flask_oauth, flask_oauthlib]
update_projects_score(project_list)
assert flask_oauthlib.score > flask_oauth.score and flask_oauth.score > flask_dance.score
|
<commit_before><commit_msg>Put in sanity-check for project scores.<commit_after>
|
from toolshed.importer import create_project
from toolshed.updater import update_projects_score
def test_oauth_rankings():
flask_dance = create_project(pypi_url="https://pypi.python.org/pypi/Flask-Dance", github_url="https://github.com/singingwolfboy/flask-dance")
flask_oauth = create_project(pypi_url="https://pypi.python.org/pypi/Flask-OAuth", github_url="https://github.com/mitsuhiko/flask-oauth")
flask_oauthlib = create_project(pypi_url="https://pypi.python.org/pypi/Flask-OAuthlib", github_url="https://github.com/lepture/flask-oauthlib")
project_list = [flask_dance, flask_oauth, flask_oauthlib]
update_projects_score(project_list)
assert flask_oauthlib.score > flask_oauth.score and flask_oauth.score > flask_dance.score
|
Put in sanity-check for project scores.from toolshed.importer import create_project
from toolshed.updater import update_projects_score
def test_oauth_rankings():
flask_dance = create_project(pypi_url="https://pypi.python.org/pypi/Flask-Dance", github_url="https://github.com/singingwolfboy/flask-dance")
flask_oauth = create_project(pypi_url="https://pypi.python.org/pypi/Flask-OAuth", github_url="https://github.com/mitsuhiko/flask-oauth")
flask_oauthlib = create_project(pypi_url="https://pypi.python.org/pypi/Flask-OAuthlib", github_url="https://github.com/lepture/flask-oauthlib")
project_list = [flask_dance, flask_oauth, flask_oauthlib]
update_projects_score(project_list)
assert flask_oauthlib.score > flask_oauth.score and flask_oauth.score > flask_dance.score
|
<commit_before><commit_msg>Put in sanity-check for project scores.<commit_after>from toolshed.importer import create_project
from toolshed.updater import update_projects_score
def test_oauth_rankings():
flask_dance = create_project(pypi_url="https://pypi.python.org/pypi/Flask-Dance", github_url="https://github.com/singingwolfboy/flask-dance")
flask_oauth = create_project(pypi_url="https://pypi.python.org/pypi/Flask-OAuth", github_url="https://github.com/mitsuhiko/flask-oauth")
flask_oauthlib = create_project(pypi_url="https://pypi.python.org/pypi/Flask-OAuthlib", github_url="https://github.com/lepture/flask-oauthlib")
project_list = [flask_dance, flask_oauth, flask_oauthlib]
update_projects_score(project_list)
assert flask_oauthlib.score > flask_oauth.score and flask_oauth.score > flask_dance.score
|
|
f670fabfecb6dbcf2ce5bcc3e312d61064463820
|
tests/test_utils.py
|
tests/test_utils.py
|
# -*- coding: utf-8 -*-
"""
colorful
~~~~~~~~
Terminal string styling done right, in Python.
:copyright: (c) 2017 by Timo Furrer <tuxtimo@gmail.com>
:license: MIT, see LICENSE for more details.
"""
import os
import pytest
# do not overwrite module
os.environ['COLORFUL_NO_MODULE_OVERWRITE'] = '1'
import colorful.utils as utils # noqa
@pytest.mark.parametrize('hex_value', [
'#FFFFFF',
'#0000FF',
'#FF0000',
'#00FF00',
'#808080',
'#FFFF00',
'#00FFFF',
'#EF8BA0',
])
def test_hex_to_rgb_conversion(hex_value):
"""
Test the conversion from a RGB hex value to a RGB channel triplet
"""
red, green, blue = utils.hex_to_rgb(hex_value)
assert '#{:02X}{:02X}{:02X}'.format(red, green, blue) == hex_value
|
Add tests for utils module
|
Add tests for utils module
|
Python
|
mit
|
timofurrer/colorful
|
Add tests for utils module
|
# -*- coding: utf-8 -*-
"""
colorful
~~~~~~~~
Terminal string styling done right, in Python.
:copyright: (c) 2017 by Timo Furrer <tuxtimo@gmail.com>
:license: MIT, see LICENSE for more details.
"""
import os
import pytest
# do not overwrite module
os.environ['COLORFUL_NO_MODULE_OVERWRITE'] = '1'
import colorful.utils as utils # noqa
@pytest.mark.parametrize('hex_value', [
'#FFFFFF',
'#0000FF',
'#FF0000',
'#00FF00',
'#808080',
'#FFFF00',
'#00FFFF',
'#EF8BA0',
])
def test_hex_to_rgb_conversion(hex_value):
"""
Test the conversion from a RGB hex value to a RGB channel triplet
"""
red, green, blue = utils.hex_to_rgb(hex_value)
assert '#{:02X}{:02X}{:02X}'.format(red, green, blue) == hex_value
|
<commit_before><commit_msg>Add tests for utils module<commit_after>
|
# -*- coding: utf-8 -*-
"""
colorful
~~~~~~~~
Terminal string styling done right, in Python.
:copyright: (c) 2017 by Timo Furrer <tuxtimo@gmail.com>
:license: MIT, see LICENSE for more details.
"""
import os
import pytest
# do not overwrite module
os.environ['COLORFUL_NO_MODULE_OVERWRITE'] = '1'
import colorful.utils as utils # noqa
@pytest.mark.parametrize('hex_value', [
'#FFFFFF',
'#0000FF',
'#FF0000',
'#00FF00',
'#808080',
'#FFFF00',
'#00FFFF',
'#EF8BA0',
])
def test_hex_to_rgb_conversion(hex_value):
"""
Test the conversion from a RGB hex value to a RGB channel triplet
"""
red, green, blue = utils.hex_to_rgb(hex_value)
assert '#{:02X}{:02X}{:02X}'.format(red, green, blue) == hex_value
|
Add tests for utils module# -*- coding: utf-8 -*-
"""
colorful
~~~~~~~~
Terminal string styling done right, in Python.
:copyright: (c) 2017 by Timo Furrer <tuxtimo@gmail.com>
:license: MIT, see LICENSE for more details.
"""
import os
import pytest
# do not overwrite module
os.environ['COLORFUL_NO_MODULE_OVERWRITE'] = '1'
import colorful.utils as utils # noqa
@pytest.mark.parametrize('hex_value', [
'#FFFFFF',
'#0000FF',
'#FF0000',
'#00FF00',
'#808080',
'#FFFF00',
'#00FFFF',
'#EF8BA0',
])
def test_hex_to_rgb_conversion(hex_value):
"""
Test the conversion from a RGB hex value to a RGB channel triplet
"""
red, green, blue = utils.hex_to_rgb(hex_value)
assert '#{:02X}{:02X}{:02X}'.format(red, green, blue) == hex_value
|
<commit_before><commit_msg>Add tests for utils module<commit_after># -*- coding: utf-8 -*-
"""
colorful
~~~~~~~~
Terminal string styling done right, in Python.
:copyright: (c) 2017 by Timo Furrer <tuxtimo@gmail.com>
:license: MIT, see LICENSE for more details.
"""
import os
import pytest
# do not overwrite module
os.environ['COLORFUL_NO_MODULE_OVERWRITE'] = '1'
import colorful.utils as utils # noqa
@pytest.mark.parametrize('hex_value', [
'#FFFFFF',
'#0000FF',
'#FF0000',
'#00FF00',
'#808080',
'#FFFF00',
'#00FFFF',
'#EF8BA0',
])
def test_hex_to_rgb_conversion(hex_value):
"""
Test the conversion from a RGB hex value to a RGB channel triplet
"""
red, green, blue = utils.hex_to_rgb(hex_value)
assert '#{:02X}{:02X}{:02X}'.format(red, green, blue) == hex_value
|
|
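The round trip asserted in the test above only requires a plain hex-to-channel conversion; the helper below is a self-contained sketch of such a function, not the library's actual implementation.
def hex_to_rgb(value):
    # '#RRGGBB' -> (R, G, B) as integers in the range 0-255
    value = value.lstrip('#')
    return tuple(int(value[i:i + 2], 16) for i in (0, 2, 4))

red, green, blue = hex_to_rgb('#EF8BA0')
assert (red, green, blue) == (239, 139, 160)
assert '#{:02X}{:02X}{:02X}'.format(red, green, blue) == '#EF8BA0'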
95652f83865913dd6989374faa3ac2be8d50d981
|
buildlapse/gui.py
|
buildlapse/gui.py
|
# gui.py
# Generic GUI crap
from gi.repository import Gtk
class CheckEntry(Gtk.Box):
def __init__(self, labeltxt, togglef = None):
Gtk.Box.__init__(self, spacing=2)
self.label = Gtk.Label(labeltxt, valign=0)
self.check = Gtk.CheckButton()
self.pack_start(self.check, True, True, 0)
self.pack_start(self.label, False, True, 0)
def _toggle(x, y):
if togglef != None:
togglef(x, y)
self.check.connect("toggled", _toggle, labeltxt)
@property
def checked(self):
return self.check.get_active()
|
Add generic settings-related GUI util module
|
Add generic settings-related GUI util module
|
Python
|
apache-2.0
|
twoodford/nxt-timelapse,twoodford/nxt-timelapse,twoodford/nxt-timelapse
|
Add generic settings-related GUI util module
|
# gui.py
# Generic GUI crap
from gi.repository import Gtk
class CheckEntry(Gtk.Box):
def __init__(self, labeltxt, togglef = None):
Gtk.Box.__init__(self, spacing=2)
self.label = Gtk.Label(labeltxt, valign=0)
self.check = Gtk.CheckButton()
self.pack_start(self.check, True, True, 0)
self.pack_start(self.label, False, True, 0)
def _toggle(x, y):
if togglef != None:
togglef(x, y)
self.check.connect("toggled", _toggle, labeltxt)
@property
def checked(self):
return self.check.get_active()
|
<commit_before><commit_msg>Add generic settings-related GUI util module<commit_after>
|
# gui.py
# Generic GUI crap
from gi.repository import Gtk
class CheckEntry(Gtk.Box):
def __init__(self, labeltxt, togglef = None):
Gtk.Box.__init__(self, spacing=2)
self.label = Gtk.Label(labeltxt, valign=0)
self.check = Gtk.CheckButton()
self.pack_start(self.check, True, True, 0)
self.pack_start(self.label, False, True, 0)
def _toggle(x, y):
if togglef != None:
togglef(x, y)
self.check.connect("toggled", _toggle, labeltxt)
@property
def checked(self):
return self.check.get_active()
|
Add generic settings-related GUI util module# gui.py
# Generic GUI crap
from gi.repository import Gtk
class CheckEntry(Gtk.Box):
def __init__(self, labeltxt, togglef = None):
Gtk.Box.__init__(self, spacing=2)
self.label = Gtk.Label(labeltxt, valign=0)
self.check = Gtk.CheckButton()
self.pack_start(self.check, True, True, 0)
self.pack_start(self.label, False, True, 0)
def _toggle(x, y):
if togglef != None:
togglef(x, y)
self.check.connect("toggled", _toggle, labeltxt)
@property
def checked(self):
return self.check.get_active()
|
<commit_before><commit_msg>Add generic settings-related GUI util module<commit_after># gui.py
# Generic GUI crap
from gi.repository import Gtk
class CheckEntry(Gtk.Box):
def __init__(self, labeltxt, togglef = None):
Gtk.Box.__init__(self, spacing=2)
self.label = Gtk.Label(labeltxt, valign=0)
self.check = Gtk.CheckButton()
self.pack_start(self.check, True, True, 0)
self.pack_start(self.label, False, True, 0)
def _toggle(x, y):
if togglef != None:
togglef(x, y)
self.check.connect("toggled", _toggle, labeltxt)
@property
def checked(self):
return self.check.get_active()
|
|
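A small usage sketch for the widget above: pack a CheckEntry into a window (GTK 3 via PyGObject, matching the record) and react to toggles. The import path is assumed from the record's file name, and the callback signature mirrors the _toggle wrapper (button, label text).
from gi.repository import Gtk
from buildlapse.gui import CheckEntry  # module path assumed from the record above

def on_toggle(button, label):
    print("%s -> %s" % (label, button.get_active()))

win = Gtk.Window(title="buildlapse settings")
win.add(CheckEntry("Enable capture", on_toggle))
win.connect("destroy", Gtk.main_quit)
win.show_all()
Gtk.main()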
fe1b6725855a64898d385efe4e616dc04513b61d
|
tests/test_03_runtime.py
|
tests/test_03_runtime.py
|
"""Using depends() to mark dependencies at runtime.
"""
import pytest
def test_skip_depend_runtime(ctestdir):
"""One test is skipped, other dependent tests are skipped as well.
This also includes indirect dependencies.
"""
ctestdir.makepyfile("""
import pytest
from pytest_dependency import depends
@pytest.mark.dependency()
def test_a():
pass
@pytest.mark.dependency()
def test_b():
pytest.skip("explicit skip")
@pytest.mark.dependency()
def test_c(request):
depends(request, ["test_b"])
pass
@pytest.mark.dependency()
def test_d(request):
depends(request, ["test_a", "test_c"])
pass
""")
result = ctestdir.runpytest("--verbose")
result.assert_outcomes(passed=1, skipped=3, failed=0)
result.stdout.fnmatch_lines("""
*::test_a PASSED
*::test_b SKIPPED
*::test_c SKIPPED
*::test_d SKIPPED
""")
|
Add test for the depends() function.
|
Add test for the depends() function.
|
Python
|
apache-2.0
|
RKrahl/pytest-dependency
|
Add test for the depends() function.
|
"""Using depends() to mark dependencies at runtime.
"""
import pytest
def test_skip_depend_runtime(ctestdir):
"""One test is skipped, other dependent tests are skipped as well.
This also includes indirect dependencies.
"""
ctestdir.makepyfile("""
import pytest
from pytest_dependency import depends
@pytest.mark.dependency()
def test_a():
pass
@pytest.mark.dependency()
def test_b():
pytest.skip("explicit skip")
@pytest.mark.dependency()
def test_c(request):
depends(request, ["test_b"])
pass
@pytest.mark.dependency()
def test_d(request):
depends(request, ["test_a", "test_c"])
pass
""")
result = ctestdir.runpytest("--verbose")
result.assert_outcomes(passed=1, skipped=3, failed=0)
result.stdout.fnmatch_lines("""
*::test_a PASSED
*::test_b SKIPPED
*::test_c SKIPPED
*::test_d SKIPPED
""")
|
<commit_before><commit_msg>Add test for the depends() function.<commit_after>
|
"""Using depends() to mark dependencies at runtime.
"""
import pytest
def test_skip_depend_runtime(ctestdir):
"""One test is skipped, other dependent tests are skipped as well.
This also includes indirect dependencies.
"""
ctestdir.makepyfile("""
import pytest
from pytest_dependency import depends
@pytest.mark.dependency()
def test_a():
pass
@pytest.mark.dependency()
def test_b():
pytest.skip("explicit skip")
@pytest.mark.dependency()
def test_c(request):
depends(request, ["test_b"])
pass
@pytest.mark.dependency()
def test_d(request):
depends(request, ["test_a", "test_c"])
pass
""")
result = ctestdir.runpytest("--verbose")
result.assert_outcomes(passed=1, skipped=3, failed=0)
result.stdout.fnmatch_lines("""
*::test_a PASSED
*::test_b SKIPPED
*::test_c SKIPPED
*::test_d SKIPPED
""")
|
Add test for the depends() function."""Using depends() to mark dependencies at runtime.
"""
import pytest
def test_skip_depend_runtime(ctestdir):
"""One test is skipped, other dependent tests are skipped as well.
This also includes indirect dependencies.
"""
ctestdir.makepyfile("""
import pytest
from pytest_dependency import depends
@pytest.mark.dependency()
def test_a():
pass
@pytest.mark.dependency()
def test_b():
pytest.skip("explicit skip")
@pytest.mark.dependency()
def test_c(request):
depends(request, ["test_b"])
pass
@pytest.mark.dependency()
def test_d(request):
depends(request, ["test_a", "test_c"])
pass
""")
result = ctestdir.runpytest("--verbose")
result.assert_outcomes(passed=1, skipped=3, failed=0)
result.stdout.fnmatch_lines("""
*::test_a PASSED
*::test_b SKIPPED
*::test_c SKIPPED
*::test_d SKIPPED
""")
|
<commit_before><commit_msg>Add test for the depends() function.<commit_after>"""Using depends() to mark dependencies at runtime.
"""
import pytest
def test_skip_depend_runtime(ctestdir):
"""One test is skipped, other dependent tests are skipped as well.
This also includes indirect dependencies.
"""
ctestdir.makepyfile("""
import pytest
from pytest_dependency import depends
@pytest.mark.dependency()
def test_a():
pass
@pytest.mark.dependency()
def test_b():
pytest.skip("explicit skip")
@pytest.mark.dependency()
def test_c(request):
depends(request, ["test_b"])
pass
@pytest.mark.dependency()
def test_d(request):
depends(request, ["test_a", "test_c"])
pass
""")
result = ctestdir.runpytest("--verbose")
result.assert_outcomes(passed=1, skipped=3, failed=0)
result.stdout.fnmatch_lines("""
*::test_a PASSED
*::test_b SKIPPED
*::test_c SKIPPED
*::test_d SKIPPED
""")
|
|
2b29bd4c1a15136a066a61e02920721ef8117d23
|
tests/test_converters.py
|
tests/test_converters.py
|
import unittest
from beaker.converters import asbool, aslist
class AsBool(unittest.TestCase):
def test_truth_str(self):
for v in ('true', 'yes', 'on', 'y', 't', '1'):
self.assertTrue(asbool(v), "%s should be considered True" % (v,))
v = v.upper()
self.assertTrue(asbool(v), "%s should be considered True" % (v,))
def test_false_str(self):
for v in ('false', 'no', 'off', 'n', 'f', '0'):
self.assertFalse(asbool(v), v)
v = v.upper()
self.assertFalse(asbool(v), v)
def test_coerce(self):
"""Things that can coerce right straight to booleans."""
self.assertTrue(asbool(True))
self.assertTrue(asbool(1))
self.assertTrue(asbool(42))
self.assertFalse(asbool(False))
self.assertFalse(asbool(0))
def test_bad_values(self):
self.assertRaises(ValueError, asbool, ('mommy!'))
self.assertRaises(ValueError, asbool, (u'Blargl?'))
class AsList(unittest.TestCase):
def test_string(self):
self.assertEqual(aslist('abc'), ['abc'])
self.assertEqual(aslist('1a2a3'), [1, 2, 3])
def test_None(self):
self.assertEqual(aslist(None), [])
def test_listy_noops(self):
"""Lists and tuples should come back unchanged."""
x = [1, 2, 3]
self.assertEqual(aslist(x), x)
y = ('z', 'y', 'x')
self.assertEqual(aslist(y), y)
def test_listify(self):
"""Other objects should just result in a single item list."""
self.assertEqual(aslist(dict()), {})
if __name__ == '__main__':
unittest.main()
|
Bring coverage for the converters module up to 100%.
|
Bring coverage for the converters module up to 100%.
|
Python
|
bsd-3-clause
|
lann/python-beaker
|
Bring coverage for the converters module up to 100%.
|
import unittest
from beaker.converters import asbool, aslist
class AsBool(unittest.TestCase):
def test_truth_str(self):
for v in ('true', 'yes', 'on', 'y', 't', '1'):
self.assertTrue(asbool(v), "%s should be considered True" % (v,))
v = v.upper()
self.assertTrue(asbool(v), "%s should be considered True" % (v,))
def test_false_str(self):
for v in ('false', 'no', 'off', 'n', 'f', '0'):
self.assertFalse(asbool(v), v)
v = v.upper()
self.assertFalse(asbool(v), v)
def test_coerce(self):
"""Things that can coerce right straight to booleans."""
self.assertTrue(asbool(True))
self.assertTrue(asbool(1))
self.assertTrue(asbool(42))
self.assertFalse(asbool(False))
self.assertFalse(asbool(0))
def test_bad_values(self):
self.assertRaises(ValueError, asbool, ('mommy!'))
self.assertRaises(ValueError, asbool, (u'Blargl?'))
class AsList(unittest.TestCase):
def test_string(self):
self.assertEqual(aslist('abc'), ['abc'])
self.assertEqual(aslist('1a2a3'), [1, 2, 3])
def test_None(self):
self.assertEqual(aslist(None), [])
def test_listy_noops(self):
"""Lists and tuples should come back unchanged."""
x = [1, 2, 3]
self.assertEqual(aslist(x), x)
y = ('z', 'y', 'x')
self.assertEqual(aslist(y), y)
def test_listify(self):
"""Other objects should just result in a single item list."""
self.assertEqual(aslist(dict()), {})
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Bring coverage for the converters module up to 100%.<commit_after>
|
import unittest
from beaker.converters import asbool, aslist
class AsBool(unittest.TestCase):
def test_truth_str(self):
for v in ('true', 'yes', 'on', 'y', 't', '1'):
self.assertTrue(asbool(v), "%s should be considered True" % (v,))
v = v.upper()
self.assertTrue(asbool(v), "%s should be considered True" % (v,))
def test_false_str(self):
for v in ('false', 'no', 'off', 'n', 'f', '0'):
self.assertFalse(asbool(v), v)
v = v.upper()
self.assertFalse(asbool(v), v)
def test_coerce(self):
"""Things that can coerce right straight to booleans."""
self.assertTrue(asbool(True))
self.assertTrue(asbool(1))
self.assertTrue(asbool(42))
self.assertFalse(asbool(False))
self.assertFalse(asbool(0))
def test_bad_values(self):
self.assertRaises(ValueError, asbool, ('mommy!'))
self.assertRaises(ValueError, asbool, (u'Blargl?'))
class AsList(unittest.TestCase):
def test_string(self):
self.assertEqual(aslist('abc'), ['abc'])
self.assertEqual(aslist('1a2a3'), [1, 2, 3])
def test_None(self):
self.assertEqual(aslist(None), [])
def test_listy_noops(self):
"""Lists and tuples should come back unchanged."""
x = [1, 2, 3]
self.assertEqual(aslist(x), x)
y = ('z', 'y', 'x')
self.assertEqual(aslist(y), y)
def test_listify(self):
"""Other objects should just result in a single item list."""
self.assertEqual(aslist(dict()), {})
if __name__ == '__main__':
unittest.main()
|
Bring coverage for the converters module up to 100%.import unittest
from beaker.converters import asbool, aslist
class AsBool(unittest.TestCase):
def test_truth_str(self):
for v in ('true', 'yes', 'on', 'y', 't', '1'):
self.assertTrue(asbool(v), "%s should be considered True" % (v,))
v = v.upper()
self.assertTrue(asbool(v), "%s should be considered True" % (v,))
def test_false_str(self):
for v in ('false', 'no', 'off', 'n', 'f', '0'):
self.assertFalse(asbool(v), v)
v = v.upper()
self.assertFalse(asbool(v), v)
def test_coerce(self):
"""Things that can coerce right straight to booleans."""
self.assertTrue(asbool(True))
self.assertTrue(asbool(1))
self.assertTrue(asbool(42))
self.assertFalse(asbool(False))
self.assertFalse(asbool(0))
def test_bad_values(self):
self.assertRaises(ValueError, asbool, ('mommy!'))
self.assertRaises(ValueError, asbool, (u'Blargl?'))
class AsList(unittest.TestCase):
def test_string(self):
self.assertEqual(aslist('abc'), ['abc'])
self.assertEqual(aslist('1a2a3'), [1, 2, 3])
def test_None(self):
self.assertEqual(aslist(None), [])
def test_listy_noops(self):
"""Lists and tuples should come back unchanged."""
x = [1, 2, 3]
self.assertEqual(aslist(x), x)
y = ('z', 'y', 'x')
self.assertEqual(aslist(y), y)
def test_listify(self):
"""Other objects should just result in a single item list."""
self.assertEqual(aslist(dict()), {})
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Bring coverage for the converters module up to 100%.<commit_after>import unittest
from beaker.converters import asbool, aslist
class AsBool(unittest.TestCase):
def test_truth_str(self):
for v in ('true', 'yes', 'on', 'y', 't', '1'):
self.assertTrue(asbool(v), "%s should be considered True" % (v,))
v = v.upper()
self.assertTrue(asbool(v), "%s should be considered True" % (v,))
def test_false_str(self):
for v in ('false', 'no', 'off', 'n', 'f', '0'):
self.assertFalse(asbool(v), v)
v = v.upper()
self.assertFalse(asbool(v), v)
def test_coerce(self):
"""Things that can coerce right straight to booleans."""
self.assertTrue(asbool(True))
self.assertTrue(asbool(1))
self.assertTrue(asbool(42))
self.assertFalse(asbool(False))
self.assertFalse(asbool(0))
def test_bad_values(self):
self.assertRaises(ValueError, asbool, ('mommy!'))
self.assertRaises(ValueError, asbool, (u'Blargl?'))
class AsList(unittest.TestCase):
def test_string(self):
self.assertEqual(aslist('abc'), ['abc'])
self.assertEqual(aslist('1a2a3'), [1, 2, 3])
def test_None(self):
self.assertEqual(aslist(None), [])
def test_listy_noops(self):
"""Lists and tuples should come back unchanged."""
x = [1, 2, 3]
self.assertEqual(aslist(x), x)
y = ('z', 'y', 'x')
self.assertEqual(aslist(y), y)
def test_listify(self):
"""Other objects should just result in a single item list."""
self.assertEqual(aslist(dict()), {})
if __name__ == '__main__':
unittest.main()
|
|
1a41aa1088965e0b30bd3b53ac400cd6a0f8ce69
|
tests/test_parser_api.py
|
tests/test_parser_api.py
|
from xml.etree import ElementTree
from junit2htmlreport import parser as j2h
def test_public_api():
container = j2h.Junit(xmlstring="""<?xml version="1.0" encoding="UTF-8"?>
<testsuite name="suite"></testsuite>
""")
container.filename = "test_results.xml"
document = j2h.Suite()
container.suites = [document]
document.name = "test report"
document.duration = 0.1
document.package = "com.tests"
first = j2h.Class()
first.name = "myclass"
document.classes[first.name] = first
test1 = j2h.Case()
test1.name = "test_one"
test1.duration = 1.1
test1.testclass = first
first.cases.append(test1)
test2 = j2h.Case()
test2.name = "test_two"
test2.duration = 1.2
test2.testclass = first
first.cases.append(test2)
skipped1 = j2h.Case()
skipped1.name = "test_skippy"
skipped1.duration = 1.3
skipped1.testclass = first
skipped1.skipped = "test skipped"
skipped1.skipped_msg = "test was skipped at runtime"
first.cases.append(skipped1)
failed1 = j2h.Case()
failed1.name = "test_bad"
failed1.duration = 1.4
failed1.testclass = first
failed1.failure = "test failed"
failed1.failure_msg = "an exception happened"
first.cases.append(failed1)
html = container.html()
assert html
assert "<html>" in html
assert """<span class="testname"><b>test_skippy</b></span><br/>""" in html
assert """<div class="failure"><b>Failed: an exception happened</b><br/>""" in html
|
Add unit tests for our main public API so we can start to re-implement the parser
|
Add unit tests for our main public API so we can start to re-implement the parser
|
Python
|
mit
|
inorton/junit2html
|
Add unit tests for our main public API so we can start to re-implement the parser
|
from xml.etree import ElementTree
from junit2htmlreport import parser as j2h
def test_public_api():
container = j2h.Junit(xmlstring="""<?xml version="1.0" encoding="UTF-8"?>
<testsuite name="suite"></testsuite>
""")
container.filename = "test_results.xml"
document = j2h.Suite()
container.suites = [document]
document.name = "test report"
document.duration = 0.1
document.package = "com.tests"
first = j2h.Class()
first.name = "myclass"
document.classes[first.name] = first
test1 = j2h.Case()
test1.name = "test_one"
test1.duration = 1.1
test1.testclass = first
first.cases.append(test1)
test2 = j2h.Case()
test2.name = "test_two"
test2.duration = 1.2
test2.testclass = first
first.cases.append(test2)
skipped1 = j2h.Case()
skipped1.name = "test_skippy"
skipped1.duration = 1.3
skipped1.testclass = first
skipped1.skipped = "test skipped"
skipped1.skipped_msg = "test was skipped at runtime"
first.cases.append(skipped1)
failed1 = j2h.Case()
failed1.name = "test_bad"
failed1.duration = 1.4
failed1.testclass = first
failed1.failure = "test failed"
failed1.failure_msg = "an exception happened"
first.cases.append(failed1)
html = container.html()
assert html
assert "<html>" in html
assert """<span class="testname"><b>test_skippy</b></span><br/>""" in html
assert """<div class="failure"><b>Failed: an exception happened</b><br/>""" in html
|
<commit_before><commit_msg>Add unit tests for our main public API so we can start to re-implement the parser<commit_after>
|
from xml.etree import ElementTree
from junit2htmlreport import parser as j2h
def test_public_api():
container = j2h.Junit(xmlstring="""<?xml version="1.0" encoding="UTF-8"?>
<testsuite name="suite"></testsuite>
""")
container.filename = "test_results.xml"
document = j2h.Suite()
container.suites = [document]
document.name = "test report"
document.duration = 0.1
document.package = "com.tests"
first = j2h.Class()
first.name = "myclass"
document.classes[first.name] = first
test1 = j2h.Case()
test1.name = "test_one"
test1.duration = 1.1
test1.testclass = first
first.cases.append(test1)
test2 = j2h.Case()
test2.name = "test_two"
test2.duration = 1.2
test2.testclass = first
first.cases.append(test2)
skipped1 = j2h.Case()
skipped1.name = "test_skippy"
skipped1.duration = 1.3
skipped1.testclass = first
skipped1.skipped = "test skipped"
skipped1.skipped_msg = "test was skipped at runtime"
first.cases.append(skipped1)
failed1 = j2h.Case()
failed1.name = "test_bad"
failed1.duration = 1.4
failed1.testclass = first
failed1.failure = "test failed"
failed1.failure_msg = "an exception happened"
first.cases.append(failed1)
html = container.html()
assert html
assert "<html>" in html
assert """<span class="testname"><b>test_skippy</b></span><br/>""" in html
assert """<div class="failure"><b>Failed: an exception happened</b><br/>""" in html
|
Add unit tests for our main public API so we can start to re-implement the parserfrom xml.etree import ElementTree
from junit2htmlreport import parser as j2h
def test_public_api():
container = j2h.Junit(xmlstring="""<?xml version="1.0" encoding="UTF-8"?>
<testsuite name="suite"></testsuite>
""")
container.filename = "test_results.xml"
document = j2h.Suite()
container.suites = [document]
document.name = "test report"
document.duration = 0.1
document.package = "com.tests"
first = j2h.Class()
first.name = "myclass"
document.classes[first.name] = first
test1 = j2h.Case()
test1.name = "test_one"
test1.duration = 1.1
test1.testclass = first
first.cases.append(test1)
test2 = j2h.Case()
test2.name = "test_two"
test2.duration = 1.2
test2.testclass = first
first.cases.append(test2)
skipped1 = j2h.Case()
skipped1.name = "test_skippy"
skipped1.duration = 1.3
skipped1.testclass = first
skipped1.skipped = "test skipped"
skipped1.skipped_msg = "test was skipped at runtime"
first.cases.append(skipped1)
failed1 = j2h.Case()
failed1.name = "test_bad"
failed1.duration = 1.4
failed1.testclass = first
failed1.failure = "test failed"
failed1.failure_msg = "an exception happened"
first.cases.append(failed1)
html = container.html()
assert html
assert "<html>" in html
assert """<span class="testname"><b>test_skippy</b></span><br/>""" in html
assert """<div class="failure"><b>Failed: an exception happened</b><br/>""" in html
|
<commit_before><commit_msg>Add unit tests for our main public API so we can start to re-implement the parser<commit_after>from xml.etree import ElementTree
from junit2htmlreport import parser as j2h
def test_public_api():
container = j2h.Junit(xmlstring="""<?xml version="1.0" encoding="UTF-8"?>
<testsuite name="suite"></testsuite>
""")
container.filename = "test_results.xml"
document = j2h.Suite()
container.suites = [document]
document.name = "test report"
document.duration = 0.1
document.package = "com.tests"
first = j2h.Class()
first.name = "myclass"
document.classes[first.name] = first
test1 = j2h.Case()
test1.name = "test_one"
test1.duration = 1.1
test1.testclass = first
first.cases.append(test1)
test2 = j2h.Case()
test2.name = "test_two"
test2.duration = 1.2
test2.testclass = first
first.cases.append(test2)
skipped1 = j2h.Case()
skipped1.name = "test_skippy"
skipped1.duration = 1.3
skipped1.testclass = first
skipped1.skipped = "test skipped"
skipped1.skipped_msg = "test was skipped at runtime"
first.cases.append(skipped1)
failed1 = j2h.Case()
failed1.name = "test_bad"
failed1.duration = 1.4
failed1.testclass = first
failed1.failure = "test failed"
failed1.failure_msg = "an exception happened"
first.cases.append(failed1)
html = container.html()
assert html
assert "<html>" in html
assert """<span class="testname"><b>test_skippy</b></span><br/>""" in html
assert """<div class="failure"><b>Failed: an exception happened</b><br/>""" in html
|
|
9862a89b6eab4b8c1b87f338ee71e683ca387765
|
simulator-perfect.py
|
simulator-perfect.py
|
#!/usr/bin/env python3
import gzip
import itertools
import timer
import sys
import utils
# A set of files already in the storage
seen = set()
# The total number of uploads
total_uploads = 0
# The number of files in the storage
files_in = 0
tmr = timer.Timer()
for (hsh, _) in utils.read_upload_stream():
if hsh not in seen:
files_in += 1
seen.add(hsh)
total_uploads += 1
if total_uploads % utils.REPORT_FREQUENCY == 0:
print("%s uploads, percentage %.4f, time %s, %s" % (
utils.num_fmt(total_uploads),
1 - files_in / total_uploads,
tmr.elapsed_str,
utils.get_mem_info()
), file=sys.stderr)
dedup_percentage = 1 - files_in / total_uploads
print("+++ Simulation complete. dedup_percentage=%f" % dedup_percentage, file=sys.stderr)
|
Add a simulator for measuring perfect deduplication
|
Add a simulator for measuring perfect deduplication
|
Python
|
apache-2.0
|
sjakthol/dedup-simulator,sjakthol/dedup-simulator
|
Add a simulator for measuring perfect deduplication
|
#!/usr/bin/env python3
import gzip
import itertools
import timer
import sys
import utils
# A set of files already in the storage
seen = set()
# The total number of uploads
total_uploads = 0
# The number of files in the storage
files_in = 0
tmr = timer.Timer()
for (hsh, _) in utils.read_upload_stream():
if hsh not in seen:
files_in += 1
seen.add(hsh)
total_uploads += 1
if total_uploads % utils.REPORT_FREQUENCY == 0:
print("%s uploads, percentage %.4f, time %s, %s" % (
utils.num_fmt(total_uploads),
1 - files_in / total_uploads,
tmr.elapsed_str,
utils.get_mem_info()
), file=sys.stderr)
dedup_percentage = 1 - files_in / total_uploads
print("+++ Simulation complete. dedup_percentage=%f" % dedup_percentage, file=sys.stderr)
|
<commit_before><commit_msg>Add a simulator for measuring perfect deduplication<commit_after>
|
#!/usr/bin/env python3
import gzip
import itertools
import timer
import sys
import utils
# A set of files already in the storage
seen = set()
# The total number of uploads
total_uploads = 0
# The number of files in the storage
files_in = 0
tmr = timer.Timer()
for (hsh, _) in utils.read_upload_stream():
if hsh not in seen:
files_in += 1
seen.add(hsh)
total_uploads += 1
if total_uploads % utils.REPORT_FREQUENCY == 0:
print("%s uploads, percentage %.4f, time %s, %s" % (
utils.num_fmt(total_uploads),
1 - files_in / total_uploads,
tmr.elapsed_str,
utils.get_mem_info()
), file=sys.stderr)
dedup_percentage = 1 - files_in / total_uploads
print("+++ Simulation complete. dedup_percentage=%f" % dedup_percentage, file=sys.stderr)
|
Add a simulator for measuring perfect deduplication#!/usr/bin/env python3
import gzip
import itertools
import timer
import sys
import utils
# A set of files already in the storage
seen = set()
# The total number of uploads
total_uploads = 0
# The number of files in the storage
files_in = 0
tmr = timer.Timer()
for (hsh, _) in utils.read_upload_stream():
if hsh not in seen:
files_in += 1
seen.add(hsh)
total_uploads += 1
if total_uploads % utils.REPORT_FREQUENCY == 0:
print("%s uploads, percentage %.4f, time %s, %s" % (
utils.num_fmt(total_uploads),
1 - files_in / total_uploads,
tmr.elapsed_str,
utils.get_mem_info()
), file=sys.stderr)
dedup_percentage = 1 - files_in / total_uploads
print("+++ Simulation complete. dedup_percentage=%f" % dedup_percentage, file=sys.stderr)
|
<commit_before><commit_msg>Add a simulator for measuring perfect deduplication<commit_after>#!/usr/bin/env python3
import gzip
import itertools
import timer
import sys
import utils
# A set of files already in the storage
seen = set()
# The total number of uploads
total_uploads = 0
# The number of files in the storage
files_in = 0
tmr = timer.Timer()
for (hsh, _) in utils.read_upload_stream():
if hsh not in seen:
files_in += 1
seen.add(hsh)
total_uploads += 1
if total_uploads % utils.REPORT_FREQUENCY == 0:
print("%s uploads, percentage %.4f, time %s, %s" % (
utils.num_fmt(total_uploads),
1 - files_in / total_uploads,
tmr.elapsed_str,
utils.get_mem_info()
), file=sys.stderr)
dedup_percentage = 1 - files_in / total_uploads
print("+++ Simulation complete. dedup_percentage=%f" % dedup_percentage, file=sys.stderr)
|
|
fc34c641688d0f87987297f8e47c5fdf9e26334c
|
pombola/core/management/commands/core_list_person_primary_images.py
|
pombola/core/management/commands/core_list_person_primary_images.py
|
from django.core.management.base import BaseCommand
from pombola.core.models import Person
class Command(BaseCommand):
def handle(self, **options):
help = 'List the paths of primary person images relative to media root'
for p in Person.objects.filter(hidden=False):
image_file_field = p.primary_image()
if not image_file_field:
continue
print image_file_field.name
|
Add a command to list all primary image filenames for people
|
Add a command to list all primary image filenames for people
This is useful to make a list of files to copy to your development copy
to make sure you don't have broken images on people pages, or pages that
list people.
|
Python
|
agpl-3.0
|
geoffkilpin/pombola,mysociety/pombola,geoffkilpin/pombola,geoffkilpin/pombola,mysociety/pombola,geoffkilpin/pombola,geoffkilpin/pombola,mysociety/pombola,mysociety/pombola,mysociety/pombola,mysociety/pombola,geoffkilpin/pombola
|
Add a command to list all primary image filenames for people
This is useful to make a list of files to copy to your development copy
to make sure you don't have broken images on people pages, or pages that
list people.
|
from django.core.management.base import BaseCommand
from pombola.core.models import Person
class Command(BaseCommand):
def handle(self, **options):
help = 'List the paths of primary person images relative to media root'
for p in Person.objects.filter(hidden=False):
image_file_field = p.primary_image()
if not image_file_field:
continue
print image_file_field.name
|
<commit_before><commit_msg>Add a command to list all primary image filenames for people
This is useful to make a list of files to copy to your development copy
to make sure you don't have broken images on people pages, or pages that
list people.<commit_after>
|
from django.core.management.base import BaseCommand
from pombola.core.models import Person
class Command(BaseCommand):
def handle(self, **options):
help = 'List the paths of primary person images relative to media root'
for p in Person.objects.filter(hidden=False):
image_file_field = p.primary_image()
if not image_file_field:
continue
print image_file_field.name
|
Add a command to list all primary image filenames for people
This is useful to make a list of files to copy to your development copy
to make sure you don't have broken images on people pages, or pages that
list people.from django.core.management.base import BaseCommand
from pombola.core.models import Person
class Command(BaseCommand):
def handle(self, **options):
help = 'List the paths of primary person images relative to media root'
for p in Person.objects.filter(hidden=False):
image_file_field = p.primary_image()
if not image_file_field:
continue
print image_file_field.name
|
<commit_before><commit_msg>Add a command to list all primary image filenames for people
This is useful to make a list of files to copy to your development copy
to make sure you don't have broken images on people pages, or pages that
list people.<commit_after>from django.core.management.base import BaseCommand
from pombola.core.models import Person
class Command(BaseCommand):
def handle(self, **options):
help = 'List the paths of primary person images relative to media root'
for p in Person.objects.filter(hidden=False):
image_file_field = p.primary_image()
if not image_file_field:
continue
print image_file_field.name
|
|
f88b7f0b70205e9ac6f41f8acffdf96e8b263e9e
|
CRS_Tests_Journal.py
|
CRS_Tests_Journal.py
|
from ftw import ruleset, logchecker, testrunner
import pytest
import sys
import re
import os
import ConfigParser
def test_crs(ruleset, test, logchecker_obj, with_journal, tablename):
runner = testrunner.TestRunner()
for stage in test.stages:
runner.run_stage_with_journal(test.ruleset_meta['name'], test, with_journal, tablename, logchecker_obj)
class FooLogChecker(logchecker.LogChecker):
def reverse_readline(self, filename):
with open(filename) as f:
f.seek(0, os.SEEK_END)
position = f.tell()
line = ''
while position >= 0:
f.seek(position)
next_char = f.read(1)
if next_char == "\n":
yield line[::-1]
line = ''
else:
line += next_char
position -= 1
yield line[::-1]
def get_logs(self):
import datetime
config = ConfigParser.ConfigParser()
config.read("settings.ini")
log_location = config.get('settings', 'log_location')
our_logs = []
pattern = re.compile(r"\[([A-Z][a-z]{2} [A-z][a-z]{2} \d{1,2} \d{1,2}\:\d{1,2}\:\d{1,2}\.\d+? \d{4})\]")
for lline in self.reverse_readline(log_location):
# Extract dates from each line
match = re.match(pattern,lline)
if match:
log_date = match.group(1)
# Convert our date
log_date = datetime.datetime.strptime(log_date, "%a %b %d %H:%M:%S.%f %Y")
ftw_start = self.start
ftw_end = self.end
# If we have a log date in range
if log_date <= ftw_end and log_date >= ftw_start:
our_logs.append(lline)
# If our log is from before FTW started stop
if(log_date < ftw_start):
break
return our_logs
@pytest.fixture
def logchecker_obj():
return FooLogChecker()
|
Add journal script (needs next version of ftw pinned), remove conftest.py
|
Add journal script (needs next version of ftw pinned), remove conftest.py
|
Python
|
apache-2.0
|
csjperon/OWASP-CRS-regressions,SpiderLabs/OWASP-CRS-regressions
|
Add journal script (needs next version of ftw pinned), remove conftest.py
|
from ftw import ruleset, logchecker, testrunner
import pytest
import sys
import re
import os
import ConfigParser
def test_crs(ruleset, test, logchecker_obj, with_journal, tablename):
runner = testrunner.TestRunner()
for stage in test.stages:
runner.run_stage_with_journal(test.ruleset_meta['name'], test, with_journal, tablename, logchecker_obj)
class FooLogChecker(logchecker.LogChecker):
def reverse_readline(self, filename):
with open(filename) as f:
f.seek(0, os.SEEK_END)
position = f.tell()
line = ''
while position >= 0:
f.seek(position)
next_char = f.read(1)
if next_char == "\n":
yield line[::-1]
line = ''
else:
line += next_char
position -= 1
yield line[::-1]
def get_logs(self):
import datetime
config = ConfigParser.ConfigParser()
config.read("settings.ini")
log_location = config.get('settings', 'log_location')
our_logs = []
pattern = re.compile(r"\[([A-Z][a-z]{2} [A-z][a-z]{2} \d{1,2} \d{1,2}\:\d{1,2}\:\d{1,2}\.\d+? \d{4})\]")
for lline in self.reverse_readline(log_location):
# Extract dates from each line
match = re.match(pattern,lline)
if match:
log_date = match.group(1)
# Convert our date
log_date = datetime.datetime.strptime(log_date, "%a %b %d %H:%M:%S.%f %Y")
ftw_start = self.start
ftw_end = self.end
# If we have a log date in range
if log_date <= ftw_end and log_date >= ftw_start:
our_logs.append(lline)
# If our log is from before FTW started stop
if(log_date < ftw_start):
break
return our_logs
@pytest.fixture
def logchecker_obj():
return FooLogChecker()
|
<commit_before><commit_msg>Add journal script (needs next version of ftw pinned), remove conftest.py<commit_after>
|
from ftw import ruleset, logchecker, testrunner
import pytest
import sys
import re
import os
import ConfigParser
def test_crs(ruleset, test, logchecker_obj, with_journal, tablename):
runner = testrunner.TestRunner()
for stage in test.stages:
runner.run_stage_with_journal(test.ruleset_meta['name'], test, with_journal, tablename, logchecker_obj)
class FooLogChecker(logchecker.LogChecker):
def reverse_readline(self, filename):
with open(filename) as f:
f.seek(0, os.SEEK_END)
position = f.tell()
line = ''
while position >= 0:
f.seek(position)
next_char = f.read(1)
if next_char == "\n":
yield line[::-1]
line = ''
else:
line += next_char
position -= 1
yield line[::-1]
def get_logs(self):
import datetime
config = ConfigParser.ConfigParser()
config.read("settings.ini")
log_location = config.get('settings', 'log_location')
our_logs = []
pattern = re.compile(r"\[([A-Z][a-z]{2} [A-z][a-z]{2} \d{1,2} \d{1,2}\:\d{1,2}\:\d{1,2}\.\d+? \d{4})\]")
for lline in self.reverse_readline(log_location):
# Extract dates from each line
match = re.match(pattern,lline)
if match:
log_date = match.group(1)
# Convert our date
log_date = datetime.datetime.strptime(log_date, "%a %b %d %H:%M:%S.%f %Y")
ftw_start = self.start
ftw_end = self.end
# If we have a log date in range
if log_date <= ftw_end and log_date >= ftw_start:
our_logs.append(lline)
# If our log is from before FTW started stop
if(log_date < ftw_start):
break
return our_logs
@pytest.fixture
def logchecker_obj():
return FooLogChecker()
|
Add journal script (needs next version of ftw pinned), remove conftest.pyfrom ftw import ruleset, logchecker, testrunner
import pytest
import sys
import re
import os
import ConfigParser
def test_crs(ruleset, test, logchecker_obj, with_journal, tablename):
runner = testrunner.TestRunner()
for stage in test.stages:
runner.run_stage_with_journal(test.ruleset_meta['name'], test, with_journal, tablename, logchecker_obj)
class FooLogChecker(logchecker.LogChecker):
def reverse_readline(self, filename):
with open(filename) as f:
f.seek(0, os.SEEK_END)
position = f.tell()
line = ''
while position >= 0:
f.seek(position)
next_char = f.read(1)
if next_char == "\n":
yield line[::-1]
line = ''
else:
line += next_char
position -= 1
yield line[::-1]
def get_logs(self):
import datetime
config = ConfigParser.ConfigParser()
config.read("settings.ini")
log_location = config.get('settings', 'log_location')
our_logs = []
pattern = re.compile(r"\[([A-Z][a-z]{2} [A-z][a-z]{2} \d{1,2} \d{1,2}\:\d{1,2}\:\d{1,2}\.\d+? \d{4})\]")
for lline in self.reverse_readline(log_location):
# Extract dates from each line
match = re.match(pattern,lline)
if match:
log_date = match.group(1)
# Convert our date
log_date = datetime.datetime.strptime(log_date, "%a %b %d %H:%M:%S.%f %Y")
ftw_start = self.start
ftw_end = self.end
# If we have a log date in range
if log_date <= ftw_end and log_date >= ftw_start:
our_logs.append(lline)
# If our log is from before FTW started stop
if(log_date < ftw_start):
break
return our_logs
@pytest.fixture
def logchecker_obj():
return FooLogChecker()
|
<commit_before><commit_msg>Add journal script (needs next version of ftw pinned), remove conftest.py<commit_after>from ftw import ruleset, logchecker, testrunner
import pytest
import sys
import re
import os
import ConfigParser
def test_crs(ruleset, test, logchecker_obj, with_journal, tablename):
runner = testrunner.TestRunner()
for stage in test.stages:
runner.run_stage_with_journal(test.ruleset_meta['name'], test, with_journal, tablename, logchecker_obj)
class FooLogChecker(logchecker.LogChecker):
def reverse_readline(self, filename):
with open(filename) as f:
f.seek(0, os.SEEK_END)
position = f.tell()
line = ''
while position >= 0:
f.seek(position)
next_char = f.read(1)
if next_char == "\n":
yield line[::-1]
line = ''
else:
line += next_char
position -= 1
yield line[::-1]
def get_logs(self):
import datetime
config = ConfigParser.ConfigParser()
config.read("settings.ini")
log_location = config.get('settings', 'log_location')
our_logs = []
pattern = re.compile(r"\[([A-Z][a-z]{2} [A-z][a-z]{2} \d{1,2} \d{1,2}\:\d{1,2}\:\d{1,2}\.\d+? \d{4})\]")
for lline in self.reverse_readline(log_location):
# Extract dates from each line
match = re.match(pattern,lline)
if match:
log_date = match.group(1)
# Convert our date
log_date = datetime.datetime.strptime(log_date, "%a %b %d %H:%M:%S.%f %Y")
ftw_start = self.start
ftw_end = self.end
# If we have a log date in range
if log_date <= ftw_end and log_date >= ftw_start:
our_logs.append(lline)
# If our log is from before FTW started stop
if(log_date < ftw_start):
break
return our_logs
@pytest.fixture
def logchecker_obj():
return FooLogChecker()
|
|
cc543e52b82761473b0a6dfd47f7451b2a5411f8
|
app/tests/cases_tests/test_forms.py
|
app/tests/cases_tests/test_forms.py
|
import pytest
from django.contrib.auth import get_user_model
from django.contrib.auth.models import AnonymousUser
from django.http import HttpResponse
from django.test import Client, RequestFactory
from django.views import View
from grandchallenge.cases.views import UploadRawFiles
from tests.factories import UserFactory
@pytest.mark.django_db
def test_upload_some_images(client: Client, rf: RequestFactory):
"""
Not working?! :-(
- something with template tags goes wrong here...
response = client.get("/cases/upload/")
assert response.status_code != 200
client.force_login(test_user)
response = client.get("/cases/upload/")
assert response.status_code == 200
"""
test_user = UserFactory()
upload_view = UploadRawFiles.as_view()
upload_view: View
req = rf.get("/cases/upload/")
req.user = AnonymousUser()
response = upload_view(req)
response: HttpResponse
assert response.status_code != 200
req.user = test_user
response = upload_view(req)
response: HttpResponse
assert response.status_code == 200
|
Add tests for user frontend forms
|
Add tests for user frontend forms
Issue #479
|
Python
|
apache-2.0
|
comic/comic-django,comic/comic-django,comic/comic-django,comic/comic-django,comic/comic-django
|
Add tests for user frontend forms
Issue #479
|
import pytest
from django.contrib.auth import get_user_model
from django.contrib.auth.models import AnonymousUser
from django.http import HttpResponse
from django.test import Client, RequestFactory
from django.views import View
from grandchallenge.cases.views import UploadRawFiles
from tests.factories import UserFactory
@pytest.mark.django_db
def test_upload_some_images(client: Client, rf: RequestFactory):
"""
Not working?! :-(
- something with template tags goes wrong here...
response = client.get("/cases/upload/")
assert response.status_code != 200
client.force_login(test_user)
response = client.get("/cases/upload/")
assert response.status_code == 200
"""
test_user = UserFactory()
upload_view = UploadRawFiles.as_view()
upload_view: View
req = rf.get("/cases/upload/")
req.user = AnonymousUser()
response = upload_view(req)
response: HttpResponse
assert response.status_code != 200
req.user = test_user
response = upload_view(req)
response: HttpResponse
assert response.status_code == 200
|
<commit_before><commit_msg>Add tests for user frontend forms
Issue #479<commit_after>
|
import pytest
from django.contrib.auth import get_user_model
from django.contrib.auth.models import AnonymousUser
from django.http import HttpResponse
from django.test import Client, RequestFactory
from django.views import View
from grandchallenge.cases.views import UploadRawFiles
from tests.factories import UserFactory
@pytest.mark.django_db
def test_upload_some_images(client: Client, rf: RequestFactory):
"""
Not working?! :-(
- something with template tags goes wrong here...
response = client.get("/cases/upload/")
assert response.status_code != 200
client.force_login(test_user)
response = client.get("/cases/upload/")
assert response.status_code == 200
"""
test_user = UserFactory()
upload_view = UploadRawFiles.as_view()
upload_view: View
req = rf.get("/cases/upload/")
req.user = AnonymousUser()
response = upload_view(req)
response: HttpResponse
assert response.status_code != 200
req.user = test_user
response = upload_view(req)
response: HttpResponse
assert response.status_code == 200
|
Add tests for user frontend forms
Issue #479import pytest
from django.contrib.auth import get_user_model
from django.contrib.auth.models import AnonymousUser
from django.http import HttpResponse
from django.test import Client, RequestFactory
from django.views import View
from grandchallenge.cases.views import UploadRawFiles
from tests.factories import UserFactory
@pytest.mark.django_db
def test_upload_some_images(client: Client, rf: RequestFactory):
"""
Not working?! :-(
- something with template tags goes wrong here...
response = client.get("/cases/upload/")
assert response.status_code != 200
client.force_login(test_user)
response = client.get("/cases/upload/")
assert response.status_code == 200
"""
test_user = UserFactory()
upload_view = UploadRawFiles.as_view()
upload_view: View
req = rf.get("/cases/upload/")
req.user = AnonymousUser()
response = upload_view(req)
response: HttpResponse
assert response.status_code != 200
req.user = test_user
response = upload_view(req)
response: HttpResponse
assert response.status_code == 200
|
<commit_before><commit_msg>Add tests for user frontend forms
Issue #479<commit_after>import pytest
from django.contrib.auth import get_user_model
from django.contrib.auth.models import AnonymousUser
from django.http import HttpResponse
from django.test import Client, RequestFactory
from django.views import View
from grandchallenge.cases.views import UploadRawFiles
from tests.factories import UserFactory
@pytest.mark.django_db
def test_upload_some_images(client: Client, rf: RequestFactory):
"""
Not working?! :-(
- something with template tags goes wrong here...
response = client.get("/cases/upload/")
assert response.status_code != 200
client.force_login(test_user)
response = client.get("/cases/upload/")
assert response.status_code == 200
"""
test_user = UserFactory()
upload_view = UploadRawFiles.as_view()
upload_view: View
req = rf.get("/cases/upload/")
req.user = AnonymousUser()
response = upload_view(req)
response: HttpResponse
assert response.status_code != 200
req.user = test_user
response = upload_view(req)
response: HttpResponse
assert response.status_code == 200
|
|
629f4b2361d9fc7a021b8fb5c302f00a6240e96c
|
tests/test_filemap_read_alt_file_map.py
|
tests/test_filemap_read_alt_file_map.py
|
import os
import re
import sys
import pytest
from mock import Mock, mock_open, patch
app_path = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, app_path + '/../')
from photo_rename import *
from stubs import *
@pytest.fixture
def alt_file_map_tab():
"""
Sample alternate file map.
"""
return """
abc 123 MY NEW FILE 1
xyz 999 MY NEW FILE 2
"""
@pytest.fixture
def alt_file_map_xxx():
"""
Sample alternate file map.
"""
return """
abc 123xxxMY NEW FILE 1
xyz 999xxxMY NEW FILE 2
"""
@pytest.fixture
def alt_file_map_dict():
"""
Sample alternate file map converted to dict.
"""
return {"abc 123": "MY NEW FILE 1", "xyz 999": "MY NEW FILE 2"}
class TestFilemapReadAltFilemap(object):
"""
Tests reading alternate file map.
"""
@pytest.mark.skipif(RUN_TEST, reason="Work in progress")
def test_read_alt_file_map(self, alt_file_map_tab, alt_file_map_dict):
"""
Read alt file map and create dict with default delimiter `\t'.
"""
a = mock_open(read_data=alt_file_map_tab.strip())
with patch('builtins.open', a) as m:
afmd = read_alt_file_map("foo")
assert afmd == alt_file_map_dict
@pytest.mark.skipif(RUN_TEST, reason="Work in progress")
    def test_read_alt_file_map_custom_delimiter(self, alt_file_map_xxx, alt_file_map_dict):
"""
Read alt file map and create dict with custom delimiter `xxx'.
"""
a = mock_open(read_data=alt_file_map_xxx.strip())
with patch('builtins.open', a) as m:
afmd = read_alt_file_map("foo", delimiter="xxx")
assert afmd == alt_file_map_dict
|
Add new unit tests for reading alternate file map.
|
Add new unit tests for reading alternate file map.
|
Python
|
mit
|
eigenholser/jpeg_rename,eigenholser/jpeg_rename,eigenholser/jpeg_rename,eigenholser/jpeg_rename
|
Add new unit tests for reading alternate file map.
|
import os
import re
import sys
import pytest
from mock import Mock, mock_open, patch
app_path = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, app_path + '/../')
from photo_rename import *
from stubs import *
@pytest.fixture
def alt_file_map_tab():
"""
Sample alternate file map.
"""
return """
abc 123 MY NEW FILE 1
xyz 999 MY NEW FILE 2
"""
@pytest.fixture
def alt_file_map_xxx():
"""
Sample alternate file map.
"""
return """
abc 123xxxMY NEW FILE 1
xyz 999xxxMY NEW FILE 2
"""
@pytest.fixture
def alt_file_map_dict():
"""
Sample alternate file map converted to dict.
"""
return {"abc 123": "MY NEW FILE 1", "xyz 999": "MY NEW FILE 2"}
class TestFilemapReadAltFilemap(object):
"""
Tests reading alternate file map.
"""
@pytest.mark.skipif(RUN_TEST, reason="Work in progress")
def test_read_alt_file_map(self, alt_file_map_tab, alt_file_map_dict):
"""
Read alt file map and create dict with default delimiter `\t'.
"""
a = mock_open(read_data=alt_file_map_tab.strip())
with patch('builtins.open', a) as m:
afmd = read_alt_file_map("foo")
assert afmd == alt_file_map_dict
@pytest.mark.skipif(RUN_TEST, reason="Work in progress")
    def test_read_alt_file_map_custom_delimiter(self, alt_file_map_xxx, alt_file_map_dict):
"""
Read alt file map and create dict with custom delimiter `xxx'.
"""
a = mock_open(read_data=alt_file_map_xxx.strip())
with patch('builtins.open', a) as m:
afmd = read_alt_file_map("foo", delimiter="xxx")
assert afmd == alt_file_map_dict
|
<commit_before><commit_msg>Add new unit tests for reading alternate file map.<commit_after>
|
import os
import re
import sys
import pytest
from mock import Mock, mock_open, patch
app_path = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, app_path + '/../')
from photo_rename import *
from stubs import *
@pytest.fixture
def alt_file_map_tab():
"""
Sample alternate file map.
"""
return """
abc 123 MY NEW FILE 1
xyz 999 MY NEW FILE 2
"""
@pytest.fixture
def alt_file_map_xxx():
"""
Sample alternate file map.
"""
return """
abc 123xxxMY NEW FILE 1
xyz 999xxxMY NEW FILE 2
"""
@pytest.fixture
def alt_file_map_dict():
"""
Sample alternate file map converted to dict.
"""
return {"abc 123": "MY NEW FILE 1", "xyz 999": "MY NEW FILE 2"}
class TestFilemapReadAltFilemap(object):
"""
Tests reading alternate file map.
"""
@pytest.mark.skipif(RUN_TEST, reason="Work in progress")
def test_read_alt_file_map(self, alt_file_map_tab, alt_file_map_dict):
"""
Read alt file map and create dict with default delimiter `\t'.
"""
a = mock_open(read_data=alt_file_map_tab.strip())
with patch('builtins.open', a) as m:
afmd = read_alt_file_map("foo")
assert afmd == alt_file_map_dict
@pytest.mark.skipif(RUN_TEST, reason="Work in progress")
    def test_read_alt_file_map_custom_delimiter(self, alt_file_map_xxx, alt_file_map_dict):
"""
Read alt file map and create dict with custom delimiter `xxx'.
"""
a = mock_open(read_data=alt_file_map_xxx.strip())
with patch('builtins.open', a) as m:
afmd = read_alt_file_map("foo", delimiter="xxx")
assert afmd == alt_file_map_dict
|
Add new unit tests for reading alternate file map.import os
import re
import sys
import pytest
from mock import Mock, mock_open, patch
app_path = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, app_path + '/../')
from photo_rename import *
from stubs import *
@pytest.fixture
def alt_file_map_tab():
"""
Sample alternate file map.
"""
return """
abc 123 MY NEW FILE 1
xyz 999 MY NEW FILE 2
"""
@pytest.fixture
def alt_file_map_xxx():
"""
Sample alternate file map.
"""
return """
abc 123xxxMY NEW FILE 1
xyz 999xxxMY NEW FILE 2
"""
@pytest.fixture
def alt_file_map_dict():
"""
Sample alternate file map converted to dict.
"""
return {"abc 123": "MY NEW FILE 1", "xyz 999": "MY NEW FILE 2"}
class TestFilemapReadAltFilemap(object):
"""
Tests reading alternate file map.
"""
@pytest.mark.skipif(RUN_TEST, reason="Work in progress")
def test_read_alt_file_map(self, alt_file_map_tab, alt_file_map_dict):
"""
Read alt file map and create dict with default delimiter `\t'.
"""
a = mock_open(read_data=alt_file_map_tab.strip())
with patch('builtins.open', a) as m:
afmd = read_alt_file_map("foo")
assert afmd == alt_file_map_dict
@pytest.mark.skipif(RUN_TEST, reason="Work in progress")
    def test_read_alt_file_map_custom_delimiter(self, alt_file_map_xxx, alt_file_map_dict):
"""
Read alt file map and create dict with custom delimiter `xxx'.
"""
a = mock_open(read_data=alt_file_map_xxx.strip())
with patch('builtins.open', a) as m:
afmd = read_alt_file_map("foo", delimiter="xxx")
assert afmd == alt_file_map_dict
|
<commit_before><commit_msg>Add new unit tests for reading alternate file map.<commit_after>import os
import re
import sys
import pytest
from mock import Mock, mock_open, patch
app_path = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, app_path + '/../')
from photo_rename import *
from stubs import *
@pytest.fixture
def alt_file_map_tab():
"""
Sample alternate file map.
"""
return """
abc 123 MY NEW FILE 1
xyz 999 MY NEW FILE 2
"""
@pytest.fixture
def alt_file_map_xxx():
"""
Sample alternate file map.
"""
return """
abc 123xxxMY NEW FILE 1
xyz 999xxxMY NEW FILE 2
"""
@pytest.fixture
def alt_file_map_dict():
"""
Sample alternate file map converted to dict.
"""
return {"abc 123": "MY NEW FILE 1", "xyz 999": "MY NEW FILE 2"}
class TestFilemapReadAltFilemap(object):
"""
Tests reading alternate file map.
"""
@pytest.mark.skipif(RUN_TEST, reason="Work in progress")
def test_read_alt_file_map(self, alt_file_map_tab, alt_file_map_dict):
"""
Read alt file map and create dict with default delimiter `\t'.
"""
a = mock_open(read_data=alt_file_map_tab.strip())
with patch('builtins.open', a) as m:
afmd = read_alt_file_map("foo")
assert afmd == alt_file_map_dict
@pytest.mark.skipif(RUN_TEST, reason="Work in progress")
    def test_read_alt_file_map_custom_delimiter(self, alt_file_map_xxx, alt_file_map_dict):
"""
Read alt file map and create dict with custom delimiter `xxx'.
"""
a = mock_open(read_data=alt_file_map_xxx.strip())
with patch('builtins.open', a) as m:
afmd = read_alt_file_map("foo", delimiter="xxx")
assert afmd == alt_file_map_dict
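The tests above lean on the mock_open pattern to fake file reads without touching the filesystem. A minimal, self-contained sketch of that pattern using the standard-library unittest.mock (the tests above import the external mock backport instead, and the filename below is arbitrary):

from unittest.mock import mock_open, patch

fake = mock_open(read_data="abc 123\tMY NEW FILE 1\n")
with patch("builtins.open", fake):
    # Any open() call inside this block returns the fake handle.
    with open("any_path.txt") as handle:
        assert handle.read() == "abc 123\tMY NEW FILE 1\n"
# The mock records how open() was called.
fake.assert_called_once_with("any_path.txt")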
|
|
62009e7ea13360a71a5294d73411f4b802610aae
|
grizli/pipeline/run_MPI.py
|
grizli/pipeline/run_MPI.py
|
"""
Script to run all redshift fits in parallel with OpenMPI
Usage:
mpiexec -n 10 python -m mpi4py.futures $GRIZLICODE/grizli/pipeline/run_MPI.py
where "-n 8" indicates running 8 parallel threads.
Needs 'fit_args.py' created by `auto_script.generate_fit_params`.
"""
import os
import glob
import numpy as np
import drizzlepac
import matplotlib.pyplot as plt
plt.ioff()
from mpi4py.futures import MPIPoolExecutor
from grizli.fitting import run_all_parallel
from grizli import utils
utils.set_warnings()
def find_ids():
    # Find objects with extracted spectra that need to be fit
all_files=glob.glob('*beams.fits')
files = []
for file in all_files:
if not os.path.exists(file.replace('beams.fits', 'full.fits')):
files.append(file)
print('{0} files to fit'.format(len(files)))
ids = [int(file.split('_')[1].split('.')[0]) for file in files]
return ids
if __name__ == '__main__':
import time
t1 = time.time()
ids = find_ids()
if len(ids) == 0:
exit()
with MPIPoolExecutor() as executor:
res = executor.map(run_all_parallel, ids)
for ix in res:
print(' Done, id={0} / status={1}, t={2:.1f}'.format(ix[0], ix[1], ix[2]))
t2 = time.time()
print('MPIPool: {0:.1f}'.format(t2-t1))
|
Add file for running fits with OpenMPI
|
Add file for running fits with OpenMPI
|
Python
|
mit
|
albertfxwang/grizli
|
Add file for running fits with OpenMPI
|
"""
Script to run all redshift fits in parallel with OpenMPI
Usage:
mpiexec -n 10 python -m mpi4py.futures $GRIZLICODE/grizli/pipeline/run_MPI.py
where "-n 8" indicates running 8 parallel threads.
Needs 'fit_args.py' created by `auto_script.generate_fit_params`.
"""
import os
import glob
import numpy as np
import drizzlepac
import matplotlib.pyplot as plt
plt.ioff()
from mpi4py.futures import MPIPoolExecutor
from grizli.fitting import run_all_parallel
from grizli import utils
utils.set_warnings()
def find_ids():
    # Find objects with extracted spectra that need to be fit
all_files=glob.glob('*beams.fits')
files = []
for file in all_files:
if not os.path.exists(file.replace('beams.fits', 'full.fits')):
files.append(file)
print('{0} files to fit'.format(len(files)))
ids = [int(file.split('_')[1].split('.')[0]) for file in files]
return ids
if __name__ == '__main__':
import time
t1 = time.time()
ids = find_ids()
if len(ids) == 0:
exit()
with MPIPoolExecutor() as executor:
res = executor.map(run_all_parallel, ids)
for ix in res:
print(' Done, id={0} / status={1}, t={2:.1f}'.format(ix[0], ix[1], ix[2]))
t2 = time.time()
print('MPIPool: {0:.1f}'.format(t2-t1))
|
<commit_before><commit_msg>Add file for running fits with OpenMPI<commit_after>
|
"""
Script to run all redshift fits in parallel with OpenMPI
Usage:
mpiexec -n 10 python -m mpi4py.futures $GRIZLICODE/grizli/pipeline/run_MPI.py
where "-n 8" indicates running 8 parallel threads.
Needs 'fit_args.py' created by `auto_script.generate_fit_params`.
"""
import os
import glob
import numpy as np
import drizzlepac
import matplotlib.pyplot as plt
plt.ioff()
from mpi4py.futures import MPIPoolExecutor
from grizli.fitting import run_all_parallel
from grizli import utils
utils.set_warnings()
def find_ids():
    # Find objects with extracted spectra that need to be fit
all_files=glob.glob('*beams.fits')
files = []
for file in all_files:
if not os.path.exists(file.replace('beams.fits', 'full.fits')):
files.append(file)
print('{0} files to fit'.format(len(files)))
ids = [int(file.split('_')[1].split('.')[0]) for file in files]
return ids
if __name__ == '__main__':
import time
t1 = time.time()
ids = find_ids()
if len(ids) == 0:
exit()
with MPIPoolExecutor() as executor:
res = executor.map(run_all_parallel, ids)
for ix in res:
print(' Done, id={0} / status={1}, t={2:.1f}'.format(ix[0], ix[1], ix[2]))
t2 = time.time()
print('MPIPool: {0:.1f}'.format(t2-t1))
|
Add file for running fits with OpenMPI"""
Script to run all redshift fits in parallel with OpenMPI
Usage:
mpiexec -n 10 python -m mpi4py.futures $GRIZLICODE/grizli/pipeline/run_MPI.py
where "-n 8" indicates running 8 parallel threads.
Needs 'fit_args.py' created by `auto_script.generate_fit_params`.
"""
import os
import glob
import numpy as np
import drizzlepac
import matplotlib.pyplot as plt
plt.ioff()
from mpi4py.futures import MPIPoolExecutor
from grizli.fitting import run_all_parallel
from grizli import utils
utils.set_warnings()
def find_ids():
    # Find objects with extracted spectra that need to be fit
all_files=glob.glob('*beams.fits')
files = []
for file in all_files:
if not os.path.exists(file.replace('beams.fits', 'full.fits')):
files.append(file)
print('{0} files to fit'.format(len(files)))
ids = [int(file.split('_')[1].split('.')[0]) for file in files]
return ids
if __name__ == '__main__':
import time
t1 = time.time()
ids = find_ids()
if len(ids) == 0:
exit()
with MPIPoolExecutor() as executor:
res = executor.map(run_all_parallel, ids)
for ix in res:
print(' Done, id={0} / status={1}, t={2:.1f}'.format(ix[0], ix[1], ix[2]))
t2 = time.time()
print('MPIPool: {0:.1f}'.format(t2-t1))
|
<commit_before><commit_msg>Add file for running fits with OpenMPI<commit_after>"""
Script to run all redshift fits in parallel with OpenMPI
Usage:
mpiexec -n 10 python -m mpi4py.futures $GRIZLICODE/grizli/pipeline/run_MPI.py
where "-n 8" indicates running 8 parallel threads.
Needs 'fit_args.py' created by `auto_script.generate_fit_params`.
"""
import os
import glob
import numpy as np
import drizzlepac
import matplotlib.pyplot as plt
plt.ioff()
from mpi4py.futures import MPIPoolExecutor
from grizli.fitting import run_all_parallel
from grizli import utils
utils.set_warnings()
def find_ids():
    # Find objects with extracted spectra that need to be fit
all_files=glob.glob('*beams.fits')
files = []
for file in all_files:
if not os.path.exists(file.replace('beams.fits', 'full.fits')):
files.append(file)
print('{0} files to fit'.format(len(files)))
ids = [int(file.split('_')[1].split('.')[0]) for file in files]
return ids
if __name__ == '__main__':
import time
t1 = time.time()
ids = find_ids()
if len(ids) == 0:
exit()
with MPIPoolExecutor() as executor:
res = executor.map(run_all_parallel, ids)
for ix in res:
print(' Done, id={0} / status={1}, t={2:.1f}'.format(ix[0], ix[1], ix[2]))
t2 = time.time()
print('MPIPool: {0:.1f}'.format(t2-t1))
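MPIPoolExecutor implements the standard concurrent.futures executor interface, so the executor.map() call above can be smoke-tested locally without an MPI launcher by swapping in a standard-library pool. A minimal sketch under that assumption (square is only a stand-in for run_all_parallel, which needs real beams files and fit_args.py):

from concurrent.futures import ProcessPoolExecutor

def square(x):
    return x * x

if __name__ == "__main__":
    with ProcessPoolExecutor(max_workers=4) as executor:
        # Same map-over-ids pattern as the MPI version above.
        print(list(executor.map(square, range(8))))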
|
|
76a4b872101a41eddf583866d675aebeeb815f59
|
dojo/tools/h1/parser.py
|
dojo/tools/h1/parser.py
|
import json
import hashlib
from urllib.parse import urlparse
from dojo.models import Endpoint, Finding
__author__ = 'Kirill Gotsman'
class HackerOneJSONParser(object):
"""
A class that can be used to parse the Get All Reports JSON export from HackerOne API.
"""
def __init__(self, file, test):
"""
        Converts HackerOne reports to DefectDojo findings
"""
self.dupes = dict()
        # Start with an empty collection of findings
self.items = ()
# Exit if file is not provided
if file is None:
return
# Load the contents of the JSON file into a dictionary
data = file.read()
try:
tree = json.loads(str(data, 'utf-8'))
except:
tree = json.loads(data)
        # Convert JSON report to DefectDojo format
for content in tree["data"]:
# Build the title of the Dojo finding
title = "#" + content["id"] + " " + content["attributes"]["title"]
# Build the description of the Dojo finding
description = content["attributes"]["vulnerability_information"]
# Build the severity of the Dojo finding
try:
severity = content["relationships"]["severity"]["data"]["attributes"]["rating"].capitalize()
                if severity not in ["Low", "Medium", "High", "Critical"]:
severity = "Info"
except:
severity = "Info"
# Build the references of the Dojo finding
ref_link = "https://hackerone.com/reports/{}".format(content.get("id"))
references = "[{}]({})".format(ref_link, ref_link)
# Set active state of the Dojo finding
if content["attributes"]["state"] in ["triaged", "new"]:
active=True
else:
active=False
# Set CWE of the Dojo finding
try:
cwe = int(content["relationships"]["weakness"]["data"]["attributes"]["external_id"][4:])
except:
cwe = 0
dupe_key = hashlib.md5(str(references + title).encode('utf-8')).hexdigest()
if dupe_key in self.dupes:
finding = self.dupes[dupe_key]
if finding.references:
finding.references = finding.references
self.dupes[dupe_key] = finding
else:
self.dupes[dupe_key] = True
# Build and return Finding model
finding = Finding(
title=title,
test=test,
active=active,
description=description,
severity=severity,
numerical_severity=Finding.get_numerical_severity(severity),
mitigation="See description",
impact="No impact provided",
references=references,
cwe=cwe,
dynamic_finding=False,)
finding.unsaved_endpoints = list()
self.dupes[dupe_key] = finding
self.items = self.dupes.values()
|
Add functionality of importing hackerone reports add-hackeroneparcer-359
|
Add functionality of importing hackerone reports add-hackeroneparcer-359
|
Python
|
bsd-3-clause
|
rackerlabs/django-DefectDojo,rackerlabs/django-DefectDojo,rackerlabs/django-DefectDojo,rackerlabs/django-DefectDojo
|
Add functionality of importing hackerone reports add-hackeroneparcer-359
|
import json
import hashlib
from urllib.parse import urlparse
from dojo.models import Endpoint, Finding
__author__ = 'Kirill Gotsman'
class HackerOneJSONParser(object):
"""
A class that can be used to parse the Get All Reports JSON export from HackerOne API.
"""
def __init__(self, file, test):
"""
        Converts HackerOne reports to DefectDojo findings
"""
self.dupes = dict()
        # Start with an empty collection of findings
self.items = ()
# Exit if file is not provided
if file is None:
return
# Load the contents of the JSON file into a dictionary
data = file.read()
try:
tree = json.loads(str(data, 'utf-8'))
except:
tree = json.loads(data)
        # Convert JSON report to DefectDojo format
for content in tree["data"]:
# Build the title of the Dojo finding
title = "#" + content["id"] + " " + content["attributes"]["title"]
# Build the description of the Dojo finding
description = content["attributes"]["vulnerability_information"]
# Build the severity of the Dojo finding
try:
severity = content["relationships"]["severity"]["data"]["attributes"]["rating"].capitalize()
                if severity not in ["Low", "Medium", "High", "Critical"]:
severity = "Info"
except:
severity = "Info"
# Build the references of the Dojo finding
ref_link = "https://hackerone.com/reports/{}".format(content.get("id"))
references = "[{}]({})".format(ref_link, ref_link)
# Set active state of the Dojo finding
if content["attributes"]["state"] in ["triaged", "new"]:
active=True
else:
active=False
# Set CWE of the Dojo finding
try:
cwe = int(content["relationships"]["weakness"]["data"]["attributes"]["external_id"][4:])
except:
cwe = 0
dupe_key = hashlib.md5(str(references + title).encode('utf-8')).hexdigest()
if dupe_key in self.dupes:
finding = self.dupes[dupe_key]
if finding.references:
finding.references = finding.references
self.dupes[dupe_key] = finding
else:
self.dupes[dupe_key] = True
# Build and return Finding model
finding = Finding(
title=title,
test=test,
active=active,
description=description,
severity=severity,
numerical_severity=Finding.get_numerical_severity(severity),
mitigation="See description",
impact="No impact provided",
references=references,
cwe=cwe,
dynamic_finding=False,)
finding.unsaved_endpoints = list()
self.dupes[dupe_key] = finding
self.items = self.dupes.values()
|
<commit_before><commit_msg>Add functionality of importing hackerone reports add-hackeroneparcer-359<commit_after>
|
import json
import hashlib
from urllib.parse import urlparse
from dojo.models import Endpoint, Finding
__author__ = 'Kirill Gotsman'
class HackerOneJSONParser(object):
"""
A class that can be used to parse the Get All Reports JSON export from HackerOne API.
"""
def __init__(self, file, test):
"""
        Converts HackerOne reports to DefectDojo findings
"""
self.dupes = dict()
        # Start with an empty collection of findings
self.items = ()
# Exit if file is not provided
if file is None:
return
# Load the contents of the JSON file into a dictionary
data = file.read()
try:
tree = json.loads(str(data, 'utf-8'))
except:
tree = json.loads(data)
        # Convert JSON report to DefectDojo format
for content in tree["data"]:
# Build the title of the Dojo finding
title = "#" + content["id"] + " " + content["attributes"]["title"]
# Build the description of the Dojo finding
description = content["attributes"]["vulnerability_information"]
# Build the severity of the Dojo finding
try:
severity = content["relationships"]["severity"]["data"]["attributes"]["rating"].capitalize()
                if severity not in ["Low", "Medium", "High", "Critical"]:
severity = "Info"
except:
severity = "Info"
# Build the references of the Dojo finding
ref_link = "https://hackerone.com/reports/{}".format(content.get("id"))
references = "[{}]({})".format(ref_link, ref_link)
# Set active state of the Dojo finding
if content["attributes"]["state"] in ["triaged", "new"]:
active=True
else:
active=False
# Set CWE of the Dojo finding
try:
cwe = int(content["relationships"]["weakness"]["data"]["attributes"]["external_id"][4:])
except:
cwe = 0
dupe_key = hashlib.md5(str(references + title).encode('utf-8')).hexdigest()
if dupe_key in self.dupes:
finding = self.dupes[dupe_key]
if finding.references:
finding.references = finding.references
self.dupes[dupe_key] = finding
else:
self.dupes[dupe_key] = True
# Build and return Finding model
finding = Finding(
title=title,
test=test,
active=active,
description=description,
severity=severity,
numerical_severity=Finding.get_numerical_severity(severity),
mitigation="See description",
impact="No impact provided",
references=references,
cwe=cwe,
dynamic_finding=False,)
finding.unsaved_endpoints = list()
self.dupes[dupe_key] = finding
self.items = self.dupes.values()
|
Add functionality of importing hackerone reports add-hackeroneparcer-359import json
import hashlib
from urllib.parse import urlparse
from dojo.models import Endpoint, Finding
__author__ = 'Kirill Gotsman'
class HackerOneJSONParser(object):
"""
A class that can be used to parse the Get All Reports JSON export from HackerOne API.
"""
def __init__(self, file, test):
"""
Converts a HackerOne reports to a DefectDojo finding
"""
self.dupes = dict()
# Start with an empty findings
self.items = ()
# Exit if file is not provided
if file is None:
return
# Load the contents of the JSON file into a dictionary
data = file.read()
try:
tree = json.loads(str(data, 'utf-8'))
except:
tree = json.loads(data)
# Conver JSON report to DefectDojo format
for content in tree["data"]:
# Build the title of the Dojo finding
title = "#" + content["id"] + " " + content["attributes"]["title"]
# Build the description of the Dojo finding
description = content["attributes"]["vulnerability_information"]
# Build the severity of the Dojo finding
try:
severity = content["relationships"]["severity"]["data"]["attributes"]["rating"].capitalize()
if severity not in ["Low", "Medium", "Hight", "Critical"]:
severity = "Info"
except:
severity = "Info"
# Build the references of the Dojo finding
ref_link = "https://hackerone.com/reports/{}".format(content.get("id"))
references = "[{}]({})".format(ref_link, ref_link)
# Set active state of the Dojo finding
if content["attributes"]["state"] in ["triaged", "new"]:
active=True
else:
active=False
# Set CWE of the Dojo finding
try:
cwe = int(content["relationships"]["weakness"]["data"]["attributes"]["external_id"][4:])
except:
cwe = 0
dupe_key = hashlib.md5(str(references + title).encode('utf-8')).hexdigest()
if dupe_key in self.dupes:
finding = self.dupes[dupe_key]
if finding.references:
finding.references = finding.references
self.dupes[dupe_key] = finding
else:
self.dupes[dupe_key] = True
# Build and return Finding model
finding = Finding(
title=title,
test=test,
active=active,
description=description,
severity=severity,
numerical_severity=Finding.get_numerical_severity(severity),
mitigation="See description",
impact="No impact provided",
references=references,
cwe=cwe,
dynamic_finding=False,)
finding.unsaved_endpoints = list()
self.dupes[dupe_key] = finding
self.items = self.dupes.values()
|
<commit_before><commit_msg>Add functionality of importing hackerone reports add-hackeroneparcer-359<commit_after>import json
import hashlib
from urllib.parse import urlparse
from dojo.models import Endpoint, Finding
__author__ = 'Kirill Gotsman'
class HackerOneJSONParser(object):
"""
A class that can be used to parse the Get All Reports JSON export from HackerOne API.
"""
def __init__(self, file, test):
"""
Converts a HackerOne reports to a DefectDojo finding
"""
self.dupes = dict()
# Start with an empty findings
self.items = ()
# Exit if file is not provided
if file is None:
return
# Load the contents of the JSON file into a dictionary
data = file.read()
try:
tree = json.loads(str(data, 'utf-8'))
except:
tree = json.loads(data)
# Conver JSON report to DefectDojo format
for content in tree["data"]:
# Build the title of the Dojo finding
title = "#" + content["id"] + " " + content["attributes"]["title"]
# Build the description of the Dojo finding
description = content["attributes"]["vulnerability_information"]
# Build the severity of the Dojo finding
try:
severity = content["relationships"]["severity"]["data"]["attributes"]["rating"].capitalize()
if severity not in ["Low", "Medium", "Hight", "Critical"]:
severity = "Info"
except:
severity = "Info"
# Build the references of the Dojo finding
ref_link = "https://hackerone.com/reports/{}".format(content.get("id"))
references = "[{}]({})".format(ref_link, ref_link)
# Set active state of the Dojo finding
if content["attributes"]["state"] in ["triaged", "new"]:
active=True
else:
active=False
# Set CWE of the Dojo finding
try:
cwe = int(content["relationships"]["weakness"]["data"]["attributes"]["external_id"][4:])
except:
cwe = 0
dupe_key = hashlib.md5(str(references + title).encode('utf-8')).hexdigest()
if dupe_key in self.dupes:
finding = self.dupes[dupe_key]
if finding.references:
finding.references = finding.references
self.dupes[dupe_key] = finding
else:
self.dupes[dupe_key] = True
# Build and return Finding model
finding = Finding(
title=title,
test=test,
active=active,
description=description,
severity=severity,
numerical_severity=Finding.get_numerical_severity(severity),
mitigation="See description",
impact="No impact provided",
references=references,
cwe=cwe,
dynamic_finding=False,)
finding.unsaved_endpoints = list()
self.dupes[dupe_key] = finding
self.items = self.dupes.values()
|
|
c77c4e0bf8d9fe12b7f11ee1fb0827e259c6727c
|
resnet_sound.py
|
resnet_sound.py
|
'''Trains a simple convnet on the MNIST dataset.
Gets to 99.25% test accuracy after 12 epochs
(there is still a lot of margin for parameter tuning).
16 seconds per epoch on a GRID K520 GPU.
'''
from __future__ import print_function
import numpy as np
np.random.seed(1337) # for reproducibility
SEED = 1337
from keras.datasets import mnist
from keras.utils import np_utils
from keras import backend as K
from bird.models.resnet import ResNetBuilder
from bird.preprocessing.sound import SoundDataGenerator
batch_size = 128
nb_classes = 3
nb_epoch = 12
# input image dimensions
img_rows, img_cols = 224, 1024
# number of channels
nb_channels = 3
# number of convolutional filters to use
nb_filters = 32
# size of pooling area for max pooling
pool_size = (2, 2)
# convolution kernel size
kernel_size = (3, 3)
model = ResNetBuilder.build_resnet_18((img_rows, img_cols, nb_channels), nb_classes)
model.compile(loss='categorical_crossentropy',
optimizer='adadelta',
metrics=['accuracy'])
# this is the augmentation configuration we will use for training
train_datagen = SoundDataGenerator(
rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
# this is a generator that will read pictures found in
# subfolers of 'data/train', and indefinitely generate
# batches of augmented image data
train_generator = train_datagen.flow_from_directory(
'./resnet_sound_data/train', # this is the target directory
target_size=(img_rows, img_cols), # all images will be resized to 150x150
batch_size=16,
class_mode='categorical',
save_to_dir='./resnet_sound_images',
seed=SEED) # since we use binary_crossentropy loss, we need binary labels
model.fit_generator(
train_generator,
samples_per_epoch=3,
nb_epoch=1)
model.save_weights('resnet.h5')
# score = model.evaluate(X_test, Y_test, verbose=0)
print('Test score:', score[0])
print('Test accuracy:', score[1])
|
Add resnet training scheme (draft)
|
Add resnet training scheme (draft)
|
Python
|
mit
|
johnmartinsson/bird-species-classification,johnmartinsson/bird-species-classification
|
Add resnet training scheme (draft)
|
'''Trains a simple convnet on the MNIST dataset.
Gets to 99.25% test accuracy after 12 epochs
(there is still a lot of margin for parameter tuning).
16 seconds per epoch on a GRID K520 GPU.
'''
from __future__ import print_function
import numpy as np
np.random.seed(1337) # for reproducibility
SEED = 1337
from keras.datasets import mnist
from keras.utils import np_utils
from keras import backend as K
from bird.models.resnet import ResNetBuilder
from bird.preprocessing.sound import SoundDataGenerator
batch_size = 128
nb_classes = 3
nb_epoch = 12
# input image dimensions
img_rows, img_cols = 224, 1024
# number of channels
nb_channels = 3
# number of convolutional filters to use
nb_filters = 32
# size of pooling area for max pooling
pool_size = (2, 2)
# convolution kernel size
kernel_size = (3, 3)
model = ResNetBuilder.build_resnet_18((img_rows, img_cols, nb_channels), nb_classes)
model.compile(loss='categorical_crossentropy',
optimizer='adadelta',
metrics=['accuracy'])
# this is the augmentation configuration we will use for training
train_datagen = SoundDataGenerator(
rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
# this is a generator that will read pictures found in
# subfolers of 'data/train', and indefinitely generate
# batches of augmented image data
train_generator = train_datagen.flow_from_directory(
'./resnet_sound_data/train', # this is the target directory
target_size=(img_rows, img_cols), # all images will be resized to 150x150
batch_size=16,
class_mode='categorical',
save_to_dir='./resnet_sound_images',
seed=SEED) # since we use binary_crossentropy loss, we need binary labels
model.fit_generator(
train_generator,
samples_per_epoch=3,
nb_epoch=1)
model.save_weights('resnet.h5')
# score = model.evaluate(X_test, Y_test, verbose=0)
print('Test score:', score[0])
print('Test accuracy:', score[1])
|
<commit_before><commit_msg>Add resnet training scheme (draft)<commit_after>
|
'''Trains a simple convnet on the MNIST dataset.
Gets to 99.25% test accuracy after 12 epochs
(there is still a lot of margin for parameter tuning).
16 seconds per epoch on a GRID K520 GPU.
'''
from __future__ import print_function
import numpy as np
np.random.seed(1337) # for reproducibility
SEED = 1337
from keras.datasets import mnist
from keras.utils import np_utils
from keras import backend as K
from bird.models.resnet import ResNetBuilder
from bird.preprocessing.sound import SoundDataGenerator
batch_size = 128
nb_classes = 3
nb_epoch = 12
# input image dimensions
img_rows, img_cols = 224, 1024
# number of channels
nb_channels = 3
# number of convolutional filters to use
nb_filters = 32
# size of pooling area for max pooling
pool_size = (2, 2)
# convolution kernel size
kernel_size = (3, 3)
model = ResNetBuilder.build_resnet_18((img_rows, img_cols, nb_channels), nb_classes)
model.compile(loss='categorical_crossentropy',
optimizer='adadelta',
metrics=['accuracy'])
# this is the augmentation configuration we will use for training
train_datagen = SoundDataGenerator(
rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
# this is a generator that will read pictures found in
# subfolers of 'data/train', and indefinitely generate
# batches of augmented image data
train_generator = train_datagen.flow_from_directory(
'./resnet_sound_data/train', # this is the target directory
target_size=(img_rows, img_cols), # all images will be resized to 150x150
batch_size=16,
class_mode='categorical',
save_to_dir='./resnet_sound_images',
seed=SEED) # since we use binary_crossentropy loss, we need binary labels
model.fit_generator(
train_generator,
samples_per_epoch=3,
nb_epoch=1)
model.save_weights('resnet.h5')
# score = model.evaluate(X_test, Y_test, verbose=0)
print('Test score:', score[0])
print('Test accuracy:', score[1])
|
Add resnet training scheme (draft)
'''Trains a simple convnet on the MNIST dataset.
Gets to 99.25% test accuracy after 12 epochs
(there is still a lot of margin for parameter tuning).
16 seconds per epoch on a GRID K520 GPU.
'''
from __future__ import print_function
import numpy as np
np.random.seed(1337) # for reproducibility
SEED = 1337
from keras.datasets import mnist
from keras.utils import np_utils
from keras import backend as K
from bird.models.resnet import ResNetBuilder
from bird.preprocessing.sound import SoundDataGenerator
batch_size = 128
nb_classes = 3
nb_epoch = 12
# input image dimensions
img_rows, img_cols = 224, 1024
# number of channels
nb_channels = 3
# number of convolutional filters to use
nb_filters = 32
# size of pooling area for max pooling
pool_size = (2, 2)
# convolution kernel size
kernel_size = (3, 3)
model = ResNetBuilder.build_resnet_18((img_rows, img_cols, nb_channels), nb_classes)
model.compile(loss='categorical_crossentropy',
optimizer='adadelta',
metrics=['accuracy'])
# this is the augmentation configuration we will use for training
train_datagen = SoundDataGenerator(
rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
# this is a generator that will read pictures found in
# subfolers of 'data/train', and indefinitely generate
# batches of augmented image data
train_generator = train_datagen.flow_from_directory(
'./resnet_sound_data/train', # this is the target directory
target_size=(img_rows, img_cols), # all images will be resized to 150x150
batch_size=16,
class_mode='categorical',
save_to_dir='./resnet_sound_images',
seed=SEED) # since we use binary_crossentropy loss, we need binary labels
model.fit_generator(
train_generator,
samples_per_epoch=3,
nb_epoch=1)
model.save_weights('resnet.h5')
# score = model.evaluate(X_test, Y_test, verbose=0)
print('Test score:', score[0])
print('Test accuracy:', score[1])
|
<commit_before><commit_msg>Add resnet training scheme (draft)<commit_after>
'''Trains a simple convnet on the MNIST dataset.
Gets to 99.25% test accuracy after 12 epochs
(there is still a lot of margin for parameter tuning).
16 seconds per epoch on a GRID K520 GPU.
'''
from __future__ import print_function
import numpy as np
np.random.seed(1337) # for reproducibility
SEED = 1337
from keras.datasets import mnist
from keras.utils import np_utils
from keras import backend as K
from bird.models.resnet import ResNetBuilder
from bird.preprocessing.sound import SoundDataGenerator
batch_size = 128
nb_classes = 3
nb_epoch = 12
# input image dimensions
img_rows, img_cols = 224, 1024
# number of channels
nb_channels = 3
# number of convolutional filters to use
nb_filters = 32
# size of pooling area for max pooling
pool_size = (2, 2)
# convolution kernel size
kernel_size = (3, 3)
model = ResNetBuilder.build_resnet_18((img_rows, img_cols, nb_channels), nb_classes)
model.compile(loss='categorical_crossentropy',
optimizer='adadelta',
metrics=['accuracy'])
# this is the augmentation configuration we will use for training
train_datagen = SoundDataGenerator(
rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
# this is a generator that will read pictures found in
# subfolers of 'data/train', and indefinitely generate
# batches of augmented image data
train_generator = train_datagen.flow_from_directory(
'./resnet_sound_data/train', # this is the target directory
target_size=(img_rows, img_cols), # all images will be resized to 150x150
batch_size=16,
class_mode='categorical',
save_to_dir='./resnet_sound_images',
seed=SEED) # since we use binary_crossentropy loss, we need binary labels
model.fit_generator(
train_generator,
samples_per_epoch=3,
nb_epoch=1)
model.save_weights('resnet.h5')
# score = model.evaluate(X_test, Y_test, verbose=0)
print('Test score:', score[0])
print('Test accuracy:', score[1])
|
|
3e06c319b08bbb3e1cb2691a7db8c5cd1675c241
|
tests/test_generate_copy_without_render.py
|
tests/test_generate_copy_without_render.py
|
def test_generate_copy_without_render_extensions(self):
generate.generate_files(
context={
'cookiecutter': {
"repo_name": "test_copy_without_render",
"render_test": "I have been rendered!",
"_copy_without_render": [
"*not-rendered",
"rendered/not_rendered.yml",
"*.txt",
]}
},
repo_dir='tests/test-generate-copy-without-render'
)
self.assertIn("{{cookiecutter.repo_name}}-not-rendered",
os.listdir("test_copy_without_render"))
self.assertIn("test_copy_without_render-rendered",
os.listdir("test_copy_without_render"))
with open("test_copy_without_render/README.txt") as f:
self.assertIn("{{cookiecutter.render_test}}", f.read())
with open("test_copy_without_render/README.rst") as f:
self.assertIn("I have been rendered!", f.read())
with open("test_copy_without_render/test_copy_without_render-rendered/README.txt") as f:
self.assertIn("{{cookiecutter.render_test}}", f.read())
with open("test_copy_without_render/test_copy_without_render-rendered/README.rst") as f:
self.assertIn("I have been rendered", f.read())
with open("test_copy_without_render/{{cookiecutter.repo_name}}-not-rendered/README.rst") as f:
self.assertIn("{{cookiecutter.render_test}}", f.read())
with open("test_copy_without_render/rendered/not_rendered.yml") as f:
self.assertIn("{{cookiecutter.render_test}}", f.read())
if os.path.exists('test_copy_without_render'):
shutil.rmtree('test_copy_without_render')
|
Copy over test from the PR into its own module
|
Copy over test from the PR into its own module
|
Python
|
bsd-3-clause
|
cguardia/cookiecutter,christabor/cookiecutter,michaeljoseph/cookiecutter,dajose/cookiecutter,ramiroluz/cookiecutter,vintasoftware/cookiecutter,luzfcb/cookiecutter,Vauxoo/cookiecutter,0k/cookiecutter,venumech/cookiecutter,vintasoftware/cookiecutter,agconti/cookiecutter,tylerdave/cookiecutter,cguardia/cookiecutter,cichm/cookiecutter,agconti/cookiecutter,vincentbernat/cookiecutter,sp1rs/cookiecutter,Vauxoo/cookiecutter,willingc/cookiecutter,stevepiercy/cookiecutter,audreyr/cookiecutter,atlassian/cookiecutter,drgarcia1986/cookiecutter,moi65/cookiecutter,tylerdave/cookiecutter,venumech/cookiecutter,ionelmc/cookiecutter,christabor/cookiecutter,ramiroluz/cookiecutter,pjbull/cookiecutter,benthomasson/cookiecutter,Springerle/cookiecutter,willingc/cookiecutter,benthomasson/cookiecutter,dajose/cookiecutter,foodszhang/cookiecutter,hackebrot/cookiecutter,moi65/cookiecutter,janusnic/cookiecutter,nhomar/cookiecutter,lgp171188/cookiecutter,atlassian/cookiecutter,kkujawinski/cookiecutter,lgp171188/cookiecutter,luzfcb/cookiecutter,cichm/cookiecutter,drgarcia1986/cookiecutter,0k/cookiecutter,Springerle/cookiecutter,audreyr/cookiecutter,takeflight/cookiecutter,nhomar/cookiecutter,ionelmc/cookiecutter,terryjbates/cookiecutter,pjbull/cookiecutter,takeflight/cookiecutter,foodszhang/cookiecutter,janusnic/cookiecutter,stevepiercy/cookiecutter,michaeljoseph/cookiecutter,lucius-feng/cookiecutter,kkujawinski/cookiecutter,vincentbernat/cookiecutter,sp1rs/cookiecutter,hackebrot/cookiecutter,lucius-feng/cookiecutter,terryjbates/cookiecutter
|
Copy over test from the PR into its own module
|
def test_generate_copy_without_render_extensions(self):
generate.generate_files(
context={
'cookiecutter': {
"repo_name": "test_copy_without_render",
"render_test": "I have been rendered!",
"_copy_without_render": [
"*not-rendered",
"rendered/not_rendered.yml",
"*.txt",
]}
},
repo_dir='tests/test-generate-copy-without-render'
)
self.assertIn("{{cookiecutter.repo_name}}-not-rendered",
os.listdir("test_copy_without_render"))
self.assertIn("test_copy_without_render-rendered",
os.listdir("test_copy_without_render"))
with open("test_copy_without_render/README.txt") as f:
self.assertIn("{{cookiecutter.render_test}}", f.read())
with open("test_copy_without_render/README.rst") as f:
self.assertIn("I have been rendered!", f.read())
with open("test_copy_without_render/test_copy_without_render-rendered/README.txt") as f:
self.assertIn("{{cookiecutter.render_test}}", f.read())
with open("test_copy_without_render/test_copy_without_render-rendered/README.rst") as f:
self.assertIn("I have been rendered", f.read())
with open("test_copy_without_render/{{cookiecutter.repo_name}}-not-rendered/README.rst") as f:
self.assertIn("{{cookiecutter.render_test}}", f.read())
with open("test_copy_without_render/rendered/not_rendered.yml") as f:
self.assertIn("{{cookiecutter.render_test}}", f.read())
if os.path.exists('test_copy_without_render'):
shutil.rmtree('test_copy_without_render')
|
<commit_before><commit_msg>Copy over test from the PR into its own module<commit_after>
|
def test_generate_copy_without_render_extensions(self):
generate.generate_files(
context={
'cookiecutter': {
"repo_name": "test_copy_without_render",
"render_test": "I have been rendered!",
"_copy_without_render": [
"*not-rendered",
"rendered/not_rendered.yml",
"*.txt",
]}
},
repo_dir='tests/test-generate-copy-without-render'
)
self.assertIn("{{cookiecutter.repo_name}}-not-rendered",
os.listdir("test_copy_without_render"))
self.assertIn("test_copy_without_render-rendered",
os.listdir("test_copy_without_render"))
with open("test_copy_without_render/README.txt") as f:
self.assertIn("{{cookiecutter.render_test}}", f.read())
with open("test_copy_without_render/README.rst") as f:
self.assertIn("I have been rendered!", f.read())
with open("test_copy_without_render/test_copy_without_render-rendered/README.txt") as f:
self.assertIn("{{cookiecutter.render_test}}", f.read())
with open("test_copy_without_render/test_copy_without_render-rendered/README.rst") as f:
self.assertIn("I have been rendered", f.read())
with open("test_copy_without_render/{{cookiecutter.repo_name}}-not-rendered/README.rst") as f:
self.assertIn("{{cookiecutter.render_test}}", f.read())
with open("test_copy_without_render/rendered/not_rendered.yml") as f:
self.assertIn("{{cookiecutter.render_test}}", f.read())
if os.path.exists('test_copy_without_render'):
shutil.rmtree('test_copy_without_render')
|
Copy over test from the PR into its own moduledef test_generate_copy_without_render_extensions(self):
generate.generate_files(
context={
'cookiecutter': {
"repo_name": "test_copy_without_render",
"render_test": "I have been rendered!",
"_copy_without_render": [
"*not-rendered",
"rendered/not_rendered.yml",
"*.txt",
]}
},
repo_dir='tests/test-generate-copy-without-render'
)
self.assertIn("{{cookiecutter.repo_name}}-not-rendered",
os.listdir("test_copy_without_render"))
self.assertIn("test_copy_without_render-rendered",
os.listdir("test_copy_without_render"))
with open("test_copy_without_render/README.txt") as f:
self.assertIn("{{cookiecutter.render_test}}", f.read())
with open("test_copy_without_render/README.rst") as f:
self.assertIn("I have been rendered!", f.read())
with open("test_copy_without_render/test_copy_without_render-rendered/README.txt") as f:
self.assertIn("{{cookiecutter.render_test}}", f.read())
with open("test_copy_without_render/test_copy_without_render-rendered/README.rst") as f:
self.assertIn("I have been rendered", f.read())
with open("test_copy_without_render/{{cookiecutter.repo_name}}-not-rendered/README.rst") as f:
self.assertIn("{{cookiecutter.render_test}}", f.read())
with open("test_copy_without_render/rendered/not_rendered.yml") as f:
self.assertIn("{{cookiecutter.render_test}}", f.read())
if os.path.exists('test_copy_without_render'):
shutil.rmtree('test_copy_without_render')
|
<commit_before><commit_msg>Copy over test from the PR into its own module<commit_after>def test_generate_copy_without_render_extensions(self):
generate.generate_files(
context={
'cookiecutter': {
"repo_name": "test_copy_without_render",
"render_test": "I have been rendered!",
"_copy_without_render": [
"*not-rendered",
"rendered/not_rendered.yml",
"*.txt",
]}
},
repo_dir='tests/test-generate-copy-without-render'
)
self.assertIn("{{cookiecutter.repo_name}}-not-rendered",
os.listdir("test_copy_without_render"))
self.assertIn("test_copy_without_render-rendered",
os.listdir("test_copy_without_render"))
with open("test_copy_without_render/README.txt") as f:
self.assertIn("{{cookiecutter.render_test}}", f.read())
with open("test_copy_without_render/README.rst") as f:
self.assertIn("I have been rendered!", f.read())
with open("test_copy_without_render/test_copy_without_render-rendered/README.txt") as f:
self.assertIn("{{cookiecutter.render_test}}", f.read())
with open("test_copy_without_render/test_copy_without_render-rendered/README.rst") as f:
self.assertIn("I have been rendered", f.read())
with open("test_copy_without_render/{{cookiecutter.repo_name}}-not-rendered/README.rst") as f:
self.assertIn("{{cookiecutter.render_test}}", f.read())
with open("test_copy_without_render/rendered/not_rendered.yml") as f:
self.assertIn("{{cookiecutter.render_test}}", f.read())
if os.path.exists('test_copy_without_render'):
shutil.rmtree('test_copy_without_render')
|
|
3bf0c34c256e0c94475283bbeffdbc8fb384aa25
|
tests/test_tool_placement.py
|
tests/test_tool_placement.py
|
import pytest
from gi.repository import Gtk
from gaphas.item import Element
from gaphas.tool.placement import PlacementState, on_drag_begin, placement_tool
@pytest.fixture
def tool_factory(connections):
def tool_factory():
return Element(connections)
return tool_factory
def test_can_create_placement_tool(view, tool_factory):
tool = placement_tool(view, tool_factory, 2)
assert isinstance(tool, Gtk.Gesture)
def test_create_new_element(view, tool_factory, window):
state = PlacementState(tool_factory, 2)
tool = placement_tool(view, tool_factory, 2)
on_drag_begin(tool, 0, 0, state)
assert state.moving
|
Add some extra tests for placement
|
Add some extra tests for placement
|
Python
|
lgpl-2.1
|
amolenaar/gaphas
|
Add some extra tests for placement
|
import pytest
from gi.repository import Gtk
from gaphas.item import Element
from gaphas.tool.placement import PlacementState, on_drag_begin, placement_tool
@pytest.fixture
def tool_factory(connections):
def tool_factory():
return Element(connections)
return tool_factory
def test_can_create_placement_tool(view, tool_factory):
tool = placement_tool(view, tool_factory, 2)
assert isinstance(tool, Gtk.Gesture)
def test_create_new_element(view, tool_factory, window):
state = PlacementState(tool_factory, 2)
tool = placement_tool(view, tool_factory, 2)
on_drag_begin(tool, 0, 0, state)
assert state.moving
|
<commit_before><commit_msg>Add some extra tests for placement<commit_after>
|
import pytest
from gi.repository import Gtk
from gaphas.item import Element
from gaphas.tool.placement import PlacementState, on_drag_begin, placement_tool
@pytest.fixture
def tool_factory(connections):
def tool_factory():
return Element(connections)
return tool_factory
def test_can_create_placement_tool(view, tool_factory):
tool = placement_tool(view, tool_factory, 2)
assert isinstance(tool, Gtk.Gesture)
def test_create_new_element(view, tool_factory, window):
state = PlacementState(tool_factory, 2)
tool = placement_tool(view, tool_factory, 2)
on_drag_begin(tool, 0, 0, state)
assert state.moving
|
Add some extra tests for placementimport pytest
from gi.repository import Gtk
from gaphas.item import Element
from gaphas.tool.placement import PlacementState, on_drag_begin, placement_tool
@pytest.fixture
def tool_factory(connections):
def tool_factory():
return Element(connections)
return tool_factory
def test_can_create_placement_tool(view, tool_factory):
tool = placement_tool(view, tool_factory, 2)
assert isinstance(tool, Gtk.Gesture)
def test_create_new_element(view, tool_factory, window):
state = PlacementState(tool_factory, 2)
tool = placement_tool(view, tool_factory, 2)
on_drag_begin(tool, 0, 0, state)
assert state.moving
|
<commit_before><commit_msg>Add some extra tests for placement<commit_after>import pytest
from gi.repository import Gtk
from gaphas.item import Element
from gaphas.tool.placement import PlacementState, on_drag_begin, placement_tool
@pytest.fixture
def tool_factory(connections):
def tool_factory():
return Element(connections)
return tool_factory
def test_can_create_placement_tool(view, tool_factory):
tool = placement_tool(view, tool_factory, 2)
assert isinstance(tool, Gtk.Gesture)
def test_create_new_element(view, tool_factory, window):
state = PlacementState(tool_factory, 2)
tool = placement_tool(view, tool_factory, 2)
on_drag_begin(tool, 0, 0, state)
assert state.moving
|
|
8baf19e38ea7c51679d2e5ed32e8519a3290f20c
|
code/print_analysis_files.py
|
code/print_analysis_files.py
|
import synthetic_data_experiments as sde
import logging
if __name__ == "__main__":
args = sde.get_integrous_arguments_values()
for repeat_idx in xrange(args.num_repeats) :
resu_dir = "%s/repeat_%d" % (args.resu_dir, repeat_idx)
data_dir = '%s/repeat_%d' % (args.data_dir, repeat_idx)
trIndices_fname = data_dir+'/'+args.simu_id+'.fold%d.trIndices'
teIndices_fname = data_dir+'/'+args.simu_id+'.fold%d.teIndices'
ssIndices_fname = data_dir+'/'+args.simu_id+'.fold%d.ss%d.ssIndices'
xp_indices = [{'trIndices': list(), 'teIndices':list(), 'ssIndices':list()} for fold in xrange(args.num_folds)]
for fold_idx in xrange (args.num_folds) :
with open(trIndices_fname %(fold_idx), 'r') as trIndices_f :
line = trIndices_f.readline().split()
xp_indices[fold_idx]["trIndices"] = [int (i) for i in line ]
with open(teIndices_fname %(fold_idx),'r') as teIndices_f :
line = teIndices_f.readline().split()
xp_indices[fold_idx]["teIndices"] = [int (i) for i in line ]
for ss_idx in xrange (args.num_subsamples) :
with open(ssIndices_fname %(fold_idx,ss_idx), 'r') as ssIndices_f:
line = ssIndices_f.readline().split()
xp_indices[fold_idx]["ssIndices"].append( [int (i) for i in line ] )
sde.print_analysis_files( args, resu_dir, data_dir, xp_indices)
|
Create script that print analysis_files (rep per rep)
|
Create script that print analysis_files (rep per rep)
|
Python
|
mit
|
chagaz/sfan,chagaz/sfan,chagaz/sfan,chagaz/sfan,chagaz/sfan
|
Create script that print analysis_files (rep per rep)
|
import synthetic_data_experiments as sde
import logging
if __name__ == "__main__":
args = sde.get_integrous_arguments_values()
for repeat_idx in xrange(args.num_repeats) :
resu_dir = "%s/repeat_%d" % (args.resu_dir, repeat_idx)
data_dir = '%s/repeat_%d' % (args.data_dir, repeat_idx)
trIndices_fname = data_dir+'/'+args.simu_id+'.fold%d.trIndices'
teIndices_fname = data_dir+'/'+args.simu_id+'.fold%d.teIndices'
ssIndices_fname = data_dir+'/'+args.simu_id+'.fold%d.ss%d.ssIndices'
xp_indices = [{'trIndices': list(), 'teIndices':list(), 'ssIndices':list()} for fold in xrange(args.num_folds)]
for fold_idx in xrange (args.num_folds) :
with open(trIndices_fname %(fold_idx), 'r') as trIndices_f :
line = trIndices_f.readline().split()
xp_indices[fold_idx]["trIndices"] = [int (i) for i in line ]
with open(teIndices_fname %(fold_idx),'r') as teIndices_f :
line = teIndices_f.readline().split()
xp_indices[fold_idx]["teIndices"] = [int (i) for i in line ]
for ss_idx in xrange (args.num_subsamples) :
with open(ssIndices_fname %(fold_idx,ss_idx), 'r') as ssIndices_f:
line = ssIndices_f.readline().split()
xp_indices[fold_idx]["ssIndices"].append( [int (i) for i in line ] )
sde.print_analysis_files( args, resu_dir, data_dir, xp_indices)
|
<commit_before><commit_msg>Create script that print analysis_files (rep per rep)<commit_after>
|
import synthetic_data_experiments as sde
import logging
if __name__ == "__main__":
args = sde.get_integrous_arguments_values()
for repeat_idx in xrange(args.num_repeats) :
resu_dir = "%s/repeat_%d" % (args.resu_dir, repeat_idx)
data_dir = '%s/repeat_%d' % (args.data_dir, repeat_idx)
trIndices_fname = data_dir+'/'+args.simu_id+'.fold%d.trIndices'
teIndices_fname = data_dir+'/'+args.simu_id+'.fold%d.teIndices'
ssIndices_fname = data_dir+'/'+args.simu_id+'.fold%d.ss%d.ssIndices'
xp_indices = [{'trIndices': list(), 'teIndices':list(), 'ssIndices':list()} for fold in xrange(args.num_folds)]
for fold_idx in xrange (args.num_folds) :
with open(trIndices_fname %(fold_idx), 'r') as trIndices_f :
line = trIndices_f.readline().split()
xp_indices[fold_idx]["trIndices"] = [int (i) for i in line ]
with open(teIndices_fname %(fold_idx),'r') as teIndices_f :
line = teIndices_f.readline().split()
xp_indices[fold_idx]["teIndices"] = [int (i) for i in line ]
for ss_idx in xrange (args.num_subsamples) :
with open(ssIndices_fname %(fold_idx,ss_idx), 'r') as ssIndices_f:
line = ssIndices_f.readline().split()
xp_indices[fold_idx]["ssIndices"].append( [int (i) for i in line ] )
sde.print_analysis_files( args, resu_dir, data_dir, xp_indices)
|
Create script that print analysis_files (rep per rep)import synthetic_data_experiments as sde
import logging
if __name__ == "__main__":
args = sde.get_integrous_arguments_values()
for repeat_idx in xrange(args.num_repeats) :
resu_dir = "%s/repeat_%d" % (args.resu_dir, repeat_idx)
data_dir = '%s/repeat_%d' % (args.data_dir, repeat_idx)
trIndices_fname = data_dir+'/'+args.simu_id+'.fold%d.trIndices'
teIndices_fname = data_dir+'/'+args.simu_id+'.fold%d.teIndices'
ssIndices_fname = data_dir+'/'+args.simu_id+'.fold%d.ss%d.ssIndices'
xp_indices = [{'trIndices': list(), 'teIndices':list(), 'ssIndices':list()} for fold in xrange(args.num_folds)]
for fold_idx in xrange (args.num_folds) :
with open(trIndices_fname %(fold_idx), 'r') as trIndices_f :
line = trIndices_f.readline().split()
xp_indices[fold_idx]["trIndices"] = [int (i) for i in line ]
with open(teIndices_fname %(fold_idx),'r') as teIndices_f :
line = teIndices_f.readline().split()
xp_indices[fold_idx]["teIndices"] = [int (i) for i in line ]
for ss_idx in xrange (args.num_subsamples) :
with open(ssIndices_fname %(fold_idx,ss_idx), 'r') as ssIndices_f:
line = ssIndices_f.readline().split()
xp_indices[fold_idx]["ssIndices"].append( [int (i) for i in line ] )
sde.print_analysis_files( args, resu_dir, data_dir, xp_indices)
|
<commit_before><commit_msg>Create script that print analysis_files (rep per rep)<commit_after>import synthetic_data_experiments as sde
import logging
if __name__ == "__main__":
args = sde.get_integrous_arguments_values()
for repeat_idx in xrange(args.num_repeats) :
resu_dir = "%s/repeat_%d" % (args.resu_dir, repeat_idx)
data_dir = '%s/repeat_%d' % (args.data_dir, repeat_idx)
trIndices_fname = data_dir+'/'+args.simu_id+'.fold%d.trIndices'
teIndices_fname = data_dir+'/'+args.simu_id+'.fold%d.teIndices'
ssIndices_fname = data_dir+'/'+args.simu_id+'.fold%d.ss%d.ssIndices'
xp_indices = [{'trIndices': list(), 'teIndices':list(), 'ssIndices':list()} for fold in xrange(args.num_folds)]
for fold_idx in xrange (args.num_folds) :
with open(trIndices_fname %(fold_idx), 'r') as trIndices_f :
line = trIndices_f.readline().split()
xp_indices[fold_idx]["trIndices"] = [int (i) for i in line ]
with open(teIndices_fname %(fold_idx),'r') as teIndices_f :
line = teIndices_f.readline().split()
xp_indices[fold_idx]["teIndices"] = [int (i) for i in line ]
for ss_idx in xrange (args.num_subsamples) :
with open(ssIndices_fname %(fold_idx,ss_idx), 'r') as ssIndices_f:
line = ssIndices_f.readline().split()
xp_indices[fold_idx]["ssIndices"].append( [int (i) for i in line ] )
sde.print_analysis_files( args, resu_dir, data_dir, xp_indices)
|
|
e425efee30dacb16b5e3f677ffea8ab39c66c6ac
|
dump_db.py
|
dump_db.py
|
# !/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Dump SQL database into bibtex file."""
import sys
import bibtexparser
from bibtexparser.bparser import BibTexParser
from bibtexparser.bwriter import BibTexWriter
from bibtexparser.bibdatabase import BibDatabase
from bibtexparser.customization import convert_to_unicode
from sqlalchemy import Column, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from create_db import BiblioEntry
def main():
if len(sys.argv) < 3:
print("Wrong number of arguments. Usage: \n")
print("python3 dump_db.py name.db dump.bib")
print("Dump database")
print("Database: ", sys.argv[1])
engine = create_engine('sqlite:///app.db')
Session = sessionmaker()
Session.configure(bind=engine)
session = Session()
db = BibDatabase()
db.entries = []
dbentries = session.query(BiblioEntry)
for e in dbentries:
db.entries.append(
{'journal': e.journal,
'month': e.month,
'title': e.title,
'year': str(e.year),
'publisher': e.publisher,
'school': e.school,
'ID': e.ID,
'url': e.url,
'pdf': e.pdf,
'author': e.authors,
'keyword': e.keywords,
'ENTRYTYPE': e.ENTRYTYPE}
)
print("Write file on", sys.argv[2])
writer = BibTexWriter()
with open(sys.argv[2], 'w') as bibfile:
bibfile.write(writer.write(db))
session.close()
print("Connection closed.")
if __name__ == '__main__':
main()
|
Add a script to dump DB in bib file
|
[DEV] Add a script to dump DB in bib file
|
Python
|
mit
|
frapac/bibtex-browser,frapac/bibtex-browser,frapac/bibtex-browser
|
[DEV] Add a script to dump DB in bib file
|
# !/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Dump SQL database into bibtex file."""
import sys
import bibtexparser
from bibtexparser.bparser import BibTexParser
from bibtexparser.bwriter import BibTexWriter
from bibtexparser.bibdatabase import BibDatabase
from bibtexparser.customization import convert_to_unicode
from sqlalchemy import Column, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from create_db import BiblioEntry
def main():
if len(sys.argv) < 3:
print("Wrong number of arguments. Usage: \n")
print("python3 dump_db.py name.db dump.bib")
print("Dump database")
print("Database: ", sys.argv[1])
engine = create_engine('sqlite:///app.db')
Session = sessionmaker()
Session.configure(bind=engine)
session = Session()
db = BibDatabase()
db.entries = []
dbentries = session.query(BiblioEntry)
for e in dbentries:
db.entries.append(
{'journal': e.journal,
'month': e.month,
'title': e.title,
'year': str(e.year),
'publisher': e.publisher,
'school': e.school,
'ID': e.ID,
'url': e.url,
'pdf': e.pdf,
'author': e.authors,
'keyword': e.keywords,
'ENTRYTYPE': e.ENTRYTYPE}
)
print("Write file on", sys.argv[2])
writer = BibTexWriter()
with open(sys.argv[2], 'w') as bibfile:
bibfile.write(writer.write(db))
session.close()
print("Connection closed.")
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>[DEV] Add a script to dump DB in bib file<commit_after>
|
# !/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Dump SQL database into bibtex file."""
import sys
import bibtexparser
from bibtexparser.bparser import BibTexParser
from bibtexparser.bwriter import BibTexWriter
from bibtexparser.bibdatabase import BibDatabase
from bibtexparser.customization import convert_to_unicode
from sqlalchemy import Column, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from create_db import BiblioEntry
def main():
if len(sys.argv) < 3:
print("Wrong number of arguments. Usage: \n")
print("python3 dump_db.py name.db dump.bib")
print("Dump database")
print("Database: ", sys.argv[1])
engine = create_engine('sqlite:///app.db')
Session = sessionmaker()
Session.configure(bind=engine)
session = Session()
db = BibDatabase()
db.entries = []
dbentries = session.query(BiblioEntry)
for e in dbentries:
db.entries.append(
{'journal': e.journal,
'month': e.month,
'title': e.title,
'year': str(e.year),
'publisher': e.publisher,
'school': e.school,
'ID': e.ID,
'url': e.url,
'pdf': e.pdf,
'author': e.authors,
'keyword': e.keywords,
'ENTRYTYPE': e.ENTRYTYPE}
)
print("Write file on", sys.argv[2])
writer = BibTexWriter()
with open(sys.argv[2], 'w') as bibfile:
bibfile.write(writer.write(db))
session.close()
print("Connection closed.")
if __name__ == '__main__':
main()
|
[DEV] Add a script to dump DB in bib file# !/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Dump SQL database into bibtex file."""
import sys
import bibtexparser
from bibtexparser.bparser import BibTexParser
from bibtexparser.bwriter import BibTexWriter
from bibtexparser.bibdatabase import BibDatabase
from bibtexparser.customization import convert_to_unicode
from sqlalchemy import Column, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from create_db import BiblioEntry
def main():
if len(sys.argv) < 3:
print("Wrong number of arguments. Usage: \n")
print("python3 dump_db.py name.db dump.bib")
print("Dump database")
print("Database: ", sys.argv[1])
engine = create_engine('sqlite:///app.db')
Session = sessionmaker()
Session.configure(bind=engine)
session = Session()
db = BibDatabase()
db.entries = []
dbentries = session.query(BiblioEntry)
for e in dbentries:
db.entries.append(
{'journal': e.journal,
'month': e.month,
'title': e.title,
'year': str(e.year),
'publisher': e.publisher,
'school': e.school,
'ID': e.ID,
'url': e.url,
'pdf': e.pdf,
'author': e.authors,
'keyword': e.keywords,
'ENTRYTYPE': e.ENTRYTYPE}
)
print("Write file on", sys.argv[2])
writer = BibTexWriter()
with open(sys.argv[2], 'w') as bibfile:
bibfile.write(writer.write(db))
session.close()
print("Connection closed.")
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>[DEV] Add a script to dump DB in bib file<commit_after># !/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Dump SQL database into bibtex file."""
import sys
import bibtexparser
from bibtexparser.bparser import BibTexParser
from bibtexparser.bwriter import BibTexWriter
from bibtexparser.bibdatabase import BibDatabase
from bibtexparser.customization import convert_to_unicode
from sqlalchemy import Column, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from create_db import BiblioEntry
def main():
if len(sys.argv) < 3:
print("Wrong number of arguments. Usage: \n")
print("python3 dump_db.py name.db dump.bib")
print("Dump database")
print("Database: ", sys.argv[1])
engine = create_engine('sqlite:///app.db')
Session = sessionmaker()
Session.configure(bind=engine)
session = Session()
db = BibDatabase()
db.entries = []
dbentries = session.query(BiblioEntry)
for e in dbentries:
db.entries.append(
{'journal': e.journal,
'month': e.month,
'title': e.title,
'year': str(e.year),
'publisher': e.publisher,
'school': e.school,
'ID': e.ID,
'url': e.url,
'pdf': e.pdf,
'author': e.authors,
'keyword': e.keywords,
'ENTRYTYPE': e.ENTRYTYPE}
)
print("Write file on", sys.argv[2])
writer = BibTexWriter()
with open(sys.argv[2], 'w') as bibfile:
bibfile.write(writer.write(db))
session.close()
print("Connection closed.")
if __name__ == '__main__':
main()
|
|
f5172547163660cacee8c6e8dda157322ac7e0a1
|
fib/fib.py
|
fib/fib.py
|
phi = (1 + 5**0.5) / 2
def F(n):
return int(round((phi**n - (1-phi)**n) / 5**0.5))
def fib(n):
a = 0
fibs = []
if n > 0:
while a < n:
a = a + 1
fibs.append(F(a))
elif n < 0:
while a > n:
a = a - 1
fibs.append(F(a))
return fibs
def run():
numberWanted = int(input("Stage of Fibs wanted"))
fibs = fib(numberWanted)
print("F(0) = 0")
a = 0
if numberWanted > 0:
while a < numberWanted:
a = a + 1
print("F(" + str(a) + ") = " + str(fibs[a-1]))
elif numberWanted < 0:
while a > numberWanted:
a = a - 1
print("F(" + str(a) + ") = " + str(fibs[(-1 * a)-1]))
run()
|
Add Fibonacci script to Python
|
Add Fibonacci script to Python
|
Python
|
mit
|
Strikingwolf/Messing-With-Python
|
Add Fibonacci script to Python
|
phi = (1 + 5**0.5) / 2
def F(n):
return int(round((phi**n - (1-phi)**n) / 5**0.5))
def fib(n):
a = 0
fibs = []
if n > 0:
while a < n:
a = a + 1
fibs.append(F(a))
elif n < 0:
while a > n:
a = a - 1
fibs.append(F(a))
return fibs
def run():
numberWanted = int(input("Stage of Fibs wanted"))
fibs = fib(numberWanted)
print("F(0) = 0")
a = 0
if numberWanted > 0:
while a < numberWanted:
a = a + 1
print("F(" + str(a) + ") = " + str(fibs[a-1]))
elif numberWanted < 0:
while a > numberWanted:
a = a - 1
print("F(" + str(a) + ") = " + str(fibs[(-1 * a)-1]))
run()
|
<commit_before><commit_msg>Add Fibonacci script to Python<commit_after>
|
phi = (1 + 5**0.5) / 2
def F(n):
return int(round((phi**n - (1-phi)**n) / 5**0.5))
def fib(n):
a = 0
fibs = []
if n > 0:
while a < n:
a = a + 1
fibs.append(F(a))
elif n < 0:
while a > n:
a = a - 1
fibs.append(F(a))
return fibs
def run():
numberWanted = int(input("Stage of Fibs wanted"))
fibs = fib(numberWanted)
print("F(0) = 0")
a = 0
if numberWanted > 0:
while a < numberWanted:
a = a + 1
print("F(" + str(a) + ") = " + str(fibs[a-1]))
elif numberWanted < 0:
while a > numberWanted:
a = a - 1
print("F(" + str(a) + ") = " + str(fibs[(-1 * a)-1]))
run()
|
Add Fibonacci script to Pythonphi = (1 + 5**0.5) / 2
def F(n):
return int(round((phi**n - (1-phi)**n) / 5**0.5))
def fib(n):
a = 0
fibs = []
if n > 0:
while a < n:
a = a + 1
fibs.append(F(a))
elif n < 0:
while a > n:
a = a - 1
fibs.append(F(a))
return fibs
def run():
numberWanted = int(input("Stage of Fibs wanted"))
fibs = fib(numberWanted)
print("F(0) = 0")
a = 0
if numberWanted > 0:
while a < numberWanted:
a = a + 1
print("F(" + str(a) + ") = " + str(fibs[a-1]))
elif numberWanted < 0:
while a > numberWanted:
a = a - 1
print("F(" + str(a) + ") = " + str(fibs[(-1 * a)-1]))
run()
|
<commit_before><commit_msg>Add Fibonacci script to Python<commit_after>phi = (1 + 5**0.5) / 2
def F(n):
return int(round((phi**n - (1-phi)**n) / 5**0.5))
def fib(n):
a = 0
fibs = []
if n > 0:
while a < n:
a = a + 1
fibs.append(F(a))
elif n < 0:
while a > n:
a = a - 1
fibs.append(F(a))
return fibs
def run():
numberWanted = int(input("Stage of Fibs wanted"))
fibs = fib(numberWanted)
print("F(0) = 0")
a = 0
if numberWanted > 0:
while a < numberWanted:
a = a + 1
print("F(" + str(a) + ") = " + str(fibs[a-1]))
elif numberWanted < 0:
while a > numberWanted:
a = a - 1
print("F(" + str(a) + ") = " + str(fibs[(-1 * a)-1]))
run()
|
|
862611ce97a45f5cbc78cae298a7f4936454ad19
|
examples/crackme_xor_obfu.py
|
examples/crackme_xor_obfu.py
|
import smt2lib
from triton import *
# PoC. Doesn't work yet
# $ triton ./examples/crackme_xor_obfu.py ./samples/crackmes/crackme_xor_obfu a
_GREEN = "\033[92m"
_ENDC = "\033[0m"
def cbeforeSymProc(instruction):
# 400544 mov [rbp+user_password], rdi
# RDI points on the user password
if instruction.address == 0x400544:
rdi = getRegValue(IDREF.REG.RDI)
taintMem(rdi)
def cafter(instruction):
print '%#x: %s' %(instruction.address, instruction.assembly)
for se in instruction.symbolicElements:
if se.isTainted == True:
print '%s\t -> %s%s' %(_GREEN, se.expression, _ENDC)
else:
print '%s\t -> %s%s' %(_ENDC, se.expression, _ENDC)
if instruction.address == 0x4011ed:
raxId = getRegSymbolicID(IDREF.REG.RAX)
raxExpr = getBacktrackedSymExpr(raxId)
expr = smt2lib.smtAssert(smt2lib.equal(raxExpr, smt2lib.bv(0, 64))) # (assert (= rax 0)
print expr
print getModel(expr)
if __name__ == '__main__':
startAnalysisFromAddr(0x4011dd)
stopAnalysisFromAddr(0x40120b)
addCallback(cbeforeSymProc, IDREF.CALLBACK.BEFORE_SYMPROC)
addCallback(cafter, IDREF.CALLBACK.AFTER)
runProgram()
|
Test to crack the xor obfuscation crackme
|
Test to crack the xor obfuscation crackme
|
Python
|
apache-2.0
|
JonathanSalwan/Triton,JonathanSalwan/Triton,JonathanSalwan/Triton,JonathanSalwan/Triton,JonathanSalwan/Triton
|
Test to crack the xor obfuscation crackme
|
import smt2lib
from triton import *
# PoC. Doesn't work yet
# $ triton ./examples/crackme_xor_obfu.py ./samples/crackmes/crackme_xor_obfu a
_GREEN = "\033[92m"
_ENDC = "\033[0m"
def cbeforeSymProc(instruction):
# 400544 mov [rbp+user_password], rdi
# RDI points on the user password
if instruction.address == 0x400544:
rdi = getRegValue(IDREF.REG.RDI)
taintMem(rdi)
def cafter(instruction):
print '%#x: %s' %(instruction.address, instruction.assembly)
for se in instruction.symbolicElements:
if se.isTainted == True:
print '%s\t -> %s%s' %(_GREEN, se.expression, _ENDC)
else:
print '%s\t -> %s%s' %(_ENDC, se.expression, _ENDC)
if instruction.address == 0x4011ed:
raxId = getRegSymbolicID(IDREF.REG.RAX)
raxExpr = getBacktrackedSymExpr(raxId)
expr = smt2lib.smtAssert(smt2lib.equal(raxExpr, smt2lib.bv(0, 64))) # (assert (= rax 0)
print expr
print getModel(expr)
if __name__ == '__main__':
startAnalysisFromAddr(0x4011dd)
stopAnalysisFromAddr(0x40120b)
addCallback(cbeforeSymProc, IDREF.CALLBACK.BEFORE_SYMPROC)
addCallback(cafter, IDREF.CALLBACK.AFTER)
runProgram()
|
<commit_before><commit_msg>Test to crack the xor obfuscation crackme<commit_after>
|
import smt2lib
from triton import *
# PoC. Doesn't work yet
# $ triton ./examples/crackme_xor_obfu.py ./samples/crackmes/crackme_xor_obfu a
_GREEN = "\033[92m"
_ENDC = "\033[0m"
def cbeforeSymProc(instruction):
# 400544 mov [rbp+user_password], rdi
# RDI points on the user password
if instruction.address == 0x400544:
rdi = getRegValue(IDREF.REG.RDI)
taintMem(rdi)
def cafter(instruction):
print '%#x: %s' %(instruction.address, instruction.assembly)
for se in instruction.symbolicElements:
if se.isTainted == True:
print '%s\t -> %s%s' %(_GREEN, se.expression, _ENDC)
else:
print '%s\t -> %s%s' %(_ENDC, se.expression, _ENDC)
if instruction.address == 0x4011ed:
raxId = getRegSymbolicID(IDREF.REG.RAX)
raxExpr = getBacktrackedSymExpr(raxId)
expr = smt2lib.smtAssert(smt2lib.equal(raxExpr, smt2lib.bv(0, 64))) # (assert (= rax 0)
print expr
print getModel(expr)
if __name__ == '__main__':
startAnalysisFromAddr(0x4011dd)
stopAnalysisFromAddr(0x40120b)
addCallback(cbeforeSymProc, IDREF.CALLBACK.BEFORE_SYMPROC)
addCallback(cafter, IDREF.CALLBACK.AFTER)
runProgram()
|
Test to crack the xor obfuscation crackme
import smt2lib
from triton import *
# PoC. Doesn't work yet
# $ triton ./examples/crackme_xor_obfu.py ./samples/crackmes/crackme_xor_obfu a
_GREEN = "\033[92m"
_ENDC = "\033[0m"
def cbeforeSymProc(instruction):
# 400544 mov [rbp+user_password], rdi
# RDI points on the user password
if instruction.address == 0x400544:
rdi = getRegValue(IDREF.REG.RDI)
taintMem(rdi)
def cafter(instruction):
print '%#x: %s' %(instruction.address, instruction.assembly)
for se in instruction.symbolicElements:
if se.isTainted == True:
print '%s\t -> %s%s' %(_GREEN, se.expression, _ENDC)
else:
print '%s\t -> %s%s' %(_ENDC, se.expression, _ENDC)
if instruction.address == 0x4011ed:
raxId = getRegSymbolicID(IDREF.REG.RAX)
raxExpr = getBacktrackedSymExpr(raxId)
expr = smt2lib.smtAssert(smt2lib.equal(raxExpr, smt2lib.bv(0, 64))) # (assert (= rax 0)
print expr
print getModel(expr)
if __name__ == '__main__':
startAnalysisFromAddr(0x4011dd)
stopAnalysisFromAddr(0x40120b)
addCallback(cbeforeSymProc, IDREF.CALLBACK.BEFORE_SYMPROC)
addCallback(cafter, IDREF.CALLBACK.AFTER)
runProgram()
|
<commit_before><commit_msg>Test to crack the xor obfuscation crackme<commit_after>
import smt2lib
from triton import *
# PoC. Doesn't work yet
# $ triton ./examples/crackme_xor_obfu.py ./samples/crackmes/crackme_xor_obfu a
_GREEN = "\033[92m"
_ENDC = "\033[0m"
def cbeforeSymProc(instruction):
# 400544 mov [rbp+user_password], rdi
# RDI points on the user password
if instruction.address == 0x400544:
rdi = getRegValue(IDREF.REG.RDI)
taintMem(rdi)
def cafter(instruction):
print '%#x: %s' %(instruction.address, instruction.assembly)
for se in instruction.symbolicElements:
if se.isTainted == True:
print '%s\t -> %s%s' %(_GREEN, se.expression, _ENDC)
else:
print '%s\t -> %s%s' %(_ENDC, se.expression, _ENDC)
if instruction.address == 0x4011ed:
raxId = getRegSymbolicID(IDREF.REG.RAX)
raxExpr = getBacktrackedSymExpr(raxId)
expr = smt2lib.smtAssert(smt2lib.equal(raxExpr, smt2lib.bv(0, 64))) # (assert (= rax 0)
print expr
print getModel(expr)
if __name__ == '__main__':
startAnalysisFromAddr(0x4011dd)
stopAnalysisFromAddr(0x40120b)
addCallback(cbeforeSymProc, IDREF.CALLBACK.BEFORE_SYMPROC)
addCallback(cafter, IDREF.CALLBACK.AFTER)
runProgram()
|
|
6fc0c3884c38448956273d99a57e0c758ecbc658
|
crmapp/marketing/views.py
|
crmapp/marketing/views.py
|
from django.shortcuts import render
# Create your views here.
|
from django.views.generic.base import TemplateView
class HomePage(TemplateView):
"""
Because our needs are so simple, all we have to do is
assign one value; template_name. The home.html file will be created
in the next lesson.
"""
template_name = 'marketing/home.html'
|
Create the Home Page > Create the Home Page View
|
Create the Home Page > Create the Home Page View
|
Python
|
mit
|
tabdon/crmeasyapp,tabdon/crmeasyapp,deenaariff/Django
|
from django.shortcuts import render
# Create your views here.
Create the Home Page > Create the Home Page View
|
from django.views.generic.base import TemplateView
class HomePage(TemplateView):
"""
Because our needs are so simple, all we have to do is
assign one value; template_name. The home.html file will be created
in the next lesson.
"""
template_name = 'marketing/home.html'
|
<commit_before>from django.shortcuts import render
# Create your views here.
<commit_msg>Create the Home Page > Create the Home Page View<commit_after>
|
from django.views.generic.base import TemplateView
class HomePage(TemplateView):
"""
Because our needs are so simple, all we have to do is
assign one value; template_name. The home.html file will be created
in the next lesson.
"""
template_name = 'marketing/home.html'
|
from django.shortcuts import render
# Create your views here.
Create the Home Page > Create the Home Page Viewfrom django.views.generic.base import TemplateView
class HomePage(TemplateView):
"""
Because our needs are so simple, all we have to do is
assign one value; template_name. The home.html file will be created
in the next lesson.
"""
template_name = 'marketing/home.html'
|
<commit_before>from django.shortcuts import render
# Create your views here.
<commit_msg>Create the Home Page > Create the Home Page View<commit_after>from django.views.generic.base import TemplateView
class HomePage(TemplateView):
"""
Because our needs are so simple, all we have to do is
assign one value; template_name. The home.html file will be created
in the next lesson.
"""
template_name = 'marketing/home.html'
|
3e147eba049c51c3b1c7c7278f48e40ef5b1263f
|
paperpass.py
|
paperpass.py
|
import json
class PaperPass:
# class var
outline = {}
def __init__(self, outline):
self.outline = outline
def outputjson(self, filename):
fp = open(filename, 'w')
json.dump(self.outline, fp)
if __name__ == "__main__":
import sys
a = PaperPass({3:2,2:1})
a.outputjson(sys.argv[1])
|
Create PaperPass class. It contains outputjson method.
|
Create PaperPass class. It contains outputjson method.
|
Python
|
mit
|
lucaskotw/paperpass
|
Create PaperPass class. It contains outputjson method.
|
import json
class PaperPass:
# class var
outline = {}
def __init__(self, outline):
self.outline = outline
def outputjson(self, filename):
fp = open(filename, 'w')
json.dump(self.outline, fp)
if __name__ == "__main__":
import sys
a = PaperPass({3:2,2:1})
a.outputjson(sys.argv[1])
|
<commit_before><commit_msg>Create PaperPass class. It contains outputjson method.<commit_after>
|
import json
class PaperPass:
# class var
outline = {}
def __init__(self, outline):
self.outline = outline
def outputjson(self, filename):
fp = open(filename, 'w')
json.dump(self.outline, fp)
if __name__ == "__main__":
import sys
a = PaperPass({3:2,2:1})
a.outputjson(sys.argv[1])
|
Create PaperPass class. It contains outputjson method.import json
class PaperPass:
# class var
outline = {}
def __init__(self, outline):
self.outline = outline
def outputjson(self, filename):
fp = open(filename, 'w')
json.dump(self.outline, fp)
if __name__ == "__main__":
import sys
a = PaperPass({3:2,2:1})
a.outputjson(sys.argv[1])
|
<commit_before><commit_msg>Create PaperPass class. It contains outputjson method.<commit_after>import json
class PaperPass:
# class var
outline = {}
def __init__(self, outline):
self.outline = outline
def outputjson(self, filename):
fp = open(filename, 'w')
json.dump(self.outline, fp)
if __name__ == "__main__":
import sys
a = PaperPass({3:2,2:1})
a.outputjson(sys.argv[1])
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.