repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
larrybradley/astropy | astropy/wcs/tests/test_wcsprm.py | Python | bsd-3-clause | 27,671 | 0.000217 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import gc
import locale
import re
from packaging.version import Version
import pytest
import numpy as np
from numpy.testing import assert_array_equal, assert_array_almost_equal
from astropy.io import fits
from astropy.wcs import wcs
from astropy.wcs import _wcs
from astropy.wcs.wcs import FITSFixedWarning
from astropy.utils.data import (
get_pkg_data_contents, get_pkg_data_fileobj, get_pkg_data_filename)
from astropy.utils.misc import _set_locale
from astropy import units as u
from astropy.units.core import UnitsWarning
######################################################################
def test_alt():
w = _wcs.Wcsprm()
assert w.alt == " "
w.alt = "X"
assert w.alt == "X"
del w.alt
assert w.alt == " "
def test_alt_invalid1():
w = _wcs.Wcsprm()
with pytest.raises(ValueError):
w.alt = "$"
def test_alt_invalid2():
w = _wcs.Wcsprm()
with pytest.raises(ValueError):
w.alt = " "
def test_axis_types():
w = _wcs.Wcsprm()
assert_array_equal(w.axis_types, [0, 0])
def test_cd():
w = _wcs.Wcsprm()
w.cd = [[1, 0], [0 | , 1]]
asse | rt w.cd.dtype == float
assert w.has_cd() is True
assert_array_equal(w.cd, [[1, 0], [0, 1]])
del w.cd
assert w.has_cd() is False
def test_cd_missing():
w = _wcs.Wcsprm()
assert w.has_cd() is False
with pytest.raises(AttributeError):
w.cd
def test_cd_missing2():
w = _wcs.Wcsprm()
w.cd = [[1, 0], [0, 1]]
assert w.has_cd() is True
del w.cd
assert w.has_cd() is False
with pytest.raises(AttributeError):
w.cd
def test_cd_invalid():
w = _wcs.Wcsprm()
with pytest.raises(ValueError):
w.cd = [1, 0, 0, 1]
def test_cdfix():
w = _wcs.Wcsprm()
w.cdfix()
def test_cdelt():
w = _wcs.Wcsprm()
assert_array_equal(w.cdelt, [1, 1])
w.cdelt = [42, 54]
assert_array_equal(w.cdelt, [42, 54])
def test_cdelt_delete():
w = _wcs.Wcsprm()
with pytest.raises(TypeError):
del w.cdelt
def test_cel_offset():
w = _wcs.Wcsprm()
assert w.cel_offset is False
w.cel_offset = 'foo'
assert w.cel_offset is True
w.cel_offset = 0
assert w.cel_offset is False
def test_celfix():
# TODO: We need some data with -NCP or -GLS projections to test
# with. For now, this is just a smoke test
w = _wcs.Wcsprm()
assert w.celfix() == -1
def test_cname():
w = _wcs.Wcsprm()
# Test that this works as an iterator
for x in w.cname:
assert x == ''
assert list(w.cname) == ['', '']
w.cname = [b'foo', 'bar']
assert list(w.cname) == ['foo', 'bar']
def test_cname_invalid():
w = _wcs.Wcsprm()
with pytest.raises(TypeError):
w.cname = [42, 54]
def test_colax():
w = _wcs.Wcsprm()
assert w.colax.dtype == np.intc
assert_array_equal(w.colax, [0, 0])
w.colax = [42, 54]
assert_array_equal(w.colax, [42, 54])
w.colax[0] = 0
assert_array_equal(w.colax, [0, 54])
with pytest.raises(ValueError):
w.colax = [1, 2, 3]
def test_colnum():
w = _wcs.Wcsprm()
assert w.colnum == 0
w.colnum = 42
assert w.colnum == 42
with pytest.raises(OverflowError):
w.colnum = 0xffffffffffffffffffff
with pytest.raises(OverflowError):
w.colnum = 0xffffffff
with pytest.raises(TypeError):
del w.colnum
def test_colnum_invalid():
w = _wcs.Wcsprm()
with pytest.raises(TypeError):
w.colnum = 'foo'
def test_crder():
w = _wcs.Wcsprm()
assert w.crder.dtype == float
assert np.all(np.isnan(w.crder))
w.crder[0] = 0
assert np.isnan(w.crder[1])
assert w.crder[0] == 0
w.crder = w.crder
def test_crota():
w = _wcs.Wcsprm()
w.crota = [1, 0]
assert w.crota.dtype == float
assert w.has_crota() is True
assert_array_equal(w.crota, [1, 0])
del w.crota
assert w.has_crota() is False
def test_crota_missing():
w = _wcs.Wcsprm()
assert w.has_crota() is False
with pytest.raises(AttributeError):
w.crota
def test_crota_missing2():
w = _wcs.Wcsprm()
w.crota = [1, 0]
assert w.has_crota() is True
del w.crota
assert w.has_crota() is False
with pytest.raises(AttributeError):
w.crota
def test_crpix():
w = _wcs.Wcsprm()
assert w.crpix.dtype == float
assert_array_equal(w.crpix, [0, 0])
w.crpix = [42, 54]
assert_array_equal(w.crpix, [42, 54])
w.crpix[0] = 0
assert_array_equal(w.crpix, [0, 54])
with pytest.raises(ValueError):
w.crpix = [1, 2, 3]
def test_crval():
w = _wcs.Wcsprm()
assert w.crval.dtype == float
assert_array_equal(w.crval, [0, 0])
w.crval = [42, 54]
assert_array_equal(w.crval, [42, 54])
w.crval[0] = 0
assert_array_equal(w.crval, [0, 54])
def test_csyer():
w = _wcs.Wcsprm()
assert w.csyer.dtype == float
assert np.all(np.isnan(w.csyer))
w.csyer[0] = 0
assert np.isnan(w.csyer[1])
assert w.csyer[0] == 0
w.csyer = w.csyer
def test_ctype():
w = _wcs.Wcsprm()
assert list(w.ctype) == ['', '']
w.ctype = [b'RA---TAN', 'DEC--TAN']
assert_array_equal(w.axis_types, [2200, 2201])
assert w.lat == 1
assert w.lng == 0
assert w.lattyp == 'DEC'
assert w.lngtyp == 'RA'
assert list(w.ctype) == ['RA---TAN', 'DEC--TAN']
w.ctype = ['foo', 'bar']
assert_array_equal(w.axis_types, [0, 0])
assert list(w.ctype) == ['foo', 'bar']
assert w.lat == -1
assert w.lng == -1
assert w.lattyp == 'DEC'
assert w.lngtyp == 'RA'
def test_ctype_repr():
w = _wcs.Wcsprm()
assert list(w.ctype) == ['', '']
w.ctype = [b'RA-\t--TAN', 'DEC-\n-TAN']
assert repr(w.ctype == '["RA-\t--TAN", "DEC-\n-TAN"]')
def test_ctype_index_error():
w = _wcs.Wcsprm()
assert list(w.ctype) == ['', '']
for idx in (2, -3):
with pytest.raises(IndexError):
w.ctype[idx]
with pytest.raises(IndexError):
w.ctype[idx] = 'FOO'
def test_ctype_invalid_error():
w = _wcs.Wcsprm()
assert list(w.ctype) == ['', '']
with pytest.raises(ValueError):
w.ctype[0] = 'X' * 100
with pytest.raises(TypeError):
w.ctype[0] = True
with pytest.raises(TypeError):
w.ctype = ['a', 0]
with pytest.raises(TypeError):
w.ctype = None
with pytest.raises(ValueError):
w.ctype = ['a', 'b', 'c']
with pytest.raises(ValueError):
w.ctype = ['FOO', 'A' * 100]
def test_cubeface():
w = _wcs.Wcsprm()
assert w.cubeface == -1
w.cubeface = 0
with pytest.raises(OverflowError):
w.cubeface = -1
def test_cunit():
w = _wcs.Wcsprm()
assert list(w.cunit) == [u.Unit(''), u.Unit('')]
w.cunit = [u.m, 'km']
assert w.cunit[0] == u.m
assert w.cunit[1] == u.km
def test_cunit_invalid():
w = _wcs.Wcsprm()
with pytest.warns(u.UnitsWarning, match='foo') as warns:
w.cunit[0] = 'foo'
assert len(warns) == 1
def test_cunit_invalid2():
w = _wcs.Wcsprm()
with pytest.warns(u.UnitsWarning) as warns:
w.cunit = ['foo', 'bar']
assert len(warns) == 2
assert 'foo' in str(warns[0].message)
assert 'bar' in str(warns[1].message)
def test_unit():
w = wcs.WCS()
w.wcs.cunit[0] = u.erg
assert w.wcs.cunit[0] == u.erg
assert repr(w.wcs.cunit) == "['erg', '']"
def test_unit2():
w = wcs.WCS()
with pytest.warns(UnitsWarning):
myunit = u.Unit("FOOBAR", parse_strict="warn")
w.wcs.cunit[0] = myunit
def test_unit3():
w = wcs.WCS()
for idx in (2, -3):
with pytest.raises(IndexError):
w.wcs.cunit[idx]
with pytest.raises(IndexError):
w.wcs.cunit[idx] = u.m
with pytest.raises(ValueError):
w.wcs.cunit = [u.m, u.m, u.m]
def test_unitfix():
w = _wcs.Wcsprm()
w.unitfix()
def test_cylfix():
# TODO: We need some data with broken cylindrical projections to
# test with. For now, this is just a smoke test.
w = _wcs.Wcsprm()
assert w.cylfix() == -1
ass |
Property404/OSIR | server/admin/xcrypt.py | Python | gpl-3.0 | 1,528 | 0.081806 | #!/usr/bin/env python
import base64
from os import urandom
from sys import argv
_XOR_KEY_SIZE=8
#xor two strings
def xor_str(s1,s2):
#print([a for a in s2]);
return bytearray(s1[i]^s2[i%_XOR_KEY | _SIZE] for i in range(len(s1)))
#xor msg with random key, encode in b64
def encrypt(msg):
msg+=bytearray([msg[0]^msg[len(msg)-1]])
key=urandom(_XOR_KEY_SIZE);
return (base64.b64encode((key+xor_str(msg,key))))+b"=";
#Undo encrypt
def decrypt(msg):
try:
msg=base64.b64decode(msg)
except base64.binascii.Error:
print( | "File not in base64");
exit(1);
rmsg=xor_str(bytearray(i for i in msg[_XOR_KEY_SIZE::]),msg[0:_XOR_KEY_SIZE]);
if(rmsg[0] ^ rmsg[-2]==rmsg[-1]):return rmsg[0:-1]
print("Integrity check failed");
exit(1);
#Make encrypted file
def encrypt_file(fname,oname):
fp=open(fname,"rb");
ftext=fp.read();
fp.close();
fp=open(oname,"wb");
fp.write(bytearray(i for i in encrypt(ftext)));
fp.close();
def decrypt_file(fname,oname):
fp=open(fname,"rb");
ftext=fp.read();
fp.close();
fp=open(oname,"wb");
fp.write(bytearray(i for i in decrypt(ftext)));
fp.close();
if __name__=="__main__":
usage="Usage: xcrypt.py [-rd] infile outfile"
if(len(argv)<3):
print(usage);
elif(argv[1]=='-d'):
if(len(argv)<4):
print(usage);
decrypt_file(argv[2],argv[3]);
elif(argv[1]=='-r'):
if(len(argv)<4):
print(usage);
fp=open(argv[2],"r");
ftext=fp.read();
fp.close();
fp=open(argv[3],"wb");
fp.write(base64.b64decode(ftext));
fp.close();
else:
encrypt_file(argv[1],argv[2]);
|
kgiusti/gofer | test/unit/messaging/test_consumer.py | Python | lgpl-2.1 | 6,743 | 0 | # Copyright (c) 2014 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public
# License as published by the Free Software Foundation; either version
# 2 of the License (GPLv2) or (at your option) any later version.
# There is NO WARRANTY for this software, express or implied,
# including the implied warranties of MERCHANTABILITY,
# NON-INFRINGEMENT, or FITNESS FOR A PARTICULAR PURPOSE. You should
# have received a copy of GPLv2 along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
from threading import Thread
from unittest import TestCase
from mock import Mock, patch
from gofer.messaging import Node
from gofer.messaging.consumer import ConsumerThread, Consumer
from gofer.messaging import DocumentError, ValidationFailed
class TestConsumerThread(TestCase):
def test_init(self):
url = 'test-url'
node = Node('test-queue')
consumer = ConsumerThread(node, url)
self.assertEqual(consumer.node, node)
self.assertEqual(consumer.url, url)
self.assertEqual(consumer.wait, 3)
self.assertTrue(isinstance(consumer, Thread))
self.assertTrue(consumer.daemon)
self.assertEqual(consumer.reader, None)
@patch('gofer.common.Thread.abort')
def test_shutdown(self, abort):
url = 'test-url'
node = Node('test-queue')
consumer = ConsumerThread(node, url)
consumer.shutdown()
abort.assert_called_once_with()
@patch('gofer.messaging.consumer.Reader')
def test_run(self, reader):
url = 'test-url'
node = Node('test-queue')
consumer = ConsumerThread(node, url)
consumer.open = Mock()
consumer.close = Mock()
consumer.read = Mock(side_effect=StopIteration)
# test
try:
consumer.run()
except StopIteration:
pass
# validation
reader.assert_called_once_with(node, url)
consumer.open.assert_called_once_with()
consumer.read.assert_called_once_with()
consumer.close.assert_called_once_with()
def test_open(self):
url = 'test-url'
node = Node('test-queue')
consumer = ConsumerThread(node, url)
consumer.reader = Mock()
# test
consumer.open()
# validation
consumer.reader.open.assert_called_once_with()
def test_close(self):
url = 'test-url'
node = Node('test-queue')
consumer = ConsumerThread(node, url)
consumer.reader = Mock()
# test
consumer.close()
# validation
consumer.reader.close.assert_called_once_with()
def test_close_exception(self):
url = 'test-url'
node = Node('test-queue')
| consumer = ConsumerThread(node, url)
consumer.reader = Mock()
consumer.reader.close.side_effect = ValueError
# test
consumer.close()
# validation
| consumer.reader.close.assert_called_once_with()
@patch('gofer.messaging.consumer.sleep')
def test_open_exception(self, sleep):
url = 'test-url'
node = Node('test-queue')
consumer = ConsumerThread(node, url)
consumer.reader = Mock()
consumer.reader.open.side_effect = [ValueError, None]
# test
consumer.open()
# validation
sleep.assert_called_once_with(30)
self.assertEqual(consumer.reader.open.call_count, 2)
def test_read(self):
url = 'test-url'
node = Node('test-queue')
message = Mock()
document = Mock()
consumer = ConsumerThread(node, url)
consumer.reader = Mock()
consumer.reader.next.return_value = (message, document)
consumer.dispatch = Mock()
# test
consumer.read()
# validate
consumer.reader.next.assert_called_once_with(consumer.wait)
consumer.dispatch.assert_called_once_with(document)
message.ack.assert_called_once_with()
def test_read_nothing(self):
url = 'test-url'
node = Node('test-queue')
consumer = ConsumerThread(node, url)
consumer.reader = Mock()
consumer.reader.next.return_value = (None, None)
consumer.dispatch = Mock()
# test
consumer.read()
# validate
self.assertFalse(consumer.dispatch.called)
def test_read_validation_failed(self):
url = 'test-url'
node = Node('test-queue')
failed = ValidationFailed(details='test')
consumer = ConsumerThread(node, url)
consumer.reader = Mock()
consumer.reader.next.side_effect = failed
consumer.rejected = Mock()
# test
consumer.read()
# validate
consumer.rejected.assert_called_once_with(
failed.code, failed.description, failed.document, failed.details)
def test_read_invalid_document(self):
url = 'test-url'
node = Node('test-queue')
code = 12
description = 'just up and failed'
document = Mock()
details = 'crashed'
ir = DocumentError(code, description, document, details)
consumer = ConsumerThread(node, url)
consumer.reader = Mock()
consumer.reader.next.side_effect = ir
consumer.rejected = Mock()
# test
consumer.read()
# validate
consumer.rejected.assert_called_once_with(
ir.code, ir.description, ir.document, ir.details)
@patch('gofer.messaging.consumer.sleep')
def test_read_exception(self, sleep):
url = 'test-url'
node = Node('test-queue')
consumer = ConsumerThread(node, url)
consumer.reader = Mock()
consumer.reader.next.side_effect = IndexError
consumer.open = Mock()
consumer.close = Mock()
# test
consumer.read()
# validation
consumer.close.assert_called_once_with()
consumer.open.assert_called_once_with()
sleep.assert_called_once_with(60)
def test_rejected(self):
url = 'test-url'
node = Node('test-queue')
consumer = ConsumerThread(node, url)
consumer.rejected('1', '2', '3', '4')
def test_dispatch(self):
url = 'test-url'
node = Node('test-queue')
consumer = ConsumerThread(node, url)
consumer.dispatch(Mock())
class TestConsumer(TestCase):
@patch('gofer.messaging.consumer.Reader', Mock())
def test_init(self):
url = 'test-url'
node = Node('test-queue')
# test
consumer = Consumer(node, url)
# validation
self.assertEqual(consumer.node, node)
self.assertEqual(consumer.url, url)
|
MatthewDaws/OSMDigest | notebooks/convert_to_db.py | Python | mit | 249 | 0.052209 | import sys
sys.path.insert(0, "..")
import osmdigest.sqlite as sq
import os
for x | in sq.convert_gen(os.path.join("..","..","..","data","california-latest.osm.xz"),
os.path.join("..","..","..","data","califor | nia-latest.db")):
print(x) |
b0ttl3z/SickRage | sickbeard/postProcessor.py | Python | gpl-3.0 | 58,238 | 0.004499 | # coding=utf-8
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: https://sickrage.github.io
# Git: https://github.com/SickRage/SickRage.git
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied w | arranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
# pylint: disable=too-many-lines
from __future__ import print_fun | ction, unicode_literals
import fnmatch
import os
import re
import stat
import subprocess
import adba
import six
import sickbeard
from sickbeard import common, db, failed_history, helpers, history, logger, notifiers, show_name_helpers
from sickbeard.helpers import verify_freespace
from sickbeard.name_parser.parser import InvalidNameException, InvalidShowException, NameParser
from sickrage.helper import glob
from sickrage.helper.common import remove_extension, replace_extension, SUBTITLE_EXTENSIONS
from sickrage.helper.encoding import ek
from sickrage.helper.exceptions import EpisodeNotFoundException, EpisodePostProcessingFailedException, ex, ShowDirectoryNotFoundException
from sickrage.show.Show import Show
METHOD_COPY = "copy"
METHOD_MOVE = "move"
METHOD_HARDLINK = "hardlink"
METHOD_SYMLINK = "symlink"
METHOD_SYMLINK_REVERSED = "symlink_reversed"
PROCESS_METHODS = [METHOD_COPY, METHOD_MOVE, METHOD_HARDLINK, METHOD_SYMLINK, METHOD_SYMLINK_REVERSED]
class PostProcessor(object): # pylint: disable=too-many-instance-attributes
"""
A class which will process a media file according to the post processing settings in the config.
"""
EXISTS_LARGER = 1
EXISTS_SAME = 2
EXISTS_SMALLER = 3
DOESNT_EXIST = 4
IGNORED_FILESTRINGS = [".AppleDouble", ".DS_Store"]
def __init__(self, file_path, nzb_name=None, process_method=None, is_priority=None):
"""
Creates a new post processor with the given file path and optionally an NZB name.
file_path: The path to the file to be processed
nzb_name: The name of the NZB which resulted in this file being downloaded (optional)
"""
# absolute path to the folder that is being processed
self.folder_path = ek(os.path.dirname, ek(os.path.abspath, file_path))
# full path to file
self.file_path = file_path
# file name only
self.file_name = ek(os.path.basename, file_path)
# the name of the folder only
self.folder_name = ek(os.path.basename, self.folder_path)
# name of the NZB that resulted in this folder
self.nzb_name = nzb_name
self.process_method = process_method if process_method else sickbeard.PROCESS_METHOD
self.in_history = False
self.release_group = None
self.release_name = None
self.is_proper = False
self.is_priority = is_priority
self.log = ''
self.version = None
self.anidbEpisode = None
def _log(self, message, level=logger.INFO):
"""
A wrapper for the internal logger which also keeps track of messages and saves them to a string for later.
:param message: The string to log (six.text_type)
:param level: The log level to use (optional)
"""
logger.log(message, level)
self.log += message + '\n'
def _checkForExistingFile(self, existing_file):
"""
Checks if a file exists already and if it does whether it's bigger or smaller than
the file we are post processing
;param existing_file: The file to compare to
:return:
DOESNT_EXIST if the file doesn't exist
EXISTS_LARGER if the file exists and is larger than the file we are post processing
EXISTS_SMALLER if the file exists and is smaller than the file we are post processing
EXISTS_SAME if the file exists and is the same size as the file we are post processing
"""
if not existing_file:
self._log("There is no existing file so there's no worries about replacing it", logger.DEBUG)
return PostProcessor.DOESNT_EXIST
# if the new file exists, return the appropriate code depending on the size
if ek(os.path.isfile, existing_file):
# see if it's bigger than our old file
if ek(os.path.getsize, existing_file) > ek(os.path.getsize, self.file_path):
self._log("File " + existing_file + " is larger than " + self.file_path, logger.DEBUG)
return PostProcessor.EXISTS_LARGER
elif ek(os.path.getsize, existing_file) == ek(os.path.getsize, self.file_path):
self._log("File " + existing_file + " is the same size as " + self.file_path, logger.DEBUG)
return PostProcessor.EXISTS_SAME
else:
self._log("File " + existing_file + " is smaller than " + self.file_path, logger.DEBUG)
return PostProcessor.EXISTS_SMALLER
else:
self._log("File " + existing_file + " doesn't exist so there's no worries about replacing it",
logger.DEBUG)
return PostProcessor.DOESNT_EXIST
def list_associated_files( # pylint: disable=too-many-locals, too-many-branches, too-many-statements
self, file_path, subtitles_only=False, subfolders=False, rename=False):
"""
For a given file path searches for files with the same name but different extension and returns their absolute paths
:param file_path: The file to check for associated files
:return: A list containing all files which are associated to the given file
"""
def recursive_glob(treeroot, pattern):
results = []
for base, dirnames_, files in ek(os.walk, treeroot.encode(sickbeard.SYS_ENCODING), followlinks=sickbeard.PROCESSOR_FOLLOW_SYMLINKS):
goodfiles = fnmatch.filter(files, pattern)
for f in goodfiles:
found_file = ek(os.path.join, base, f)
if found_file != file_path:
results.append(found_file)
return results
if not file_path:
return []
file_path_list_to_allow = []
file_path_list_to_delete = []
if subfolders:
base_name = ek(os.path.basename, file_path).rpartition('.')[0]
else:
base_name = file_path.rpartition('.')[0]
# don't strip it all and use cwd by accident
if not base_name:
return []
dirname = ek(os.path.dirname, file_path) or '.'
# subfolders are only checked in show folder, so names will always be exactly alike
if subfolders:
# just create the list of all files starting with the basename
filelist = recursive_glob(dirname, glob.escape(base_name) + '*')
# this is called when PP, so we need to do the filename check case-insensitive
else:
filelist = []
# loop through all the files in the folder, and check if they are the same name even when the cases don't match
for found_file in glob.glob(ek(os.path.join, glob.escape(dirname), '*')):
file_name, separator, file_extension = found_file.rpartition('.')
# Handles subtitles with language code
if file_extension in SUBTITLE_EXTENSIONS and file_name.rpartition('.')[0].lower() == base_name.lower():
filelist.append(found_file)
# Handles all files with same basename, including subtitles without language code
elif file_name.lower() == base_name.lower():
filelist.append(found_file)
for associated_file_path i |
shaunharker/DSGRN | src/DSGRN/SubdomainGraph.py | Python | mit | 5,940 | 0.008081 | ### SubdomainGraph.py
### Shaun Harker
### 2018-03-25
### MIT LICENSE
from pychomp import *
import datetime
from itertools import product
from math import log2
from DSGRN._dsgrn import *
class SubdomainGraph:
def complex(self):
return self.cc
def diagram(self):
return self.digraph.adjacencies
def domains(self):
def domgen():
sizes = self.network.domains()
dom = Domain(sizes)
while dom.isValid():
yield dom
dom.preincrement()
return domgen()
def walls(self):
return ( wall for wall in self.cc(self.D-1) if not self.cc.rightfringe(wall) )
def box(self,subdom):
return self.cc.cell_index(subdom, 2**self.D - 1)
def subdomains(self):
return ( box for box in self.cc(self.D) if not self.cc.rightfringe(box) )
def centerbox(self,dom):
return self.box([3*i + 1 for i in dom])
def wallnormal(self,wall):
shape = self.cc.cell_shape(wall)
return int(log2(2**self.D - 1 - shape))
def posterior(self,wall):
dim = self.wallnormal(wall)
return self.cc.left(self.cc.left(wall,dim),dim)
def anterior(self,wall):
| dim = self.wallnormal(wall)
return self.cc.right(self.cc.right(wall,dim),dim)
## labelling
def | leftlabel(self):
return 1
def rightlabel(self):
return 2
def isLeftBoundingWall(self, wall):
normdim = self.wallnormal(wall)
return self.cc.leftfringe(wall)
def isRightBoundingWall(self, wall):
normdim = self.wallnormal(wall)
return self.cc.rightfringe(self.cc.right(wall, normdim))
def __init__(self, p, logging = None):
self.p = p
self.network = self.p.network()
self.D = self.network.size()
self.cc = CubicalComplex([ 3*x + 1 for x in self.network.domains()])
self.digraph = DirectedAcyclicGraph()
if logging:
print(datetime.datetime.now().time(), flush=True)
print("Complex size = " + str(self.cc.size()), flush=True)
print("Number of top cells = " + str(self.cc.size(self.D)), flush=True)
for box in self.cc(self.D):
self.digraph.add_vertex(box, str(self.cc.coordinates(box)))
if logging:
print(datetime.datetime.now().time(), flush=True)
print("Graph vertices constructed", flush=True)
# construct wall labels
# Step 1. Center labels
label = {}
for dom in self.domains():
box = self.centerbox(dom)
for dim in range(0,self.D):
x = self.p.absorbing(dom, dim, 1) # 1 -- right
rightwall = self.cc.right(box, dim)
label[rightwall] = self.rightlabel() if x else self.leftlabel()
y = self.p.absorbing(dom, dim, -1) # -1 -- left
leftwall = self.cc.left(box, dim)
label[leftwall] = self.leftlabel() if y else self.rightlabel()
if logging:
print(datetime.datetime.now().time(), flush=True)
print("Computed center labels", flush=True)
# Step 2. Center Transverse labels
for dom in self.domains():
box = self.centerbox(dom)
for dim in range(0,self.D):
for wall in self.posterior(self.cc.left(box, dim)), self.anterior(self.cc.right(box, dim)):
if self.isLeftBoundingWall(wall):
label[wall] = self.rightlabel()
continue
if self.isRightBoundingWall(wall):
label[wall] = self.leftlabel()
continue
posteriorlabel = label.get(self.posterior(wall),0)
anteriorlabel = label.get(self.anterior(wall),0)
if posteriorlabel == anteriorlabel:
for adjwall in self.cc.parallelneighbors(wall):
label[adjwall] = posteriorlabel
if logging:
print(datetime.datetime.now().time(), flush=True)
print("Computed transverse labels", flush=True)
# Step 4. Propagate labels
newlabel = dict(label)
for wall in label:
for adjwall in self.cc.parallelneighbors(wall):
if self.cc.rightfringe(adjwall): continue
if adjwall not in newlabel:
newlabel[adjwall] = 0
newlabel[adjwall] |= label[wall]
label = newlabel
if logging:
print(datetime.datetime.now().time(), flush=True)
print("Computed all wall labels", flush=True)
# Build graph edges
for wall in self.cc(self.D-1):
#print(" wall = " + str(wall),flush=True)
normdim = self.wallnormal(wall)
leftbox = self.cc.left(wall, normdim)
rightbox = self.cc.right(wall, normdim)
#print(str(self.cc.coordinates(leftbox)) + " and " + str(self.cc.coordinates(rightbox)))
#print(label.get(wall,0))
if label.get(wall,0) in [0,3]:
#print(str(self.cc.coordinates(leftbox)) + " <-> " + str(self.cc.coordinates(rightbox)))
self.digraph.add_edge(leftbox, rightbox)
self.digraph.add_edge(rightbox, leftbox)
elif label[wall] == self.rightlabel():
#print(str(self.cc.coordinates(leftbox)) + " -> " + str(self.cc.coordinates(rightbox)))
self.digraph.add_edge(leftbox, rightbox)
elif label[wall] == self.leftlabel():
#print(str(self.cc.coordinates(leftbox)) + " <- " + str(self.cc.coordinates(rightbox)))
self.digraph.add_edge(rightbox, leftbox)
if logging:
print(datetime.datetime.now().time(), flush=True)
print("Constructed SubdomainGraph", flush=True)
|
deployed/django | tests/timezones/tests.py | Python | bsd-3-clause | 55,059 | 0.002034 | from __future__ import unicode_literals
import datetime
import re
import sys
from unittest import skipIf
import warnings
from xml.dom.minidom import parseString
try:
import pytz
except ImportError:
pytz = None
from django.core import serializers
from django.core.urlresolvers import reverse
from django.db.models import Min, Max
from django.http import HttpRequest
from django.template import Context, RequestContext, Template, TemplateSyntaxError
from django.test import TestCase, override_settings, skipIfDBFeature, skipUnlessDBFeature
from django.test.utils import requires_tz_support
from django.utils import six
from django.utils import timezone
from .forms import EventForm, EventSplitForm, EventLocalizedForm, EventModelForm, EventLocalizedModelForm
from .models import Event, MaybeEvent, Session, SessionEvent, Timestamp, AllDayEvent
# These tests use the EAT (Eastern Africa Time) and ICT (Indochina Time)
# who don't have Daylight Saving Time, so we can represent them easily
# with FixedOffset, and use them directly as tzinfo in the constructors.
# settings.TIME_ZONE is forced to EAT. Most tests use a variant of
# datetime.datetime(2011, 9, 1, 13, 20, 30), which translates to
# 10:20:30 in UTC and 17:20:30 in ICT.
UTC = timezone.utc
EAT = timezone.get_fixed_timezone(180) # Africa/Nairobi
ICT = timezone.get_fixed_timezone(420) # Asia/Bangkok
@override_settings(TIME_ZONE='Africa/Nairobi', USE_TZ=False)
class LegacyDatabaseTests(TestCase):
def test_naive_datetime(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertEqual(event.dt, dt)
@skipUnlessDBFeature('supports_microsecond_precision')
def test_naive_datetime_with_microsecond(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertEqual(event.dt, dt)
@skipIfDBFeature('supports_microsecond_precision')
def test_naive_datetime_with_microsecond_unsupported(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060)
Event.objects.create(dt=dt)
event = Event.objects.get()
# microseconds are lost during a round-trip in the database
self.assertEqual(event.dt, dt.replace(microsecond=0))
@skipUnlessDBFeature('supports_timezones')
def test_aware_datetime_in_local_timezone(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertIsNone(event.dt.tzinfo)
# interpret the naive datetime in local time to get the correct value
self.assertEqual(event.dt.replace(tzinfo=EAT), dt)
@skipUnlessDBFeature('supports_timezones')
@skipUnlessDBFeature('supports_microsecond_precision')
def test_aware_datetime_in_local_timezone_with_microsecond(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060, tzinfo=EAT)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertIsNone(event.dt.tzinfo)
# interpret the naive datetime in local time to get the correct value
self.assertEqual(event.dt.replace(tzinfo=EAT), dt)
# This combination actually never happens.
@skipUnlessDBFeature('supports_timezones')
@skipIfDBFeature('supports_microsecond_precision')
def test_aware_datetime_in_local_timezone_with_microsecond_unsupported(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060, tzinfo=EAT)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertIsNone(event.dt.tzinfo)
# interpret the naive datetime in local time to get the correct value
# microseconds are lost during a round-trip in the database
self.assertEqual(event.dt.replace(tzinfo=EAT), dt.replace(microsecond=0))
@skipUnlessDBFeature('supports_timezones')
@skipIfDBFeature('needs_datetime_string_cast')
def test_aware_datetime_in_utc(self):
dt = datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertIsNone(event.dt.tzinfo)
# interpret the naive datetime in local time to get the correct value
self.assertEqual(event.dt.replace(tzinfo=EAT), dt)
# This combination is no longer possible since timezone support
# was removed from the SQLite backend -- it didn't work.
@skipUnlessDBFeature('supports_timezones')
@skipUnlessDBFeature('needs_datetime_string_cast')
def test_aware_datetime_in_utc_unsupported(self):
dt = datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertIsNone(event.dt.tzinfo)
# django.db.backend.utils.typecast_dt will just drop the
# timezone, so a round-trip in the database alters the data (!)
# interpret the naive datetime in local time and you get a wrong value
self.assertNotEqual(event.dt.replace(tzinfo=EAT), dt)
# interpret the naive datetime in original time to get the correct value
self.assertEqual(event.dt.replace(tzinfo=UTC), dt)
@skipUnlessDBFeature('supports_timezones')
@skipIfDBFeature('needs_datetime_string_cast')
def test_aware_datetime_in_other_timezone(self):
dt = datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=ICT)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertIsNone(event.dt.tzinfo)
# interpret the naive datetime in local time to get the correct value
self.assertEqual(event.dt.replace(tzinfo=EAT), dt)
# This combination is no longer possible since timezone support
# was removed from the SQLite backend -- it didn't work.
@skipUnlessDBFeature('supports_timezones')
@skipUnlessDBFeature('needs_datetime_string_cast')
def test_aware_datetime_in_other_timezone_unsupported(self):
dt = datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=ICT)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertIsNone(event.dt.tzinfo)
# django.db.backend.utils.typecast_dt will just drop the
# timezone, so a round-trip in the database alters the data (!)
# interpret the naive datetime in local time and you get a wrong value
self.assertNotEqual(event.dt.replace(tzinfo=EAT), dt)
# interpret the naive datetime in original time to get the correct value
self.assertEqual(event.dt.replace(tzinfo=ICT), dt)
@skipIfDBFeature('supports_timezones')
def test_aware_datetime_unspported(self):
dt = datetime.datetime(2011, 9, 1, 13, | 20, 30, tzinfo=EAT)
with self.assertRaises(ValueError):
Event.objects.create(dt=dt)
def test_auto_now_and_auto_now_add(self):
now = datetime.datetime.now | ()
past = now - datetime.timedelta(seconds=2)
future = now + datetime.timedelta(seconds=2)
Timestamp.objects.create()
ts = Timestamp.objects.get()
self.assertLess(past, ts.created)
self.assertLess(past, ts.updated)
self.assertGreater(future, ts.updated)
self.assertGreater(future, ts.updated)
def test_query_filter(self):
dt1 = datetime.datetime(2011, 9, 1, 12, 20, 30)
dt2 = datetime.datetime(2011, 9, 1, 14, 20, 30)
Event.objects.create(dt=dt1)
Event.objects.create(dt=dt2)
self.assertEqual(Event.objects.filter(dt__gte=dt1).count(), 2)
self.assertEqual(Event.objects.filter(dt__gt=dt1).count(), 1)
self.assertEqual(Event.objects.filter(dt__gte=dt2).count(), 1)
self.assertEqual(Event.objects.filter(dt__gt=dt2).count(), 0)
def test_query_datetime_lookups(self):
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0))
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0))
self.assertEqual(Event.objects.filter(dt__year=2011).count(), 2)
self.assertEqual(Event.objects.filter(dt__month=1).count(), 2)
self.asser |
anti-social/elasticmagic | elasticmagic/ext/asyncio/index.py | Python | apache-2.0 | 5,855 | 0 | from ...index import BaseIndex
class AsyncIndex(BaseIndex):
    """Asyncio variant of :class:`BaseIndex`.

    Every method is a thin ``async`` wrapper that delegates to the
    corresponding coroutine on the owning cluster object, pinning the
    operation to this index by passing ``index=self._name``.  All extra
    ``**kwargs`` are forwarded untouched to the cluster method.
    """

    async def get_es_version(self):
        return await self._cluster.get_es_version()

    async def get_compiler(self):
        return await self._cluster.get_compiler()

    async def get(
            self, doc_or_id, doc_cls=None, doc_type=None, source=None,
            realtime=None, routing=None, preference=None, refresh=None,
            version=None, version_type=None, **kwargs
    ):
        """Fetch a single document from this index."""
        return await self._cluster.get(
            doc_or_id, index=self._name, doc_cls=doc_cls, doc_type=doc_type,
            source=source, realtime=realtime, routing=routing,
            preference=preference, refresh=refresh, version=version,
            version_type=version_type, **kwargs
        )

    async def multi_get(
            self, docs, doc_type=None, source=None, realtime=None,
            routing=None, preference=None, refresh=None, **kwargs
    ):
        """Fetch several documents in one round-trip (mget API)."""
        return await self._cluster.multi_get(
            docs, index=self._name, doc_type=doc_type, source=source,
            realtime=realtime, routing=routing, preference=preference,
            refresh=refresh, **kwargs
        )

    # Short alias mirroring the Elasticsearch endpoint name.
    mget = multi_get

    async def search(
            self, q, doc_type=None, routing=None, preference=None,
            timeout=None, search_type=None, query_cache=None,
            terminate_after=None, scroll=None, stats=None, **kwargs
    ):
        """Execute a search query against this index."""
        return await self._cluster.search(
            q, index=self._name, doc_type=doc_type,
            routing=routing, preference=preference, timeout=timeout,
            search_type=search_type, query_cache=query_cache,
            terminate_after=terminate_after, scroll=scroll, stats=stats,
            **kwargs
        )

    async def explain(
            self, q, doc_or_id, doc_cls=None, routing=None, **kwargs
    ):
        """Explain how a query scores one particular document."""
        return await self._cluster.explain(
            q, doc_or_id, index=self._name, doc_cls=doc_cls, routing=routing,
            **kwargs
        )

    async def multi_search(
            self, queries, doc_type=None, routing=None, preference=None,
            search_type=None, **kwargs
    ):
        """Execute several search queries in one request (msearch API)."""
        return await self._cluster.multi_search(
            queries, index=self._name, doc_type=doc_type,
            routing=routing, preference=preference, search_type=search_type,
            **kwargs
        )

    # Short alias mirroring the Elasticsearch endpoint name.
    msearch = multi_search

    async def count(
            self, q=None, doc_type=None, routing=None, preference=None,
            **kwargs
    ):
        """Count the documents matching ``q`` (all documents if None)."""
        return await self._cluster.count(
            q, index=self._name, doc_type=doc_type, routing=routing,
            preference=preference, **kwargs
        )

    async def exists(
            self, q=None, doc_type=None, refresh=None, routing=None, **kwargs
    ):
        """Return whether any document matches ``q``."""
        return await self._cluster.exists(
            q, index=self._name, doc_type=doc_type, refresh=refresh,
            routing=routing, **kwargs
        )

    async def scroll(
            self, scroll_id, scroll, doc_cls=None, instance_mapper=None,
            **kwargs
    ):
        """Fetch the next batch of a scrolled search."""
        return await self._cluster.scroll(
            scroll_id, scroll,
            doc_cls=doc_cls, instance_mapper=instance_mapper,
            **kwargs
        )

    async def clear_scroll(self, scroll_id, **kwargs):
        """Release the server-side resources held by a scroll cursor."""
        return await self._cluster.clear_scroll(scroll_id, **kwargs)

    async def put_mapping(
            self, doc_cls_or_mapping, doc_type=None, allow_no_indices=None,
            expand_wildcards=None, ignore_conflicts=None,
            ignore_unavailable=None, master_timeout=None, timeout=None,
            **kwargs
    ):
        """Install or update a type mapping on this index."""
        return await self._cluster.put_mapping(
            doc_cls_or_mapping, index=self._name, doc_type=doc_type,
            allow_no_indices=allow_no_indices,
            expand_wildcards=expand_wildcards,
            ignore_conflicts=ignore_conflicts,
            ignore_unavailable=ignore_unavailable,
            master_timeout=master_timeout, timeout=timeout,
            **kwargs
        )

    async def add(
            self, docs, doc_type=None, refresh=None, timeout=None,
            consistency=None, replication=None, **kwargs
    ):
        """Index (insert/overwrite) the given documents."""
        return await self._cluster.add(
            docs, index=self._name, doc_type=doc_type, refresh=refresh,
            timeout=timeout, consistency=consistency, replication=replication,
            **kwargs
        )

    async def delete(
            self, doc_or_id, doc_cls=None, doc_type=None,
            timeout=None, consistency=None, replication=None,
            parent=None, routing=None, refresh=None, version=None,
            version_type=None,
            **kwargs
    ):
        """Delete a single document."""
        return await self._cluster.delete(
            doc_or_id, index=self._name, doc_cls=doc_cls, doc_type=doc_type,
            timeout=timeout, consistency=consistency, replication=replication,
            parent=parent, routing=routing, refresh=refresh,
            version=version, version_type=version_type,
            **kwargs
        )

    async def delete_by_query(
            self, q, doc_type=None, timeout=None, consistency=None,
            replication=None, routing=None, **kwargs
    ):
        """Delete every document matching the query ``q``."""
        return await self._cluster.delete_by_query(
            q, index=self._name, doc_type=doc_type,
            timeout=timeout, consistency=consistency,
            replication=replication, routing=routing,
            **kwargs
        )

    async def bulk(self, actions, doc_type=None, refresh=None, **kwargs):
        """Execute a list of bulk actions against this index."""
        return await self._cluster.bulk(
            actions, index=self._name, doc_type=doc_type, refresh=refresh,
            **kwargs
        )

    async def refresh(self, **kwargs):
        """Make recent writes visible to search."""
        return await self._cluster.refresh(index=self._name, **kwargs)

    async def flush(self, **kwargs):
        """Flush the index transaction log to disk."""
        return await self._cluster.flush(index=self._name, **kwargs)
|
debomatic/debomatic | Debomatic/process.py | Python | gpl-3.0 | 6,727 | 0 | # Deb-o-Matic
#
# Copyright (C) 2011-2021 Luca Falavigna
#
# Author: Luca Falavigna <dktrkranz@debian.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
import os
from concurrent.futures import as_completed, ThreadPoolExecutor
from atexit import register as on_exit
from fcntl import flock, LOCK_EX, LOCK_NB, LOCK_UN
from hashlib import sha256
from | logging import basicConfig as log, error, | getLogger, info
from signal import signal, SIGINT, SIGTERM
from sys import stdin, stdout, stderr
from threading import Timer as _Timer
from time import sleep
from Debomatic import dom
from .exceptions import DebomaticError
class Process:
    """Daemon/lifecycle management for a Deb-o-Matic instance.

    NOTE(review): the attributes read by these methods (``incoming``,
    ``logfile``, ``loglevel``, ``daemonize``, ``mod_sys``, ``launcher``)
    are not set here -- presumably provided by a subclass or assigned by
    the caller before ``startup()``/``shutdown()`` run; confirm against
    the concrete process class.
    """
    def __init__(self):
        pass
    def _daemonize(self):
        """Detach from the controlling terminal and run in the background."""
        try:
            pid = os.fork()
            if pid > 0:
                # Parent process: exit and leave the child as the daemon.
                exit()
        except OSError as e:
            error(_('Error entering daemon mode: %s') % e.strerror)
            exit()
        # Standard daemon setup: new session, neutral cwd and umask.
        os.chdir('/')
        os.setsid()
        os.umask(0)
        stdout.flush()
        stderr.flush()
        # Re-point the three standard streams at /dev/null.
        si = open(os.devnull, 'r')
        so = open(os.devnull, 'a+')
        se = open(os.devnull, 'a+')
        os.dup2(si.fileno(), stdin.fileno())
        os.dup2(so.fileno(), stdout.fileno())
        os.dup2(se.fileno(), stderr.fileno())
        # Make sure cleanup runs however the daemon exits.
        on_exit(self._quit)
        # Drop any logging handlers inherited from the foreground process
        # and reconfigure logging to write to the daemon's log file.
        old_log = getLogger()
        if old_log.handlers:
            for handler in old_log.handlers:
                old_log.removeHandler(handler)
        log(filename=self.logfile, level=self.loglevel,
            format='%(asctime)s %(levelname)-8s %(message)s')
        # The pidfile written by startup() holds the pre-fork PID; rewrite
        # it with the daemon child's PID.
        self._set_pid()
    def _get_pid(self):
        """Load the daemon PID from the per-instance pidfile into self.pid."""
        self.pidfile = ('/var/run/debomatic-%s' %
                        self._sha256(self.incoming))
        try:
            with open(self.pidfile, 'r') as fd:
                self.pid = int(fd.read().strip())
        except (IOError, ValueError):
            # Missing or unparsable pidfile: no running instance.
            self.pid = None
    def _lock(self, wait=False):
        """Take an exclusive flock on this instance's lockfile.

        With ``wait=False`` the call raises OSError/IOError immediately if
        another process holds the lock; with ``wait=True`` it blocks until
        the lock becomes available.
        """
        self.fd = None
        self.lockfile = ('/var/run/debomatic-%s.lock' %
                         self._sha256(self.incoming))
        try:
            self.fd = open(self.lockfile, 'w')
            flags = LOCK_EX if wait else LOCK_EX | LOCK_NB
            flock(self.fd, flags)
        except (OSError, IOError) as ex:
            # Do not leak the file descriptor if locking failed.
            if self.fd:
                self.fd.close()
            raise ex
    def _notify_systemd(self):
        """Tell systemd we are ready, if the python-systemd bindings exist."""
        try:
            import systemd.daemon
            systemd.daemon.notify('READY=1')
        except (ImportError, SystemError):
            # Not running under systemd (or bindings unavailable): ignore.
            pass
    def _quit(self, signum=None, frame=None):
        """Signal/atexit handler: stop threads, run quit hooks, clean up."""
        info(_('Waiting for threads to complete...'))
        dom.periodic_event.cancel()
        dom.pool.shutdown()
        self.mod_sys.execute_hook('on_quit')
        self._unlock()
        os.unlink(self.pidfile)
        exit()
    def _set_pid(self):
        """Write the current process PID to the pidfile."""
        self.pidfile = ('/var/run/debomatic-%s' %
                        self._sha256(self.incoming))
        pid = str(os.getpid())
        with open(self.pidfile, 'w+') as fd:
            fd.write('%s\n' % pid)
    def _sha256(self, value):
        """Hex SHA-256 of *value*; used to derive unique pid/lock file names
        from the incoming directory path."""
        lock_sha = sha256()
        lock_sha.update(value.encode('utf-8'))
        return lock_sha.hexdigest()
    def _unlock(self):
        """Release the flock (if held) and remove the lockfile."""
        if self.fd:
            flock(self.fd, LOCK_UN)
            self.fd.close()
            self.fd = None
        if os.path.isfile(self.lockfile):
            os.unlink(self.lockfile)
    def shutdown(self):
        """Stop a running instance: send SIGTERM and wait for it to die."""
        self._get_pid()
        if not self.pid:
            return
        info(_('Waiting for threads to complete...'))
        try:
            dom.periodic_event.cancel()
            os.kill(self.pid, SIGTERM)
            # Block until the dying process drops its exclusive lock.
            self._lock(wait=True)
        except OSError as err:
            err = str(err)
            # NOTE(review): relies on 'No such process' appearing past
            # index 0 in the stringified OSError (e.g. '[Errno 3] No such
            # process'); fragile against locale/format changes -- confirm.
            if err.find('No such process') > 0:
                if os.path.exists(self.pidfile):
                    os.unlink(self.pidfile)
            else:
                error(err)
    def startup(self):
        """Start this instance: lock, install signal handlers, maybe
        daemonize, then hand control to ``self.launcher()``."""
        try:
            self._lock()
        except (OSError, IOError):
            # Non-blocking lock failed: another instance owns the lockfile.
            error(_('Another instance is running, aborting'))
            raise DebomaticError
        self._set_pid()
        signal(SIGINT, self._quit)
        signal(SIGTERM, self._quit)
        if self.daemonize:
            self._daemonize()
        self._notify_systemd()
        dom.periodic_event.start()
        self.launcher()
class ModulePool:
    """Thread pool that runs module hooks while honouring inter-module
    dependencies: a hook declared as depending on other modules only
    starts once those modules' jobs have finished."""

    def __init__(self, workers=1):
        # Maps module name -> Future of that module's hook execution.
        self._jobs = {}
        self._pool = ThreadPoolExecutor(workers)

    def _launch(self, func, hook, dependencies):
        """Block until every dependency's job completed, then run func(hook)."""
        if dependencies:
            for dependency in dependencies:
                # Poll until the dependency has been scheduled at all (it
                # may not have been submitted yet), then block on its
                # result so exceptions in dependencies stop us too.
                while dependency not in self._jobs:
                    sleep(0.1)
                self._jobs[dependency].result()
        func(hook)

    def schedule(self, func, hook):
        """Submit func(hook) for execution.

        ``hook`` is the 5-tuple (innerfunc, args, module, hookname,
        dependencies); unpacking it here also validates its shape.
        """
        innerfunc, args, module, hookname, dependencies = hook
        job = self._pool.submit(self._launch, func, hook, dependencies)
        self._jobs[module] = job

    def shutdown(self):
        """Wait for every scheduled job (propagating any exception) and
        stop the underlying executor."""
        for job in as_completed(list(self._jobs.values())):
            job.result()
        self._pool.shutdown()
class ThreadPool:
    """Simple fixed-size pool of worker threads for fire-and-forget jobs.

    Completed futures remove themselves from the bookkeeping list via a
    done-callback; any exception a job raised is logged rather than lost.
    """

    def __init__(self, workers=1):
        self._jobs = []
        self._pool = ThreadPoolExecutor(workers)

    def _finish(self, completed):
        """Done-callback: forget the future and surface its exception."""
        try:
            self._jobs.remove(completed)
        except ValueError:
            # Already removed (e.g. by a concurrent shutdown) -- fine.
            pass
        try:
            failure = completed.exception()
            if failure is not None:
                raise failure
        except Exception as exc:
            # Log with traceback instead of letting the error vanish
            # inside the worker thread.
            error(str(exc), exc_info=True)

    def schedule(self, func):
        """Submit ``func`` for asynchronous execution."""
        future = self._pool.submit(func)
        future.add_done_callback(self._finish)
        self._jobs.append(future)

    def shutdown(self):
        """Wait for all outstanding jobs, then stop the executor."""
        for future in as_completed(self._jobs):
            future.result()
        self._pool.shutdown()
class Timer(_Timer):
    """Repeating variant of :class:`threading.Timer`: invokes the callback
    every ``interval`` seconds until ``cancel()`` is called."""

    def run(self):
        # Event.wait(timeout) returns True once the event is set (i.e.
        # the timer was cancelled); every timeout (False) fires the
        # callback once more.
        while not self.finished.wait(self.interval):
            self.function(*self.args, **self.kwargs)
        self.finished.set()
|
yaybu/touchdown | touchdown/frontends/__init__.py | Python | apache-2.0 | 761 | 0 | # Copyright 2014 Isotoma Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .cloudwatch import CloudWatchFrontend
from .console import ConsoleFrontend
from .multi import MultiFrontend

# Public re-exports of the frontend implementations.
__all__ = ["CloudWatchFrontend", "ConsoleFrontend", "MultiFrontend"]
|
yancz1989/cancer | Qi/img_mask_gen.py | Python | mit | 2,636 | 0.024659 | #!/usr/bin/python3
import config as cfg
import SimpleITK as sitk
import numpy as np
import csv
from glob import glob
import pandas as pd
from tqdm import tqdm
import tools
import os
def mkdir(d):
    """Create directory *d* (including parents), ignoring an existing path.

    Only OSError (e.g. the directory already exists) is swallowed; the
    previous bare ``except`` also hid real programming errors.
    """
    try:
        os.makedirs(d)
    except OSError:
        pass
df_node = pd.read_csv(cfg.root+"annotations.csv")
# Getting list of image files
for subset in range(10):
print("processing subset ",subset)
luna_subset_path = os.path.join(cfg.root,"data","subset{}".format(subset))
file_list=sorted(glob(os.path.join(luna_subset_path,"*.mhd")))
#print(file_list)
output_path = os.path.join(cfg.root,'img_mask', 'subset{}'.format(subset))
mkdir(output_path)
# Looping over the image files in the subset
for img_file in tqdm(file_list):
sid = img_file.split('/')[-1][:-4];
hashid = sid.split('.')[-1];
sid_node = df_node[df_node["seriesuid"]==sid] #get all nodules associate with file
#print(sid)
#print(sid_node)
#load images
numpyImage, numpyOrigin, numpySpacing = tools.load_itk_image(img_file)
#load nodules infomation
#print(numpyImage.shape)
nodules = [];
for i in range(sid_node.shape[0]):
xyz_world = np.array([sid_node.coordX.values[i],sid_node.coordY.values[i],sid_node.coordZ.values[i]]);
xyz = tools.worldToVoxelCoord(xyz_world, numpyOrigin, numpySpacing);
d_world = sid_node.diameter_mm.values[i];
assert numpySpacing[0]==numpySpacing[1]
d = d_world/numpySpacing[0];
xyzd = tuple(np.append(xyz,d))
nodules.append(xyzd)
h = numpySpacing[2]/numpySpacing[0];
#print(nodules)
#Lung mask
lungMask = tools.segment_lung_mask(numpyImage,speedup=2);
#save images (to save disk, only save every other image/mask pair, and the nodule location slices)
zs = list(range(1,numpyImage.shape[0],2)) #odd slices
zs = sorted(zs + [int(x[2]) for x in nodules if x[2]%2==0]);
minPixels = 0.02*numpyImage.shape[1]*numpyImage.shape[2];
for z in zs:
if np.sum(lungMask[z])<minPixels:
continue
img,mask = tools.get_img_mask(numpyImage, h, nodules, nth=-1,z=z);
img = (img*lungMask[z]).astype(np.uint8)
mask = mask.astype(np.uint8)
np.save(os.path.join(output_path,"image_%s_%03d.npy" % (hashid, z)),img)
if np.sum(mask)>1:#if not mask, do not need to save it
np.save(os.path.join(output_path,"mask_%s_%03d.npy" % (hashid, z)),mask)
#break
#break
|
DataDog/airbrakepy | airbrakepy/__init__.py | Python | apache-2.0 | 142 | 0.014085 | from me | tadata import version as __version__
from metadata import source_url as __source_url__
from metadata import app_name as __app_nam | e__
|
sol/pygments | tests/test_shell.py | Python | bsd-2-clause | 4,709 | 0.000425 | # -*- coding: utf-8 -*-
"""
Basic Shell Tests
~~~~~~~~~~~~~~~~~
:copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import unittest
from pygments.token import Token
from pygments.lexers import BashLexer, BashSessionLexer
class BashTest(unittest.TestCase):
def setUp(self):
self.lexer = BashLexer()
self.maxDiff = None
def testCurlyNoEscapeAndQuotes(self):
fragment = u'echo "${a//["b"]/}"\n'
tokens = [
(Token.Name.Builtin, u'echo'),
(Token.Text, u' '),
(Token.Literal.String.Double, u'"'),
(Token.String.Interpol, u'${'),
(Token.Name.Variable, u'a'),
(Token.Punctuation, u'//['),
(Token.Literal.String.Double, u'"b"'),
(Token.Punctuation, u']/'),
(Token.String.Interpol, u'}'),
(Token.Literal.String.Double, u'"'),
(Token.Text, u'\n'),
]
self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
def testCurlyWithEscape(self):
fragment = u'echo ${a//[\\"]/}\n'
tokens = [
(Token.Name.Builtin, u'echo'),
(Token.Text, u' '),
(Token.String.Interpol, u'${'),
(Token.Name.Variable, u'a'),
(Token.Punctuation, u'//['),
(Token.Literal.String.Escape, u'\\"'),
(Token.Punctuation, u']/'),
(Token.String.Interpol, u'}'),
(Token.Text, u'\n'),
]
self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
def testParsedSingle(self):
fragment = u"a=$'abc\\''\n"
tokens = [
(Token.Name.Variable, u'a'),
(Token.Operator, u'='),
(Token.Literal.String.Single, u"$'abc\\''"),
(Token.Text, u'\n'),
]
self.assertEqual(tokens, l | ist(self.lexer.get_tokens(fragment)))
| def testShortVariableNames(self):
fragment = u'x="$"\ny="$_"\nz="$abc"\n'
tokens = [
# single lone $
(Token.Name.Variable, u'x'),
(Token.Operator, u'='),
(Token.Literal.String.Double, u'"'),
(Token.Text, u'$'),
(Token.Literal.String.Double, u'"'),
(Token.Text, u'\n'),
# single letter shell var
(Token.Name.Variable, u'y'),
(Token.Operator, u'='),
(Token.Literal.String.Double, u'"'),
(Token.Name.Variable, u'$_'),
(Token.Literal.String.Double, u'"'),
(Token.Text, u'\n'),
# multi-letter user var
(Token.Name.Variable, u'z'),
(Token.Operator, u'='),
(Token.Literal.String.Double, u'"'),
(Token.Name.Variable, u'$abc'),
(Token.Literal.String.Double, u'"'),
(Token.Text, u'\n'),
]
self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
def testArrayNums(self):
fragment = u'a=(1 2 3)\n'
tokens = [
(Token.Name.Variable, u'a'),
(Token.Operator, u'='),
(Token.Operator, u'('),
(Token.Literal.Number, u'1'),
(Token.Text, u' '),
(Token.Literal.Number, u'2'),
(Token.Text, u' '),
(Token.Literal.Number, u'3'),
(Token.Operator, u')'),
(Token.Text, u'\n'),
]
self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
def testEndOfLineNums(self):
fragment = u'a=1\nb=2 # comment\n'
tokens = [
(Token.Name.Variable, u'a'),
(Token.Operator, u'='),
(Token.Literal.Number, u'1'),
(Token.Text, u'\n'),
(Token.Name.Variable, u'b'),
(Token.Operator, u'='),
(Token.Literal.Number, u'2'),
(Token.Text, u' '),
(Token.Comment.Single, u'# comment\n'),
]
self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
class BashSessionTest(unittest.TestCase):
def setUp(self):
self.lexer = BashSessionLexer()
self.maxDiff = None
def testNeedsName(self):
fragment = u'$ echo \\\nhi\nhi\n'
tokens = [
(Token.Text, u''),
(Token.Generic.Prompt, u'$'),
(Token.Text, u' '),
(Token.Name.Builtin, u'echo'),
(Token.Text, u' '),
(Token.Literal.String.Escape, u'\\\n'),
(Token.Text, u'hi'),
(Token.Text, u'\n'),
(Token.Generic.Output, u'hi\n'),
]
self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
|
krishauser/Klampt | Python/klampt/math/autodiff/pytorch.py | Python | bsd-3-clause | 6,627 | 0.015995 | import klampt.math.autodiff.ad as ad
import torch,numpy as np
class TorchModuleFunction(ad.ADFunctionInterface):
    """Converts a PyTorch function to a Klamp't autodiff function class.

    The wrapped module is evaluated with float64 tensors; the last
    forward pass (inputs and flattened output) is cached so that
    ``derivative`` can reuse it for row-by-row backprop.
    """
    def __init__(self,module):
        # module: any callable accepting / returning torch tensors.
        self.module=module
        # Tensors used in the most recent eval(); consulted by _same_param.
        self._eval_params=[]
        torch.set_default_dtype(torch.float64)
    def __str__(self):
        return str(self.module)
    def n_in(self,arg):
        # -1 signals "any size" for every input argument.
        return -1
    def n_out(self):
        # -1 signals that the output size is not fixed.
        return -1
    def eval(self,*args):
        """Run the module on *args* (scalars or numpy arrays) and return
        the flattened result as a numpy array."""
        self._eval_params=[]
        for a in args:
            # Promote scalars to 1-element arrays so torch.Tensor accepts them.
            if not isinstance(a,np.ndarray):
                a=np.array([a])
            p=torch.Tensor(a)
            # Gradients w.r.t. every input are needed by derivative().
            p.requires_grad_(True)
            self._eval_params.append(p)
        try:
            self._eval_result=torch.flatten(self.module(*self._eval_params))
            #self._eval_result.forward()
        except Exception as e:
            # NOTE(review): the error is only printed; the following return
            # then uses a stale _eval_result (or raises AttributeError on
            # the first call) -- consider re-raising instead.
            print('Torch error: %s'%str(e))
        return self._eval_result.detach().numpy()
    def derivative(self,arg,*args):
        """Jacobian of the output w.r.t. input ``arg``, built one output
        row at a time via backward passes."""
        #lazily check if forward has been done before
        if not self._same_param(*args):
            self.eval(*args)
        rows=[]
        for i in range(self._eval_result.shape[0]):
            # Clear gradients accumulated by the previous row's backward.
            if self._eval_params[arg].grad is not None:
                self._eval_params[arg].grad.zero_()
            #this is a major performance penalty, torch does not support jacobian
            #we have to do it row by row
            self._eval_result[i].backward(retain_graph=True)
            rows.append(self._eval_params[arg].grad.detach().numpy().flatten())
        return np.vstack(rows)
    def jvp(self,arg,darg,*args):
        # Forward-mode products are not supported by this wrapper.
        raise NotImplementedError('')
    def _same_param(self,*args):
        """Return True iff *args* match the inputs of the cached forward
        pass (same count, shapes, and values)."""
        if not hasattr(self,"_eval_params"):
            return False
        if len(self._eval_params)!=len(args):
            return False
        for p,a in zip(self._eval_params,args):
            pn = p.detach().numpy()
            # Mirror the scalar promotion done in eval() before comparing.
            if not isinstance(a,np.ndarray):
                a=np.array([a])
            if pn.shape != a.shape:
                return False
            if (pn!=a).any():
                return False
        return True
class ADModule(torch.autograd.Function):
    """Converts a Klamp't autodiff function call or function instance to a
    PyTorch Function. The class must be created with the terminal symbols
    corresponding to the PyTorch arguments to which this is called.
    """
    @staticmethod
    def forward(ctx,func,terminals,*args):
        """Evaluate ``func`` on the tensor ``args`` bound to ``terminals``.

        ``func`` may be an ad.ADFunctionCall (keyword-style evaluation by
        terminal name) or an ad.ADFunctionInterface (positional).
        """
        torch.set_default_dtype(torch.float64)
        if len(args)!=len(terminals):
            raise ValueError("Function %s expected to have %d arguments, instead got %d"%(str(func),len(terminals),len(args)))
        if isinstance(func,ad.ADFunctionCall):
            # Keyword evaluation: map each terminal name to its numpy value.
            context={}
            for t,a in zip(terminals,args):
                context[t.name]=a.detach().numpy()
            ret=func.eval(**context)
        elif isinstance(func,ad.ADFunctionInterface):
            # Positional evaluation.
            context=[]
            for t,a in zip(terminals,args):
                context.append(a.detach().numpy())
            ret=func.eval(*context)
        else:
            raise ValueError("f must be a ADFunctionCall or ADFunctionInterface")
        # Stashed on a plain ctx attribute (not save_for_backward, which
        # only accepts tensors); reused in backward().
        ctx.saved_state=(func,terminals,context)
        return torch.Tensor(ret)
    @staticmethod
    def backward(ctx,grad):
        """Chain-rule: return J_k^T @ grad for each terminal argument.

        The leading two Nones correspond to the non-tensor ``func`` and
        ``terminals`` inputs of forward().
        """
        ret = [None,None]
        func,terminals,context = ctx.saved_state
        if isinstance(func,ad.ADFunctionCall):
            for k in range(len(terminals)):
                # Terminals may be ADTerminal objects or bare names.
                if isinstance(terminals[k],ad.ADTerminal):
                    name = terminals[k].name
                else:
                    name = terminals[k]
                deriv=torch.Tensor(func.derivative(name,**context))
                ret.append(deriv.T@grad)
        elif isinstance(func,ad.ADFunctionInterface):
            for k in range(len(terminals)):
                deriv=torch.Tensor(func.derivative(k,*context))
                ret.append(deriv.T@grad)
        else:
            raise ValueError("f must be a ADFunctionCall or ADFunctionInterface")
        return tuple(ret)
    @staticmethod
    def check_derivatives_torch(func,terminals,h=1e-6,rtol=1e-2,atol=1e-3):
        """Validate derivative() against finite differences via
        torch.autograd.gradcheck; raises on mismatch."""
        #sample some random parameters of the appropriate length
        if isinstance(func,ad.ADFunctionInterface):
            params=[]
            for i in range(len(terminals)):
                try:
                    N = func.n_in(i)
                    if N < 0:
                        # Negative means "any size": pick an arbitrary one.
                        N = 10
                except NotImplementedError:
                    N = 10
                params.append(torch.randn(N))
        else:
            N = 10
            params = [torch.randn(N) for i in range(len(terminals))]
        for p in params:
            p.requires_grad_(True)
        torch.autograd.gradcheck(ADModule.apply,tuple([func,terminals]+params),eps=h,atol=atol,rtol=rtol,raise_exception=True)
def torch_to_ad(module, args):
    """Wrap a PyTorch callable and apply it through Klamp't autodiff.

    ``args`` is a list of scalars or numpy arrays; the result is a
    Klamp't autodiff function call over those arguments.
    """
    return TorchModuleFunction(module)(*args)
def ad_to_torch(func, terminals=None):
    """Converts a Klamp't autodiff function call or function instance to a
    PyTorch Function.

    Args:
        func: an ad.ADFunctionCall or ad.ADFunctionInterface.
        terminals: optional list of the arguments (ADTerminal objects or
            name strings) that PyTorch will expect.  If None, they are
            determined automatically: forward traversal order for an
            ADFunctionCall, declared argument names otherwise.

    Returns:
        An ADModule wrapping ``func`` over the given terminals.

    Raises:
        ValueError: if the provided terminals do not match ``func``.
    """
    if terminals is None:
        # Derive the terminal list from the function itself.
        if isinstance(func, ad.ADFunctionCall):
            terminals = func.terminals()
        else:
            n_args = func.n_args()
            terminals = [func.argname(i) for i in range(n_args)]
    else:
        # Validate the caller-provided terminals against func.
        if isinstance(func, ad.ADFunctionCall):
            fterminals = func.terminals()
            if len(terminals) != len(fterminals):
                raise ValueError("The number of terminals provided is incorrect")
            for t in terminals:
                name = t.name if isinstance(t, ad.ADTerminal) else t
                if name not in fterminals:
                    raise ValueError("Invalid terminal %s, function call %s only has terminals %s" % (name, str(func), str(terminals)))
        else:
            try:
                if len(terminals) != func.n_args():
                    raise ValueError("Invalid number of terminals, function %s expects %d" % (str(func), func.n_args()))
            except NotImplementedError:
                # n_args may be unspecified (variadic): skip the check.
                pass
    return ADModule(func, terminals)
mazvv/travelcrm | travelcrm/resources/resources_types.py | Python | gpl-3.0 | 643 | 0 | # -*-coding: utf-8 -*-
from zope.interface import implementer

from ..interfaces import (
    IResourceType,
)
from ..resources import (
    ResourceTypeBase,
)
from ..lib.utils.common_utils import translate as _
@implementer(IResourceType)
class ResourcesTypesResource(ResourceTypeBase):
    """Resource type backing the "resources types" section."""

    __name__ = 'resources_types'

    @property
    def allowed_assign(self):
        """This resource type may always be assigned."""
        return True

    @property
    def allowed_permisions(self):
        """Base permissions extended with the extra 'settings' entry."""
        permisions = super(ResourcesTypesResource, self).allowed_permisions
        permisions.append(('settings', _(u'settings')))
        return permisions
|
uqyge/combustionML | FPV_ANN_pureResNet/FPV_resnet_fullycoupled.py | Python | mit | 5,137 | 0.009733 | import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
import tensorflow as tf
from keras.models import Model
from keras.layers import Dense, Input
from keras.callbacks import ModelCheckpoint
from resBlock import res_block_org
from data_reader import read_hdf_data, read_hdf_data_psi
from writeANNProperties import writeANNProperties
from keras import backend as K
from keras.models import load_model
import ast
##########################
# Parameters
n_neuron = 500
branches = 3
scale = 3
batch_size = 1024*4
epochs = 2000
vsplit = 0.1
batch_norm = False
# define the type of scaler: MinMax or Standard
scaler = 'Standard' # 'Standard' 'MinMax'
##########################
labels = []
with open('GRI_species_order_reduced', 'r') as f:
species = f.readlines()
for line in species:
# remove linebreak which is the last character of the string
current_place = line[:-1]
# add item to the list
labels.append(current_place)
# append other fields: heatrelease, T, PVs
#labels.append('heatRelease')
labels.append('T')
labels.append('PVs')
# tabulate psi, mu, alpha
labels.append('psi')
labels.append('mu')
labels.append('alpha')
# DO NOT CHANGE THIS ORDER!!
input_features=['f','zeta','pv']
# read in the data
X, y, df, in_scaler, out_scaler = read_hdf_data_psi('./tables_of_fgm.H5',key='of_tables',
in_labels=input_features, labels = labels,scaler=scaler)
# split into train and test data
X_train, X_test, y_train, y_test = train_test_split(X,y, test_size=0.01)
# %%
print('set up ANN')
# ANN parameters
dim_input = X_train.shape[1]
dim_label = y_train.shape[1]
# This returns a tensor
inputs = Input(shape=(dim_input,))#,name='input_1')
# a layer instance is callable on a tensor, and returns a tensor
x = Dense(n_neuron, activation='relu')(inputs)
#
# x = res_block(x, scale, n_neuron, stage=1, block='a', bn=batch_norm,branches=branches)
# x = res_block(x, scale, n_neuron, stage=1, block='b', bn=batch_norm,branches=branches)
# x = res_block(x, scale, n_neuron, stage=1, block='c', bn=batch_norm,branches=branches)
x = res_block_org(x, n_neur | on, stage=1, block='a', bn=batch_norm)
x = res_block_org(x, n_neuron, stage=1, block='b', bn=batch_norm)
x = res_block_org(x, n_neuron, stage | =1, block='c', bn=batch_norm)
#x = res_block(x, n_neuron, stage=1, block='d', bn=batch_norm)
predictions = Dense(dim_label, activation='linear')(x)
model = Model(inputs=inputs, outputs=predictions)
model.compile(loss='mse', optimizer='adam', metrics=['accuracy'])
# get the model summary
model.summary()
# checkpoint (save the best model based validate loss)
filepath = "./tmp/weights.best.cntk.hdf5"
checkpoint = ModelCheckpoint(filepath,
monitor='val_loss',
verbose=1,
save_best_only=True,
mode='min',
period=10)
callbacks_list = [checkpoint]
# fit the model
history = model.fit(
X_train, y_train,
epochs=epochs,
batch_size=batch_size,
validation_split=vsplit,
verbose=2,
callbacks=callbacks_list,
shuffle=True)
#%%
model.load_weights("./tmp/weights.best.cntk.hdf5")
# cntk.combine(model.outputs).save('mayerTest.dnn')
# # %%
# ref = df.loc[df['p'] == 40]
# x_test = in_scaler.transform(ref[['p', 'he']])
predict_val = model.predict(X_test)
X_test_df = pd.DataFrame(in_scaler.inverse_transform(X_test),columns=input_features)
y_test_df = pd.DataFrame(out_scaler.inverse_transform(y_test),columns=labels)
sp='PVs'
# loss
fig = plt.figure()
plt.semilogy(history.history['loss'])
if vsplit:
plt.semilogy(history.history['val_loss'])
plt.title('mse')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper right')
plt.savefig('./exported/Loss_%s_%s_%i.eps' % (sp,scaler,n_neuron),format='eps')
plt.show(block=False)
predict_df = pd.DataFrame(out_scaler.inverse_transform(predict_val), columns=labels)
plt.figure()
plt.title('Error of %s ' % sp)
plt.plot((y_test_df[sp] - predict_df[sp]) / y_test_df[sp])
plt.title(sp)
plt.savefig('./exported/Error_%s_%s_%i.eps' % (sp,scaler,n_neuron),format='eps')
plt.show(block=False)
plt.figure()
plt.scatter(predict_df[sp],y_test_df[sp],s=1)
plt.title('R2 for '+sp)
plt.savefig('./exported/R2_%s_%s_%i.eps' % (sp,scaler,n_neuron),format='eps')
plt.show(block=False)
# %%
a=(y_test_df[sp] - predict_df[sp]) / y_test_df[sp]
test_data=pd.concat([X_test_df,y_test_df],axis=1)
pred_data=pd.concat([X_test_df,predict_df],axis=1)
test_data.to_hdf('sim_check.H5',key='test')
pred_data.to_hdf('sim_check.H5',key='pred')
# Save model
sess = K.get_session()
saver = tf.train.Saver(tf.global_variables())
saver.save(sess, './exported/my_model')
model.save('FPV_ANN_tabulated_%s.H5' % scaler)
# write the OpenFOAM ANNProperties file
writeANNProperties(in_scaler,out_scaler,scaler)
# Convert the model to
#run -i k2tf.py --input_model='FPV_ANN_tabulated_Standard.H5' --output_model='exported/FPV_ANN_tabulated_Standard.pb'
|
eunchong/build | scripts/slave/recipe_modules/ios/test_api.py | Python | bsd-3-clause | 1,200 | 0.009167 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from copy import deepcopy
from recipe_engine import recipe_test_ap | i
class iOSTestApi(recipe_test_api.RecipeTestApi):
  """Test API for the ios recipe module: canned build configs and step data."""

  @recipe_test_api.mod_test_data
  @staticmethod
  def build_config(config):
    # Deep-copy so individual test cases cannot mutate a shared config.
    return deepcopy(config)

  def make_test_build_config(self, config):
    """Returns mod_test_data simulating this bot's build config file."""
    return self.build_config(config)

  @recipe_test_api.mod_test_data
  @staticmethod
  def parent_build_config(config):
    # Deep-copy for the same isolation reason as build_config.
    return deepcopy(config)

  def make_test_build_config_for_parent(self, config):
    """Returns mod_test_data simulating the parent bot's build config."""
    return self.parent_build_config(config)

  def host_info(self):
    """Canned JSON output for the host-info step."""
    return self.m.json.output({
        'Mac OS X Version': '1.2.3',
        'Xcode Version': '6.7.8',
        'Xcode Build Version': '5D342509a',
        'Xcode SDKs': [
            'fake sdk 1.0',
            'fake sdk 1.1',
            'fake sdk 2.0',
        ],
    })

  def test_results(self):
    """Canned JSON output mimicking a test step's results."""
    return self.m.json.output({
        'links': {
            'fake URL text': 'fake URL',
        },
        'logs': {
            'fake log': [
                'fake log line 1',
                'fake log line 2',
            ],
        }
    })
|
anhstudios/swganh | data/scripts/templates/object/tangible/deed/event_perk/shared_rebel_3x10_honorguard_deed.py | Python | mit | 487 | 0.045175 | #### NOTICE: | THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
    """Build the tangible template for the rebel 3x10 honor-guard deed.

    Called by the template loader with the engine kernel; the kernel is
    unused here but required by the loader's calling convention.
    """
    result = Tangible()
    result.template = "object/tangible/deed/event_perk/shared_rebel_3x10_honorguard_deed.iff"
    result.attribute_template_id = 2
    result.stfName("event_perk","rebel_3x10_honorguard_deed_name")
    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####
    return result
ajhalme/kbsim | KBGUI.py | Python | gpl-3.0 | 16,260 | 0.009102 | import pygame
from pygame.locals import *
import sys; sys.path.insert(0, "./lib/")
from pgu import gui
from Vec2d import *
##
## KBSimulation graphics
##
class KBGUI(gui.container.Container):
    def __init__(self, module):
        """Set up the pygame window and UI state for the given simulation
        module.

        NOTE(review): the gui.container.Container base __init__ is not
        called here -- confirm that is intentional.
        """
        ## Module
        self.module = module
        self.config = module.config
        # Designer mode (dmode) vs simulation mode (smode) are mutually
        # exclusive and toggle which overlays are drawn by default.
        self.dmode = self.config['designer']
        self.smode = not self.dmode
        ## UI - Display
        # Window is the arena plus a UI bar of 'uiheight' pixels on top.
        self.screensize = (self.config['width'],
                           self.config['height'] + self.config['uiheight'])
        self.w, self.h = self.screensize
        self.uiheight = self.config['uiheight']
        pygame.init()
        self.screen = pygame.display.set_mode(self.screensize)
        self.screen.fill(self.config['screenfill'])
        pygame.display.set_caption("Kilobot App")
        ## UI - Elements
        self.font = pygame.font.Font(None, 15)
        # Color key used for transparent blits.
        self.trans = self.config['transparent']
        # Currently dragged / selected bot (None when inactive).
        self.botmove = None
        self.botsel = None
        self.noisestat = 0.0
        # Draw-layer toggles; the trailing letter is the keyboard key that
        # flips each flag (handled by the key event handlers).
        self.drawlinks = True # c
        self.drawdebug = True # d
        self.drawreddots = True # e
        self.drawgrid = True # g
        self.drawinfo = True # i
        self.drawradio = True # r
        self.drawtraffic = False # t
        self.drawui = True # u
        # Mode-specific defaults: designer hides the info overlay,
        # simulation hides the grid.
        if self.dmode:
            self.drawinfo = False
        elif self.smode:
            self.drawgrid = False
def update_border_collisions(self):
# dodge, collision detection on borders
for a in self.module.bots:
r = a.radius
x,y = a.pos.inttup()
if x < r:
a.pos.__setitem__(0,r)
if x > (self.w - r):
a.pos.__setitem__(0,self.w - r)
if y < self.uiheight + r: # UI bar
a.pos.__setitem__(1,self.uiheight + r)
if y > (self.h - r):
a.pos.__setitem__(1,self.h - r)
def event(self, e):
escape = (e.type == KEYUP and e.key == K_ESCAPE)
if (e.type == QUIT) or escape:
self.module.running = False
self.module.app.running = False
elif e.type == KEYUP:
self.keyUp(e.key)
elif e.type == KEYDOWN:
self.keyDown(e.key)
elif e.type == MOUSEBUTTONUP:
self.mouseUp(e.button, e.pos)
elif e.type == MOUSEMOTION:
self.mouseMotion(e.buttons, e.pos, e.rel)
    def draw(self): # draw, lower layers first
        """Render one frame back-to-front: background grid, radio ranges,
        bots (or the abstract connectivity graph), info/traffic/debug
        overlays, the UI status bar, and designer-mode links/red dots."""
        self.screen.fill(self.config['screenfill']) # refresh
        if (self.drawgrid):
            g = self.config['gridsquare']
            col = self.config['gridcolor']
            ui = self.config['uiheight']
            # horizontal lines start below the UI bar; vertical span full height
            for i in range(1 + (self.h / g)):
                pygame.draw.line(self.screen, col, (0,ui + i*g), (self.w, ui + i*g))
            for i in range(1 + (self.w / g)):
                pygame.draw.line(self.screen, col, (i*g,0), (i*g, self.h))
        if (self.drawradio):
            if ( self.smode and
                 (self.module.inoise > 0 or self.module.anoise > 100)):
                # noisy simulation: draw each transmitter as a translucent bubble
                for a in self.module.bots: # draw radio bubble
                    surf = pygame.Surface((a.rradius * 2, a.rradius*2))
                    surf.fill(self.trans)
                    surf.set_colorkey(self.trans)
                    pos = a.pos.inttup()
                    pygame.draw.circle(surf, a.view.colorr +(0,),
                                       (a.rradius,a.rradius), a.rradius)
                    surf.set_alpha(100)
                    self.screen.blit(surf, (pos[0] - a.rradius, pos[1] -a.rradius))
            else:
                for a in self.module.bots: # draw simple radio ranges
                    if (a.tx_enabled == 1):
                        a.view.drawradio(self.screen)
        if (self.smode and self.module.abstract): # draw a connectivity graph
            for i in range(0,len(self.module.bots)): # draw an edge if close enough to transmit
                for j in range(i+1, len(self.module.bots)):
                    a = self.module.bots[i]
                    b = self.module.bots[j]
                    dist = a.pos.get_distance(b.pos)
                    bound = a.rradius
                    if dist < bound:
                        pygame.draw.aaline(self.screen, self.config['graphc'],
                                           a.pos.inttup(), b.pos.inttup(), 2)
            for a in self.module.bots: # draw abstraction
                a.view.drawsimple(self.screen) # vertex
        else:
            for a in self.module.bots: # draw proper bot
                if (self.dmode):
                    # designer mode: color each bot by its link degree
                    degree = self.module.linkc[a.secretID]
                    if (degree == 0):
                        a.view.color = self.config['botcolorbase0']
                    elif (degree == 1):
                        a.view.color = self.config['botcolorbase1']
                    elif (degree == 2):
                        a.view.color = self.config['botcolorbase2']
                    else:
                        a.view.color = self.config['botcolorbasex']
                a.view.draw(self.screen)
        if (self.drawinfo):
            for a in self.module.bots: # draw info: secretID and coords
                a.view.drawinfo(self.screen)
        if (self.drawtraffic):
            # transmissions first, then receptions, so RX markers land on top
            for a in self.module.bots:
                a.view.drawtx(self.screen)
            for a in self.module.bots:
                a.view.drawrx(self.screen)
        if (self.drawdebug):
            for a in self.module.bots: # draw debug
                a.view.drawdebug(self.screen)
        if (self.drawui):
            bg = pygame.Surface((self.w, self.config['uiheight']))
            bg.fill(self.config['screenfill'])
            col = self.config['uicolor']
            f = self.font.render
            self.screen.blit(bg, (0,0)) # bg
            if (self.smode):
                # status read-outs: fps, round counter, noise settings and stats
                fpsd = f("fps:%3d/%3d" % (self.module.clock.get_fps(), self.module.fps), True, col)
                off = 525
                self.screen.blit(fpsd, (off + 10, 5))
                roundd = f("round:%06d" % (self.module.round), True, col)
                self.screen.blit(roundd, (off + 10, 15))
                inoised = f("inoise:%3d" % (self.module.inoise), True, col)
                self.screen.blit(inoised, (off + 95, 5))
                anoised = f("anoise:%3d" % (self.module.anoise), True, col)
                self.screen.blit(anoised, (off + 95, 15))
                noisestatd = f("%3d%%" % (int(100*self.module.noisestat)), True, col)
                self.screen.blit(noisestatd, (off + 175, 10))
            elif (self.dmode):
                pass
        if (self.drawlinks and self.dmode):
            # designer links: col1 when within maxdist, col2 otherwise
            col1 = self.config['botcolorselg']
            col2 = self.config['botcolorselr']
            for conn in self.module.links:
                a = conn[0]; b = conn[1]
                diff = a.pos.get_distance(b.pos)
                col = col1 if (diff < self.config['maxdist']) else col2
                pygame.draw.line(self.screen, col, a.pos.inttup(), b.pos.inttup(), 2)
            # rubber-band line while a link is being dragged
            # NOTE(review): self.botselpos is not initialized in __init__ --
            # confirm a mouse handler always assigns it before draw() runs.
            if (self.botsel != None and self.botselpos != None):
                diff = self.botsel.pos.get_distance(self.botselpos)
                col = col1 if (diff < self.config['maxdist']) else col2
                pygame.draw.line(self.screen, col,
                                 self.botsel.pos.inttup(),
                                 self.botselpos.inttup(), 2)
        if (self.drawreddots and self.dmode):
            # numbered red markers; labels count from 2
            for i in range(len(self.module.reddots)):
                r = self.module.reddots[i]
                text = self.font.render("%d" % (i+2), True, (225, 0, 0))
                self.screen.blit(text, r.inttup())
    def keyDown(self, key):
        """Key-press hook; intentionally a no-op (key actions are handled in keyUp)."""
        pass
def keyUp(self, key):
if (key == K_a):
if self.smode:
self.module.abstract = not self. |
rmanoni/mi-instrument | mi/instrument/nortek/aquadopp/ooicore/test/test_driver.py | Python | bsd-2-clause | 28,532 | 0.004802 | """
@package mi.instrument.nortek.aquadopp.ooicore.test.test_driver
@author Rachel Manoni
@brief Test cases for ooicore driver
USAGE:
Make tests verbose and provide stdout
* From the IDK
$ bin/test_driver
$ bin/test_driver -u
$ bin/test_driver -i
$ bin/test_driver -q
* From pyon
$ bin/nosetests -s -v /Users/Bill/WorkSpace/marine-integrations/mi/instrument/nortek/aquadopp/ooicore
$ bin/nosetests -s -v /Users/Bill/WorkSpace/marine-integrations/mi/instrument/nortek/aquadopp/ooicore -a UNIT
$ bin/nosetests -s -v /Users/Bill/WorkSpace/marine-integrations/mi/instrument/nortek/aquadopp/ooicore -a INT
$ bin/nosetests -s -v /Users/Bill/WorkSpace/marine-integrations/mi/instrument/nortek/aquadopp/ooicore -a QUAL
"""
__author__ = 'Rachel Manoni, Ronald Ronquillo'
__license__ = 'Apache 2.0'
import time
from nose.plugins.attrib import attr
from mi.core.log import get_logger
log = get_logger()
from mi.instrument.nortek.vector.ooicore.test.test_driver import bad_sample
from mi.idk.unit_test import InstrumentDriverTestCase, ParameterTestConfigKey
from mi.instrument.nortek.test.test_driver import DriverTestMixinSub
from mi.core.instrument.instrument_driver import DriverConfigKey, ResourceAgentState
from mi.core.instrument.data_particle import DataParticleKey, DataParticleValue
from mi.core.instrument.chunker import StringChunker
from mi.core.exceptions import SampleException
from mi.instrument.nortek.aquadopp.ooicore.driver import NortekDataParticleType
from mi.instrument.nortek.aquadopp.ooicore.driver import AquadoppDwVelocityDataParticle
from mi.instrument.nortek.aquadopp.ooicore.driver import AquadoppDwVelocityDataParticleKey
from mi.instrument.nortek.test.test_driver import NortekUnitTest, NortekIntTest, NortekQualTest, user_config2
from mi.instrument.nortek.driver import ProtocolState, ProtocolEvent, TIMEOUT, Parameter, NortekEngIdDataParticleKey, \
NortekInstrumentProtocol, NEWLINE, EngineeringParameter
###
# Driver parameters for the tests
###
InstrumentDriverTestCase.initialize(
driver_module='mi.instrument.nortek.aquadopp.ooicore.driver',
driver_class="InstrumentDriver",
instrument_agent_resource_id='nortek_aquadopp_dw_ooicore',
instrument_agent_name='nortek_aquadopp_dw_ooicore_agent',
instrument_agent_packet_config=NortekDataParticleType(),
driver_startup_config={
DriverConfigKey.PARAMETERS: {
Parameter.DEPLOYMENT_NAME: 'test',
Parameter.COMMENTS: 'this is a test',
#update the following two parameters to allow for faster collecting of samples during testing
Parameter.AVG_INTERVAL: 1,
Parameter.MEASUREMENT_INTERVAL: 1}}
)
def eng_id_sample():
    """Return the raw engineering-ID sample bytes ("AQD").

    The payload is stored as hex for readability and decoded on demand.
    ``binascii.unhexlify`` replaces the Python-2-only ``str.decode('hex')``
    so the helper yields the same bytes on Python 2 and also runs on
    Python 3.
    """
    import binascii  # local import keeps this test helper self-contained
    sample_as_hex = "415144"
    return binascii.unhexlify(sample_as_hex)
eng_id_particle = [{DataParticleKey.VALUE_ID: NortekEngIdDataParticleKey.ID, DataParticleKey.VALUE: "AQD 8493 "}]
def velocity_sample():
    """Return one raw 42-byte Aquadopp velocity record.

    The record is hex-encoded in source for readability;
    ``binascii.unhexlify`` replaces the Python-2-only
    ``str.decode('hex')`` so the same bytes are produced on
    Python 2 and Python 3.
    """
    import binascii  # local import keeps this test helper self-contained
    sample_as_hex = "a5011500101926221211000000009300f83b810628017f01002d0000e3094c0122ff9afe1e1416006093"
    return binascii.unhexlify(sample_as_hex)
velocity_particle = [{'value_id': Aqua | doppDwVelocityDataParticleKey.TIMESTAMP, 'value': '26/11/2012 22:10:19'},
{'value_id': AquadoppDwVelocityDataParticleKey.ERROR, 'value': 0},
{'value_id': AquadoppDwVelocityDataParticleKey.ANALOG1, 'value': 0},
{'value_id': AquadoppDwVelocityDataParticleKey.BATTERY_VOLTAGE, 'value': 147},
{'value_id': AquadoppDwVelocityDataParticleKey.SOUND_SPEED_ANALOG2, 'value': 15352},
{'value_id': AquadoppDwVelocityDataParticleKey.HEADING, 'value': 1665},
{'value_id': AquadoppDwVelocityDataParticleKey.PITCH, 'value': 296},
{'value_id': AquadoppDwVelocityDataParticleKey.ROLL, 'value': 383},
{'value_id': AquadoppDwVelocityDataParticleKey.STATUS, 'value': 45},
{'value_id': AquadoppDwVelocityDataParticleKey.PRESSURE, 'value': 0},
{'value_id': AquadoppDwVelocityDataParticleKey.TEMPERATURE, 'value': 2531},
{'value_id': AquadoppDwVelocityDataParticleKey.VELOCITY_BEAM1, 'value': 332},
{'value_id': AquadoppDwVelocityDataParticleKey.VELOCITY_BEAM2, 'value': -222},
{'value_id': AquadoppDwVelocityDataParticleKey.VELOCITY_BEAM3, 'value': -358},
{'value_id': AquadoppDwVelocityDataParticleKey.AMPLITUDE_BEAM1, 'value': 30},
{'value_id': AquadoppDwVelocityDataParticleKey.AMPLITUDE_BEAM2, 'value': 20},
{'value_id': AquadoppDwVelocityDataParticleKey.AMPLITUDE_BEAM3, 'value': 22}]
###############################################################################
# DRIVER TEST MIXIN #
# Defines a set of constants and assert methods used for data particle #
# verification #
# #
# In python, mixin classes are classes designed such that they wouldn't be #
# able to stand on their own, but are inherited by other classes generally #
# using multiple inheritance. #
# #
# This class defines a configuration structure for testing and common assert #
# methods for validating data particles. #
###############################################################################
class AquadoppDriverTestMixinSub(DriverTestMixinSub):
"""
Mixin class used for storing data particle constance and common data assertion methods.
"""
#Create some short names for the parameter test config
TYPE = ParameterTestConfigKey.TYPE
READONLY = ParameterTestConfigKey.READONLY
STARTUP = ParameterTestConfigKey.STARTUP
DA = ParameterTestConfigKey.DIRECT_ACCESS
VALUE = ParameterTestConfigKey.VALUE
REQUIRED = ParameterTestConfigKey.REQUIRED
DEFAULT = ParameterTestConfigKey.DEFAULT
STATES = ParameterTestConfigKey.STATES
#this particle can be used for both the velocity particle and the diagnostic particle
_sample_velocity_diagnostic = {
AquadoppDwVelocityDataParticleKey.TIMESTAMP: {TYPE: unicode, VALUE: '', REQUIRED: True},
AquadoppDwVelocityDataParticleKey.ERROR: {TYPE: int, VALUE: 0, REQUIRED: True},
AquadoppDwVelocityDataParticleKey.ANALOG1: {TYPE: int, VALUE: 0, REQUIRED: True},
AquadoppDwVelocityDataParticleKey.BATTERY_VOLTAGE: {TYPE: int, VALUE: 0, REQUIRED: True},
AquadoppDwVelocityDataParticleKey.SOUND_SPEED_ANALOG2: {TYPE: int, VALUE: 0, REQUIRED: True},
AquadoppDwVelocityDataParticleKey.HEADING: {TYPE: int, VALUE: 0, REQUIRED: True},
AquadoppDwVelocityDataParticleKey.PITCH: {TYPE: int, VALUE: 0, REQUIRED: True},
AquadoppDwVelocityDataParticleKey.ROLL: {TYPE: int, VALUE: 0, REQUIRED: True},
AquadoppDwVelocityDataParticleKey.PRESSURE: {TYPE: int, VALUE: 0, REQUIRED: True},
AquadoppDwVelocityDataParticleKey.STATUS: {TYPE: int, VALUE: 0, REQUIRED: True},
AquadoppDwVelocityDataParticleKey.TEMPERATURE: {TYPE: int, VALUE: 0, REQUIRED: True},
AquadoppDwVelocityDataParticleKey.VELOCITY_BEAM1: {TYPE: int, VALUE: 0, REQUIRED: True},
AquadoppDwVelocityDataParticleKey.VELOCITY_BEAM2: {TYPE: int, VALUE: 0, REQUIRED: True},
AquadoppDwVelocityDataParticleKey.VELOCITY_BEAM3: {TYPE: int, VALUE: 0, REQUIRED: True},
AquadoppDwVelocityDataParticleKey.AMPLITUDE_BEAM1: {TYPE: int, VALUE: 0, REQUIRED: True},
AquadoppDwVelocityDataParticleKey.AMPLITUDE_BEAM2: {TYPE: int, VALUE: 0, REQUIRED: True},
AquadoppDwVelocityDataParticleKey.AMPLITUDE_BEAM3: {TYPE: int, VALUE: 0, REQUIRED: True}
}
def assert_particle_velocity(self, data_particle, verify_values=False):
"""
Verify velpt_velocity_data
@param data_particle AquadoppDwVelocityDataParticleKey data particle
@param verify_ |
pombredanne/bokeh | bokeh/charts/tests/test_stats.py | Python | bsd-3-clause | 818 | 0.00489 | import pytest
from bokeh.charts.stats import Bins
from bokeh.models import ColumnDataSource
import pandas as pd
@pytest.fixture
def ds(test_data):
    """Fixture: the shared automobile dataset wrapped in a ColumnDataSource."""
    source = ColumnDataSource(test_data.auto_data)
    return source
def test_explicit_bin_count(ds):
    """An explicitly requested bin_count must be honored exactly."""
    binned = Bins(source=ds, column='mpg', bin_count=2)
    assert len(binned.bins) == 2
def test_a | uto_bin_count(ds):
b = Bins(source=ds, column='mpg')
assert len(b.bins) == 12
# this should test it still matches
# http://stats.stackexchange.com/questions/114490/optimal-bin-width-for-two-dimensional-histogram
# with iterables with the same value
b = Bins(values=[5,5,5,5,5], bin_count=None)
assert len(b.bins) == 3
def test_bin_labeling(ds):
    """Binning a column must add a '<column>_bin' column with one distinct
    label per bin to the source."""
    Bins(source=ds, column='cyl', bin_count=2)
    labels = pd.Series(ds.data['cyl_bin']).drop_duplicates()
    assert len(labels) == 2
|
FedoraScientific/salome-paravis | test/VisuPrs/MeshPresentation/F1.py | Python | lgpl-2.1 | 1,515 | 0.00198 | # Copyright (C) 2010-2014 CEA/DEN, EDF R&D
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
# |
# This case corresponds to: /visu/MeshPresentation/F1 case
# Create Mesh Presentation for all data of the given MED file
import sys
from paravistest import datadir, pictureext, get_picture_dir
from presentations import CreatePrsForFile, PrsTypeEnum
import pvserver as paravis
# Create presentations
myParavis = paravis.myParavis
# Directory for saving snapshots
picturedir = get_picture_dir("MeshPresentation/F1")
# Input MED data file for this test case
file = datadir + "ml.med"  # NOTE(review): shadows the Python 2 builtin 'file'
print " --------------------------------- "
print "file ", file
print " --------------------------------- "
print "CreatePrsForFile..."
# Build MESH presentations for every field in the file and save snapshots
CreatePrsForFile(myParavis, file, [PrsTypeEnum.MESH], picturedir, pictureext)
|
tedder/ansible-modules-core | system/systemd.py | Python | gpl-3.0 | 14,880 | 0.003965 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Brian Coca <bcoca@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
module: systemd
author:
- "Ansible Core Team"
version_added: "2.2"
short_description: Manage services.
description:
- Controls systemd services on remote hosts.
options:
name:
required: true
description:
- Name of the service.
aliases: ['unit', 'service']
state:
required: false
default: null
choices: [ 'started', 'stopped', 'restarted', 'reloaded' ]
description:
- C(started)/C(stopped) are idempotent actions that will not run commands unless necessary.
C(restarted) will always bounce the service. C(reloaded) will always reload.
enabled:
required: false
choices: [ "yes", "no" ]
default: null
description:
- Whether the service should start on boot. B(At least one of state and enabled are required.)
masked:
required: false
choices: [ "yes", "no" ]
default: null
description:
- Whether the unit should be masked or not, a masked unit is impossible to start.
daemon_reload:
required: false
default: no
choices: [ "yes", "no" ]
description:
- run daemon-reload before doing any other operations, to make sure systemd has read any changes.
aliases: ['daemon-reload']
user:
required: false
default: no
choices: [ "yes", "no" ]
description:
- run systemctl talking to the service manager of the calling user, rather than the service manager
of the system.
notes:
- One option other than name is required.
requirements:
- A system managed by systemd
'''
# Fixed: the crond example mixed inline key=value shorthand with YAML
# mapping syntax ("daemon_reload: yes" inside a k=v one-liner), which is
# not valid playbook syntax; it now uses key=value throughout.
EXAMPLES = '''
# Example action to start service httpd, if not running
- systemd: state=started name=httpd
# Example action to stop service cron on debian, if running
- systemd: name=cron state=stopped
# Example action to restart service cron on centos, in all cases, also issue daemon-reload to pick up config changes
- systemd: state=restarted daemon_reload=yes name=crond
# Example action to reload service httpd, in all cases
- systemd: name=httpd state=reloaded
# Example action to enable service httpd and ensure it is not masked
- systemd:
    name: httpd
    enabled: yes
    masked: no
# Example action to enable a timer for dnf-automatic
- systemd:
    name: dnf-automatic.timer
    state: started
    enabled: True
'''
RETURN = '''
status:
description: A dictionary with the key=value pairs returned from `systemctl show`
returned: success
type: complex
sample: {
"ActiveEnterTimestamp": "Sun 2016-05-15 18:28:49 EDT",
"ActiveEnterTimestampMonotonic": "8135942",
"ActiveExitTimestampMonotonic": "0",
"ActiveState": "active",
"After": "auditd.service systemd-user-sessions.service time-sync.target systemd-journald.socket basic.target system.slice",
"AllowIsolate": "no",
"Before": "shutdown.target multi-user.target",
"BlockIOAccounting": "no",
"BlockIOWeight": "1000",
"CPUAccounting": "no",
"CPUSchedulingPolicy": "0",
"CPUSchedulingPriority": "0",
"CPUSchedulingResetOnFork": "no",
"CPUShares": "1024",
"CanIsolate": "no",
"CanReload": "yes",
"CanStart": "yes",
"CanStop": "yes",
"CapabilityBoundingSet": "18446744073709551615",
"ConditionResult": "yes",
"ConditionTimestamp": "Sun 2016-05-15 18:28:49 EDT",
"ConditionTimestampMonotonic": "7902742",
"Conflicts": "shutdown.target",
"ControlGroup": "/system.slice/crond.service",
"ControlPID": "0",
"DefaultDependencies": "yes",
"Delegate": "no",
"Description": "Command Scheduler",
"DevicePolicy": "auto",
"EnvironmentFile": "/etc/sysconfig/crond (ignore_errors=no)",
"ExecMainCode": "0",
"ExecMainExitTimestampMonotonic": "0",
"ExecMainPID": "595",
"ExecMainStartTimestamp": "Sun 2016-05-15 18:28:49 EDT",
"ExecMainStartTimestampMonotonic": "8134990",
"ExecMainStatus": "0",
"ExecReload": "{ path=/bin/kill ; argv[]=/bin/kill -HUP $MAINPID ; ignore_errors=no ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }",
"ExecStart": "{ path=/usr/sbin/crond ; argv[]=/usr/sbin/crond -n $CRONDARGS ; ignore_errors=no ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }",
"FragmentPath": "/usr/lib/systemd/system/crond.service",
"GuessMainPID": "yes",
"IOScheduling": "0",
"Id": "crond.service",
"IgnoreOnIsolate": "no",
"IgnoreOnSnapshot": "no",
"IgnoreSIGPIPE": "yes",
| "InactiveEnterTimestampMonotonic": "0",
"InactiveExitTimestamp": "Sun 2016-05-15 18:28:49 EDT",
"Inactiv | eExitTimestampMonotonic": "8135942",
"JobTimeoutUSec": "0",
"KillMode": "process",
"KillSignal": "15",
"LimitAS": "18446744073709551615",
"LimitCORE": "18446744073709551615",
"LimitCPU": "18446744073709551615",
"LimitDATA": "18446744073709551615",
"LimitFSIZE": "18446744073709551615",
"LimitLOCKS": "18446744073709551615",
"LimitMEMLOCK": "65536",
"LimitMSGQUEUE": "819200",
"LimitNICE": "0",
"LimitNOFILE": "4096",
"LimitNPROC": "3902",
"LimitRSS": "18446744073709551615",
"LimitRTPRIO": "0",
"LimitRTTIME": "18446744073709551615",
"LimitSIGPENDING": "3902",
"LimitSTACK": "18446744073709551615",
"LoadState": "loaded",
"MainPID": "595",
"MemoryAccounting": "no",
"MemoryLimit": "18446744073709551615",
"MountFlags": "0",
"Names": "crond.service",
"NeedDaemonReload": "no",
"Nice": "0",
"NoNewPrivileges": "no",
"NonBlocking": "no",
"NotifyAccess": "none",
"OOMScoreAdjust": "0",
"OnFailureIsolate": "no",
"PermissionsStartOnly": "no",
"PrivateNetwork": "no",
"PrivateTmp": "no",
"RefuseManualStart": "no",
"RefuseManualStop": "no",
"RemainAfterExit": "no",
"Requires": "basic.target",
"Restart": "no",
"RestartUSec": "100ms",
"Result": "success",
"RootDirectoryStartOnly": "no",
"SameProcessGroup": "no",
"SecureBits": "0",
"SendSIGHUP": "no",
"SendSIGKILL": "yes",
"Slice": "system.slice",
"StandardError": "inherit",
"StandardInput": "null",
"StandardOutput": "journal",
"StartLimitAction": "none",
"StartLimitBurst": "5",
"StartLimitInterval": "10000000",
"StatusErrno": "0",
"StopWhenUnneeded": "no",
"SubState": "running",
"SyslogLevelPrefix": "yes",
"SyslogPriority": "30",
"TTYReset": "no",
"TTYVHangup": "no",
|
ninegrid/dotfiles-vim | bundle/vim-orgmode/ftplugin/orgmode/liborgmode/documents.py | Python | unlicense | 7,913 | 0.028687 | # -*- coding: utf-8 -*-
"""
documents
~~~~~~~~~
TODO: explain this :)
"""
from UserList import UserList
from orgmode.liborgmode.base import MultiPurposeList, flatten_list, Direction, get_domobj_range
from orgmode.liborgmode.headings import Heading, HeadingList
class Document(object):
u"""
Representation of a whole org-mode document.
A Document consists basically of headings (see Headings) and some metadata.
TODO: explain the 'dirty' mechanism
"""
def __init__(self):
u"""
Don't call this constructor directly but use one of the concrete
implementations.
TODO: what are the concrete implementatiions?
"""
object.__init__(self)
# is a list - only the Document methods should work on this list!
self._content = None
self._dirty_meta_information = False
self._dirty_document = False
self._meta_information = MultiPurposeList(on_change=self.set_dirty_meta_information)
self._orig_meta_information_len = None
self._headings = HeadingList(obj=self)
self._deleted_headings = []
# settings needed to align tags properly
self._tabstop = 8
self._tag_column = 77
self.todo_states = [u'TODO', u'DONE']
def __unicode__(self):
if self.meta_information is None:
return u'\n'.join(self.all_headings())
return u'\n'.join(self.meta_information) + u'\n' + u'\n'.join([u'\n'.join([unicode(i)] + i.body) for i in self.all_headings()])
def __str__(self):
return self.__unicode__().encode(u'utf-8')
def get_all_todo_states(self):
u""" Convenience function that returns all todo and done states and
sequences in one big list.
:returns: [all todo/done states]
"""
return flatten_list(self.get_todo_states())
def get_todo_states(self):
u""" Returns a list containing a tuple of two lists of allowed todo
states split by todo and done states. Multiple todo-done state
sequences can be defined.
:returns: [([todo states], [done states]), ..]
"""
return self.todo_states
def tabstop():
u""" Tabstop for this document """
def fget(self):
return self._tabstop
def fset(self, value):
self._tabstop = value
return locals()
tabstop = property(**tabstop())
def tag_column():
u""" The column all tags are right-aligned to """
def fget(self):
return self._tag_column
def fset(self, value):
| self._tag_column = value
return locals()
tag_column = property(**tag_column())
def init_dom(self, heading=Heading):
u""" Initialize all headings in document - build DOM. This method
should be call prior to accessing the document.
:returns: self
"""
def init_heading(_h):
u"""
:returns the initialized heading
"""
start = _h.end + 1
prev_heading = None
while True:
new_heading = self.find_heading(start, heading=heading)
| # * Heading 1 <- heading
# * Heading 1 <- sibling
# or
# * Heading 2 <- heading
# * Heading 1 <- parent's sibling
if not new_heading or \
new_heading.level <= _h.level:
break
# * Heading 1 <- heading
# * Heading 2 <- first child
# * Heading 2 <- another child
new_heading._parent = _h
if prev_heading:
prev_heading._next_sibling = new_heading
new_heading._previous_sibling = prev_heading
_h.children.data.append(new_heading)
# the start and end computation is only
# possible when the new heading was properly
# added to the document structure
init_heading(new_heading)
if new_heading.children:
# skip children
start = new_heading.end_of_last_child + 1
else:
start = new_heading.end + 1
prev_heading = new_heading
return _h
h = self.find_heading(heading=heading)
# initialize meta information
if h:
self._meta_information.data.extend(self._content[:h._orig_start])
else:
self._meta_information.data.extend(self._content[:])
self._orig_meta_information_len = len(self.meta_information)
# initialize dom tree
prev_h = None
while h:
if prev_h:
prev_h._next_sibling = h
h._previous_sibling = prev_h
self.headings.data.append(h)
init_heading(h)
prev_h = h
h = self.find_heading(h.end_of_last_child + 1, heading=heading)
return self
def meta_information():
u"""
Meta information is text that precedes all headings in an org-mode
document. It might contain additional information about the document,
e.g. author
"""
def fget(self):
return self._meta_information
def fset(self, value):
if self._orig_meta_information_len is None:
self._orig_meta_information_len = len(self.meta_information)
if type(value) in (list, tuple) or isinstance(value, UserList):
self._meta_information[:] = flatten_list(value)
elif type(value) in (str, ):
self._meta_information[:] = value.decode(u'utf-8').split(u'\n')
elif type(value) in (unicode, ):
self._meta_information[:] = value.split(u'\n')
self.set_dirty_meta_information()
def fdel(self):
self.meta_information = u''
return locals()
meta_information = property(**meta_information())
def headings():
u""" List of top level headings """
def fget(self):
return self._headings
def fset(self, value):
self._headings[:] = value
def fdel(self):
del self.headings[:]
return locals()
headings = property(**headings())
def write(self):
u""" write the document
:returns: True if something was written, otherwise False
"""
raise NotImplementedError(u'Abstract method, please use concrete impelementation!')
def set_dirty_meta_information(self):
u""" Mark the meta information dirty so that it will be rewritten when
saving the document """
self._dirty_meta_information = True
def set_dirty_document(self):
u""" Mark the whole document dirty. When changing a heading this
method must be executed in order to changed computation of start and
end positions from a static to a dynamic computation """
self._dirty_document = True
@property
def is_dirty(self):
u"""
Return information about unsaved changes for the document and all
related headings.
:returns: Return True if document contains unsaved changes.
"""
if self.is_dirty_meta_information:
return True
if self.is_dirty_document:
return True
if self._deleted_headings:
return True
return False
@property
def is_dirty_meta_information(self):
u""" Return True if the meta information is marked dirty """
return self._dirty_meta_information
@property
def is_dirty_document(self):
u""" Return True if the document is marked dirty """
return self._dirty_document
def all_headings(self):
u""" Iterate over all headings of the current document in serialized
order
:returns: Returns an iterator object which returns all headings of
the current file in serialized order
"""
if not self.headings:
raise StopIteration()
h = self.headings[0]
while h:
yield h
h = h.next_heading
raise StopIteration()
def find_heading(
self, position=0, direction=Direction.FORWARD,
heading=Heading, connect_with_document=True):
u""" Find heading in the given direction
:postition: starting line, counting from 0 (in vim you start
counting from 1, don't forget)
:direction: downwards == Direction.FORWARD,
upwards == Direction.BACKWARD
:heading: Heading class from which new heading objects will be
instanciated
:connect_with_document: if True, the newly created heading will be
connected with the document, otherwise not
:returns: New heading object or None
"""
(start, end) = get_domobj_range(content=self._content, position=position, direction=direction, identify_fun=heading.identify_heading)
if start is not None and end is None:
end = len(self._content) - 1
if start is not None and end is not None:
return heading.parse_heading_from_data(
self._content[start:end + 1], self.get_all_todo_states(),
document=self if connect_with_document else None, orig_start=start)
# vim: set noexpandtab:
|
colour-science/colour | colour/algebra/regression.py | Python | bsd-3-clause | 1,998 | 0 | """
Regression
==========
Defines various objects to perform regression:
- :func:`colour.algebra.least_square_mapping_MoorePenrose`: *Least-squares*
mapping using *Moore-Penrose* inverse.
References
----------
- :cite:`Finlayson2015` : Finlayson, G. D., MacKiewicz, M., & Hurlbert, A.
(2015). Color Correction Using Root-Polynomial Regression. IEEE
Transactions | on Image Processing, 24(5), 1460-1470.
doi:10.1109/TIP.2015.2405336
"""
from __future__ import annotations
import numpy as np
from colour.hints import ArrayLike, NDArray
__author__ = "Colour Developers"
__copyright__ = "Copyright | 2013 Colour Developers"
__license__ = "New BSD License - https://opensource.org/licenses/BSD-3-Clause"
__maintainer__ = "Colour Developers"
__email__ = "colour-developers@colour-science.org"
__status__ = "Production"
__all__ = [
"least_square_mapping_MoorePenrose",
]
def least_square_mapping_MoorePenrose(y: ArrayLike, x: ArrayLike) -> NDArray:
"""
Compute the *least-squares* mapping from dependent variable :math:`y` to
independent variable :math:`x` using *Moore-Penrose* inverse.
Parameters
----------
y
Dependent and already known :math:`y` variable.
x
Independent :math:`x` variable(s) values corresponding with :math:`y`
variable.
Returns
-------
:class:`numpy.ndarray`
*Least-squares* mapping.
References
----------
:cite:`Finlayson2015`
Examples
--------
>>> prng = np.random.RandomState(2)
>>> y = prng.random_sample((24, 3))
>>> x = y + (prng.random_sample((24, 3)) - 0.5) * 0.5
>>> least_square_mapping_MoorePenrose(y, x) # doctest: +ELLIPSIS
array([[ 1.0526376..., 0.1378078..., -0.2276339...],
[ 0.0739584..., 1.0293994..., -0.1060115...],
[ 0.0572550..., -0.2052633..., 1.1015194...]])
"""
y = np.atleast_2d(y)
x = np.atleast_2d(x)
return np.dot(np.transpose(x), np.linalg.pinv(np.transpose(y)))
|
youtube/cobalt | third_party/llvm-project/lldb/third_party/Python/module/pexpect-2.4/examples/bd_serv.py | Python | bsd-3-clause | 10,471 | 0.001528 | #!/usr/bin/env python
"""Back door shell server
This exposes an shell terminal on a socket.
--hostname : sets the remote host name to open an ssh connection to.
--username : sets the user name to login with
--password : (optional) sets the password to login with
--port : set the local port for the server to listen on
--watch : show the virtual screen after each client request
"""
# Having the password on the command line is not a good idea, but
# then this entire project is probably not the most security concious thing
# I've ever built. This should be considered an experimental tool -- at best.
import pxssh
import pexpect
import ANSI
import time
import sys
import os
import getopt
import getpass
import traceback
import threading
import socket
def exit_with_usage(exit_code=1):
    """Print the module docstring (the usage text) and hard-exit the process.

    os._exit skips cleanup handlers; exit_code defaults to 1 (failure).
    """
    print globals()['__doc__']
    os._exit(exit_code)
class roller (threading.Thread):
    """Run a function over and over in a background thread until cancelled.

    The thread keeps calling ``function(*args, **kwargs)`` until
    ``cancel()`` sets the ``finished`` event.

    NOTE(review): ``interval`` is currently unused -- the wait between
    calls is commented out in ``run()``, so the loop spins as fast as the
    called function allows.  Confirm whether that busy loop is intended.
    """

    def __init__(self, interval, function, args=None, kwargs=None):
        """interval: seconds between calls (currently ignored, see class note).
        function: callable to invoke repeatedly.
        args/kwargs: positional/keyword arguments forwarded to ``function``.
        """
        threading.Thread.__init__(self)
        self.interval = interval
        self.function = function
        # Avoid the shared-mutable-default pitfall: fresh containers per instance.
        self.args = [] if args is None else args
        self.kwargs = {} if kwargs is None else kwargs
        self.finished = threading.Event()

    def cancel(self):
        """Stop the roller; run()'s loop exits before its next call."""
        self.finished.set()

    def run(self):
        while not self.finished.isSet():
            # self.finished.wait(self.interval)
            self.function(*self.args, **self.kwargs)
def endless_poll(child, prompt, screen, refresh_timeout=0.1):
    """Pump any pending output from *child* into the virtual *screen*.

    Intended to be invoked repeatedly by a roller() thread; each call
    drains up to 4000 bytes without blocking longer than 0.1s.
    ``prompt`` and ``refresh_timeout`` are unused by the active code
    path (only referenced by the commented-out variant below).
    """
    #child.logfile_read = screen
    try:
        s = child.read_nonblocking(4000, 0.1)
        screen.write(s)
    except:
        # NOTE(review): bare except deliberately swallows the pexpect
        # TIMEOUT/EOF raised when no data is ready, but it also hides
        # real errors -- consider narrowing to pexpect's exceptions.
        pass
#    while True:
#        #child.prompt (timeout=refresh_timeout)
#        try:
#            #child.read_nonblocking(1,timeout=refresh_timeout)
#            child.read_nonblocking(4000, 0.1)
#        except:
#            pass
def daemonize(stdin='/dev/null', stdout='/dev/null', stderr='/dev/null'):
    '''This forks the current process into a daemon. Almost none of this is
    necessary (or advisable) if your daemon is being started by inetd. In that
    case, stdin, stdout and stderr are all set up for you to refer to the
    network connection, and the fork()s and session manipulation should not be
    done (to avoid confusing inetd). Only the chdir() and umask() steps remain
    as useful.

    References:
        UNIX Programming FAQ
            1.7 How do I get my program to act like a daemon?
            http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
        Advanced Programming in the Unix Environment
            W. Richard Stevens, 1992, Addison-Wesley, ISBN 0-201-56317-7.

    The stdin, stdout, and stderr arguments are file names that will be opened
    and be used to replace the standard file descriptors in sys.stdin,
    sys.stdout, and sys.stderr. These arguments are optional and default to
    /dev/null. Note that stderr is opened unbuffered, so if it shares a file
    with stdout then interleaved output may not appear in the order that you
    expect. '''
    # Do first fork: detach from the launching process.
    try:
        pid = os.fork()
        if pid > 0:
            sys.exit(0)  # Exit first parent.
    except OSError as e:
        # Fixed: the original message text was corrupted ("f | ork").
        sys.stderr.write("fork #1 failed: (%d) %s\n" % (e.errno, e.strerror))
        sys.exit(1)
    # Decouple from parent environment.
    os.chdir("/")
    os.umask(0)
    os.setsid()
    # Do second fork: ensure we can never reacquire a controlling terminal.
    try:
        pid = os.fork()
        if pid > 0:
            sys.exit(0)  # Exit second parent.
    except OSError as e:
        sys.stderr.write("fork #2 failed: (%d) %s\n" % (e.errno, e.strerror))
        sys.exit(1)

    # Now I am a daemon!
    # Redirect standard file descriptors.
    si = open(stdin, 'r')
    so = open(stdout, 'a+')
    # NOTE(review): buffering=0 with a text mode is Python 2 only; under
    # Python 3 unbuffered requires binary mode -- confirm target interpreter.
    se = open(stderr, 'a+', 0)
    os.dup2(si.fileno(), sys.stdin.fileno())
    os.dup2(so.fileno(), sys.stdout.fileno())
    os.dup2(se.fileno(), sys.stderr.fileno())
    # I now return as the daemon
    return 0
def add_cursor_blink(response, row, col):
    """Insert a blinking-cursor image tag into *response* at (row, col).

    The rendered screen is treated as a flat string of 80-column rows;
    rows are 1-based and columns 0-based here.
    """
    offset = (row - 1) * 80 + col
    cursor_img = '<img src="http://www.noah.org/cursor.gif">'
    return ''.join([response[:offset], cursor_img, response[offset:]])
def main():
try:
optlist, args = getopt.getopt(
sys.argv[
1:], 'h?d', [
'help', 'h', '?', 'hostname=', 'username=', 'password=', 'port=', 'watch'])
except Exception as e:
print str(e)
exit_with_usage()
command_line_options = dict(optlist)
options = dict(optlist)
# There are a million ways to cry for help. These are but a few of them.
if [elem for elem in command_line_options if elem in [
'-h', '--h', '-?', '--?', '--help']]:
exit_with_usage(0)
hostname = "127.0.0.1"
port = 1664
username = os.getenv('USER')
password = ""
daemon_mode = False
if '-d' in options:
daemon_mode = True
if '--watch' in options:
watch_mode = True
else:
watch_mode = False
if '--hostname' in options:
hostname = options['--hostname']
if '--port' in options:
port = int(options['--port'])
if '--username' in options:
username = options['--username']
print "Login for %s@%s:%s" % (username, hostname, port)
if '--password' in options:
password = options['--password']
else:
password = getpass.getpass('password: ')
if daemon_mode:
print "daemonizing server"
daemonize()
# daemonize('/dev/null','/tmp/daemon.log','/tmp/daemon.log')
sys.stdout.write('server started with pid %d\n' % os.getpid())
virtual_screen = ANSI.ANSI(24, 80)
child = pxssh.pxssh()
child.login(hostname, username, password)
print 'created shell. command line prompt is', child.PROMPT
#child.sendline ('stty -echo')
# child.setecho(False)
virtual_screen.write(child.before)
virtual_screen.write(child.after)
if os.path.exists("/tmp/mysock"):
os.remove("/tmp/mysock")
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
localhost = '127.0.0.1'
s.bind('/tmp/mysock')
os.chmod('/tmp/mysock', 0o777)
print 'Listen'
s.listen(1)
print 'Accept'
#s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#localhost = '127.0.0.1'
#s.bind((localhost, port))
# print 'Listen'
# s.listen(1)
r = roller(0.01, endless_poll, (child, child.PROMPT, virtual_screen))
r.start()
print "screen poll updater started in background thread"
sys.stdout.flush()
try:
while True:
conn, addr = s.accept()
print 'Connected by', addr
data = conn.recv(1024)
if data[0] != ':':
cmd = ':sendline'
arg = data.strip()
else:
request = data.split(' ', 1)
if len(request) > 1:
cmd = request[0].strip()
arg = request[1].strip()
else:
cmd = request[0].strip()
if cmd == ':exit':
r.cancel()
break
elif cmd == ':sendline':
child.sendline(arg)
# child.prompt(timeout=2)
time.sleep(0.2)
shell_window = str(virtual_screen)
elif cmd == ':send' or cmd == ':xsend':
if cmd == ':xsend':
arg = arg.decode("hex")
child.send(arg)
time.sleep(0.2)
shell_window = str(virtual_screen)
elif cmd == ':cursor':
shell_window = '%x%x' % (
virtual_screen.cur_r, virtual_screen.cur_c)
elif cmd == ':refresh':
shell_window = str(virtual_screen)
response = []
response.append(shell_window)
#response = add_cursor_blink (response, row, col)
sent = conn.send('\n'.join(response))
|
mrakitin/sirepo | tests/template/srw_generate_data/nsls-ii-hxn-beamline-ssa-closer.py | Python | apache-2.0 | 36,110 | 0.004957 | #!/usr/bin/env python
import os
try:
__IPYTHON__
import sys
del sys.argv[1:]
except:
pass
import srwl_bl
import srwlib
import srwlpy
import srwl_uti_smp
def set_optics(v=None):
    """Build the SRW optical container (SRWLOptC) for this beamline.

    Walks the beamline element names in order, appending one SRW optical
    element (aperture, drift, mirror, crystal, CRL, lens) plus its matching
    propagation-parameter list for each.  All numeric settings are read from
    attributes of *v* named ``op_<element>_<param>`` (the varParam bundle).
    "watch" positions contribute no element, only bookkeeping.

    Fix: two lines corrupted in the CRL2 arm (comment marker and
    ``_atten_len`` keyword) are restored.
    """
    el = []
    pp = []
    names = ['S1', 'S1_HCM', 'HCM', 'HCM_DCM_C1', 'DCM_C1', 'DCM_C2', 'DCM_C2_HFM', 'HFM', 'After_HFM', 'After_HFM_CRL1', 'CRL1', 'CRL2', 'CRL2_Before_SSA', 'Before_SSA', 'SSA', 'SSA_Before_FFO', 'Before_FFO', 'AFFO', 'FFO', 'FFO_At_Sample', 'At_Sample']
    for el_name in names:
        if el_name == 'S1':
            # S1: aperture 26.62m
            el.append(srwlib.SRWLOptA(
                _shape=v.op_S1_shape,
                _ap_or_ob='a',
                _Dx=v.op_S1_Dx,
                _Dy=v.op_S1_Dy,
                _x=v.op_S1_x,
                _y=v.op_S1_y,
            ))
            pp.append(v.op_S1_pp)
        elif el_name == 'S1_HCM':
            # S1_HCM: drift 26.62m
            el.append(srwlib.SRWLOptD(
                _L=v.op_S1_HCM_L,
            ))
            pp.append(v.op_S1_HCM_pp)
        elif el_name == 'HCM':
            # HCM: sphericalMirror 28.35m
            el.append(srwlib.SRWLOptMirSph(
                _r=v.op_HCM_r,
                _size_tang=v.op_HCM_size_tang,
                _size_sag=v.op_HCM_size_sag,
                _nvx=v.op_HCM_nvx,
                _nvy=v.op_HCM_nvy,
                _nvz=v.op_HCM_nvz,
                _tvx=v.op_HCM_tvx,
                _tvy=v.op_HCM_tvy,
                _x=v.op_HCM_x,
                _y=v.op_HCM_y,
            ))
            pp.append(v.op_HCM_pp)
        elif el_name == 'HCM_DCM_C1':
            # HCM_DCM_C1: drift 28.35m
            el.append(srwlib.SRWLOptD(
                _L=v.op_HCM_DCM_C1_L,
            ))
            pp.append(v.op_HCM_DCM_C1_pp)
        elif el_name == 'DCM_C1':
            # DCM_C1: crystal 30.42m
            crystal = srwlib.SRWLOptCryst(
                _d_sp=v.op_DCM_C1_d_sp,
                _psi0r=v.op_DCM_C1_psi0r,
                _psi0i=v.op_DCM_C1_psi0i,
                _psi_hr=v.op_DCM_C1_psiHr,
                _psi_hi=v.op_DCM_C1_psiHi,
                _psi_hbr=v.op_DCM_C1_psiHBr,
                _psi_hbi=v.op_DCM_C1_psiHBi,
                _tc=v.op_DCM_C1_tc,
                _ang_as=v.op_DCM_C1_ang_as,
            )
            crystal.set_orient(
                _nvx=v.op_DCM_C1_nvx,
                _nvy=v.op_DCM_C1_nvy,
                _nvz=v.op_DCM_C1_nvz,
                _tvx=v.op_DCM_C1_tvx,
                _tvy=v.op_DCM_C1_tvy,
            )
            el.append(crystal)
            pp.append(v.op_DCM_C1_pp)
        elif el_name == 'DCM_C2':
            # DCM_C2: crystal 30.42m
            crystal = srwlib.SRWLOptCryst(
                _d_sp=v.op_DCM_C2_d_sp,
                _psi0r=v.op_DCM_C2_psi0r,
                _psi0i=v.op_DCM_C2_psi0i,
                _psi_hr=v.op_DCM_C2_psiHr,
                _psi_hi=v.op_DCM_C2_psiHi,
                _psi_hbr=v.op_DCM_C2_psiHBr,
                _psi_hbi=v.op_DCM_C2_psiHBi,
                _tc=v.op_DCM_C2_tc,
                _ang_as=v.op_DCM_C2_ang_as,
            )
            crystal.set_orient(
                _nvx=v.op_DCM_C2_nvx,
                _nvy=v.op_DCM_C2_nvy,
                _nvz=v.op_DCM_C2_nvz,
                _tvx=v.op_DCM_C2_tvx,
                _tvy=v.op_DCM_C2_tvy,
            )
            el.append(crystal)
            pp.append(v.op_DCM_C2_pp)
        elif el_name == 'DCM_C2_HFM':
            # DCM_C2_HFM: drift 30.42m
            el.append(srwlib.SRWLOptD(
                _L=v.op_DCM_C2_HFM_L,
            ))
            pp.append(v.op_DCM_C2_HFM_pp)
        elif el_name == 'HFM':
            # HFM: sphericalMirror 32.64m
            el.append(srwlib.SRWLOptMirSph(
                _r=v.op_HFM_r,
                _size_tang=v.op_HFM_size_tang,
                _size_sag=v.op_HFM_size_sag,
                _nvx=v.op_HFM_nvx,
                _nvy=v.op_HFM_nvy,
                _nvz=v.op_HFM_nvz,
                _tvx=v.op_HFM_tvx,
                _tvy=v.op_HFM_tvy,
                _x=v.op_HFM_x,
                _y=v.op_HFM_y,
            ))
            pp.append(v.op_HFM_pp)
        elif el_name == 'After_HFM':
            # After_HFM: watch 32.64m
            pass
        elif el_name == 'After_HFM_CRL1':
            # After_HFM_CRL1: drift 32.64m
            el.append(srwlib.SRWLOptD(
                _L=v.op_After_HFM_CRL1_L,
            ))
            pp.append(v.op_After_HFM_CRL1_pp)
        elif el_name == 'CRL1':
            # CRL1: crl 34.15m
            el.append(srwlib.srwl_opt_setup_CRL(
                _foc_plane=v.op_CRL1_foc_plane,
                _delta=v.op_CRL1_delta,
                _atten_len=v.op_CRL1_atten_len,
                _shape=v.op_CRL1_shape,
                _apert_h=v.op_CRL1_apert_h,
                _apert_v=v.op_CRL1_apert_v,
                _r_min=v.op_CRL1_r_min,
                _n=v.op_CRL1_n,
                _wall_thick=v.op_CRL1_wall_thick,
                _xc=v.op_CRL1_x,
                _yc=v.op_CRL1_y,
            ))
            pp.append(v.op_CRL1_pp)
        elif el_name == 'CRL2':
            # CRL2: crl 34.15m
            el.append(srwlib.srwl_opt_setup_CRL(
                _foc_plane=v.op_CRL2_foc_plane,
                _delta=v.op_CRL2_delta,
                _atten_len=v.op_CRL2_atten_len,
                _shape=v.op_CRL2_shape,
                _apert_h=v.op_CRL2_apert_h,
                _apert_v=v.op_CRL2_apert_v,
                _r_min=v.op_CRL2_r_min,
                _n=v.op_CRL2_n,
                _wall_thick=v.op_CRL2_wall_thick,
                _xc=v.op_CRL2_x,
                _yc=v.op_CRL2_y,
            ))
            pp.append(v.op_CRL2_pp)
        elif el_name == 'CRL2_Before_SSA':
            # CRL2_Before_SSA: drift 34.15m
            el.append(srwlib.SRWLOptD(
                _L=v.op_CRL2_Before_SSA_L,
            ))
            pp.append(v.op_CRL2_Before_SSA_pp)
        elif el_name == 'Before_SSA':
            # Before_SSA: watch 61.75m
            pass
        elif el_name == 'SSA':
            # SSA: aperture 61.75m
            el.append(srwlib.SRWLOptA(
                _shape=v.op_SSA_shape,
                _ap_or_ob='a',
                _Dx=v.op_SSA_Dx,
                _Dy=v.op_SSA_Dy,
                _x=v.op_SSA_x,
                _y=v.op_SSA_y,
            ))
            pp.append(v.op_SSA_pp)
        elif el_name == 'SSA_Before_FFO':
            # SSA_Before_FFO: drift 61.75m
            el.append(srwlib.SRWLOptD(
                _L=v.op_SSA_Before_FFO_L,
            ))
            pp.append(v.op_SSA_Before_FFO_pp)
        elif el_name == 'Before_FFO':
            # Before_FFO: watch 109.0m
            pass
        elif el_name == 'AFFO':
            # AFFO: aperture 109.0m
            el.append(srwlib.SRWLOptA(
                _shape=v.op_AFFO_shape,
                _ap_or_ob='a',
                _Dx=v.op_AFFO_Dx,
                _Dy=v.op_AFFO_Dy,
                _x=v.op_AFFO_x,
                _y=v.op_AFFO_y,
            ))
            pp.append(v.op_AFFO_pp)
        elif el_name == 'FFO':
            # FFO: lens 109.0m
            el.append(srwlib.SRWLOptL(
                _Fx=v.op_FFO_Fx,
                _Fy=v.op_FFO_Fy,
                _x=v.op_FFO_x,
                _y=v.op_FFO_y,
            ))
            pp.append(v.op_FFO_pp)
        elif el_name == 'FFO_At_Sample':
            # FFO_At_Sample: drift 109.0m
            el.append(srwlib.SRWLOptD(
                _L=v.op_FFO_At_Sample_L,
            ))
            pp.append(v.op_FFO_At_Sample_pp)
        elif el_name == 'At_Sample':
            # At_Sample: watch 109.018147m
            pass
    # Final (post-propagation) parameter set closes out the container.
    pp.append(v.op_fin_pp)
    return srwlib.SRWLOptC(el, pp)
varParam = srwl_bl.srwl_uti_ext_options([
['name', 's', 'NSLS-II HXN beamline: SSA closer', 'simulation name'],
#---Data Folder
['fdir', 's', '', 'folder (directory) name for reading-in input and saving output data files'],
#---Electron Beam
['ebm_nm', 's', '', 'standard electron beam name'],
['ebm_nms', 's', '', 'standard electron beam name suffix: e.g. can be Day1, Final'],
['ebm_i', 'f', 0.5, 'electron beam current [A]'],
['ebm_e |
cherry-hyx/hjb-test | pluginsAgent/RichAPM/packages/dictconfig.py | Python | artistic-2.0 | 22,792 | 0.000219 | #
# Copyright (C) 2009-2013 Vinay Sajip. See LICENSE.txt for details.
#
import logging.handlers
import re
import sys
# Python 2/3 compatibility shims: these builtins were removed in Python 3,
# so alias them to their closest modern equivalents when absent.  The rest
# of this module can then use basestring/StandardError unconditionally.
try:
    basestring
except NameError:
    basestring = str
try:
    StandardError
except NameError:
    StandardError = Exception
# Shape of a legal Python identifier (case-insensitive match).
IDENTIFIER = re.compile('^[a-z_][a-z0-9_]*$', re.I)
def valid_ident(s):
    """Return True if *s* is a valid Python identifier; raise ValueError otherwise."""
    if IDENTIFIER.match(s) is None:
        raise ValueError('Not a valid Python identifier: %r' % s)
    return True
#
# This function is defined in logging only in recent versions of Python
#
# logging._checkLevel normalises a level given either as an int or as a
# level name; it only became importable in later Pythons, so fall back to
# a local re-implementation on versions where the import fails.
try:
    from logging import _checkLevel
except ImportError:
    def _checkLevel(level):
        # Ints pass through untouched; strings must name a known level.
        if isinstance(level, int):
            rv = level
        elif str(level) == level:
            # NOTE(review): logging._levelNames is the Python 2 era private
            # mapping (absent in Python 3.4+) -- this fallback assumes an
            # interpreter where it still exists.
            if level not in logging._levelNames:
                raise ValueError('Unknown level: %r' % level)
            rv = logging._levelNames[level]
        else:
            raise TypeError('Level not an integer or a '
                            'valid string: %r' % level)
        return rv
# The ConvertingXXX classes are wrappers around standard Python containers,
# and they serve to convert any suitable values in the container. The
# conversion converts base dicts, lists and tuples to their wrapped
# equivalents, whereas strings which match a conversion format are converted
# appropriately.
#
# Each wrapper should have a configurator attribute holding the actual
# configurator to use for conversion.
class ConvertingDict(dict):
    """A converting dictionary wrapper.

    Lookups route the raw value through the attached ``configurator``'s
    convert() so nested containers and "ext://"/"cfg://" strings resolve
    lazily; converted results are cached back into the dict and given
    parent/key back-references.  (Two corrupted lines in get()/pop() are
    restored here; behavior is otherwise unchanged.)
    """
    def __getitem__(self, key):
        value = dict.__getitem__(self, key)
        result = self.configurator.convert(value)
        # If the converted value is different, save for next time
        if value is not result:
            self[key] = result
            if type(result) in (ConvertingDict, ConvertingList,
                                ConvertingTuple):
                result.parent = self
                result.key = key
        return result
    def get(self, key, default=None):
        value = dict.get(self, key, default)
        result = self.configurator.convert(value)
        # If the converted value is different, save for next time
        if value is not result:
            self[key] = result
            if type(result) in (ConvertingDict, ConvertingList,
                                ConvertingTuple):
                result.parent = self
                result.key = key
        return result
    def pop(self, key, default=None):
        # The key is being removed, so the converted value is not written
        # back -- but back-references are still attached to containers.
        value = dict.pop(self, key, default)
        result = self.configurator.convert(value)
        if value is not result:
            if type(result) in (ConvertingDict, ConvertingList,
                                ConvertingTuple):
                result.parent = self
                result.key = key
        return result
class ConvertingList(list):
    """A converting list wrapper (see ConvertingDict for the mechanism)."""
    def __getitem__(self, key):
        value = list.__getitem__(self, key)
        result = self.configurator.convert(value)
        # If the converted value is different, save for next time
        if value is not result:
            self[key] = result
            if type(result) in (ConvertingDict, ConvertingList,
                                ConvertingTuple):
                result.parent = self
                result.key = key
        return result
    def pop(self, idx=-1):
        value = list.pop(self, idx)
        result = self.configurator.convert(value)
        if value is not result:
            if type(result) in (ConvertingDict, ConvertingList,
                                ConvertingTuple):
                result.parent = self
        return result
class ConvertingTuple(tuple):
    """A converting tuple wrapper."""
    def __getitem__(self, key):
        value = tuple.__getitem__(self, key)
        result = self.configurator.convert(value)
        # Tuples are immutable, so the converted value cannot be cached;
        # back-references are still attached to converted containers.
        if value is not result:
            if type(result) in (ConvertingDict, ConvertingList,
                                ConvertingTuple):
                result.parent = self
                result.key = key
        return result
class BaseConfigurator(object):
"""
The configurator base class which defines some useful defaults.
"""
CONVERT_PATTERN = re.compile(r'^(?P<prefix>[a-z]+)://(?P<suffix>.*)$')
WORD_PATTERN = re.compile(r'^\s*(\w+)\s*')
DOT_PATTERN = re.compile(r'^\.\s*(\w+)\s*')
INDEX_PATTERN = re.compile(r'^\[\s*(\w+)\s*\]\s*')
DIGIT_PATTERN = re.compile(r'^\d+$')
value_converters = {
'ext': 'ext_convert',
'cfg': 'cfg_convert',
}
# We might want to use a different one, e.g. importlib
importer = __import__
"Allows the importer to be redefined."
def __init__(self, config):
"""
Initialise an instance with the specified configuration
dictionary.
"""
self.config = ConvertingDict(config)
self.config.configurator = self
def resolve(self, s):
"""
Resolve strings to objects using standard import and attribute
syntax.
"""
name = s.split('.')
used = name.pop(0)
try:
found = self.importer(used)
for frag in name:
used += '.' + frag
try:
found = getattr(found, frag)
except AttributeError:
self.importer(used)
found = getattr(found, frag)
return found
except ImportError:
e, tb = sys.exc_info()[1:]
v = ValueError('Cannot resolve %r: %s' % (s, e))
v.__cause__, v.__traceback__ = e, tb
raise v
def ext_convert(self, value):
"""Default converter for the ext:// protocol."""
return self.resolve(value)
def cfg_convert(self, value):
"""Default converter for the cfg:// protocol."""
rest = value
m = self.WORD_PATTERN.match(rest)
if m is None:
raise ValueError("Unable to convert %r" % value)
else:
rest = rest[m.end():]
d = self.config[m.groups()[0]]
# print d, rest
while rest:
m = self.DOT_PATTERN.match(rest)
if m:
d = d[m.groups()[0]]
else:
m = self.INDEX_PATTERN.match(rest)
if m:
idx = m.groups()[0]
if not self.DIGIT_PATTERN.match(idx):
d = d[idx]
else:
try:
n = int(idx) # try as number first (most likely)
d = d[n]
except TypeError:
d = d[idx]
if m:
rest = rest[m.end():]
else:
raise ValueError('Unable to convert '
'%r at %r' % (value, rest))
# rest should be empty
return d
def convert(self, value):
"""
Convert values to an appropriate type. dicts, lists and tuples are
replaced by their converting alternatives. Strings are checked to
see if they have a conversion format and are converted if they do.
"""
if not isinstance(value, ConvertingDict) and isinstance(value, dict):
value = ConvertingDict(value)
value.configurator = self
elif not isinstance(value, ConvertingList) and isinstance(value, list):
value = ConvertingList(value)
value.configurator = self
elif not isinstance(value, ConvertingTuple) and \
isinstance(value, tuple):
value = ConvertingTuple(value)
value.configurator = self
elif isinstance(value, basestring):
m = self.CONVERT_PATTERN.match(value)
if m:
d = m.groupdict()
prefix = d['prefix']
converter = self.value_converters.get(prefix, None)
if converter:
suffix = d['suffix']
converter |
borg-project/borg | borg/experiments/simulate_runs.py | Python | mit | 6,090 | 0.009031 | """@author: Bryan Silverthorn <bcs@cargo-cult.org>"""
import os.path
import csv
import uuid
import sklearn
import condor
import borg
logger = borg.get_logger(__name__, default_level = "INFO")
class PortfolioMaker(object):
    """Factory that builds a borg portfolio from a "subname:variant:..." spec.

    The name before the first colon selects the portfolio family; later
    colon-separated parts toggle variants (currently planner choices).
    Instances are callable with (suite, train_data, ...) and return a
    running-portfolio factory.
    """
    def __init__(self, portfolio_name, bins = 60):
        # bins: discretization granularity for the run-time distribution
        # models (only used by the model-based portfolio families).
        name_parts = portfolio_name.split(":")
        self.name = portfolio_name
        self.subname = name_parts[0]
        self.variants = name_parts[1:]
        self._bins = bins
    def __call__(self, suite, train_data, test_data = None, model_kwargs = {}):
        """Train (if needed) and wrap a portfolio for *suite*.

        test_data is accepted for interface parity with SolverMaker but is
        not used here.  NOTE(review): model_kwargs uses a shared mutable
        default -- harmless while callers never mutate it, but worth fixing.
        """
        # Keep a handle on the untrimmed data; model estimators receive it
        # alongside the possibly-filtered training subset.
        full_data = train_data
        logger.info("making portfolio %s; model_kwargs: %s", self.name, model_kwargs)
        # Variant flags select the planner used by planning portfolios.
        if "knapsack" in self.variants:
            planner = borg.planners.ReorderingPlanner(borg.planners.KnapsackPlanner())
        elif "streeter" in self.variants:
            planner = borg.planners.StreeterPlanner()
        else:
            planner = borg.planners.default
        # Simple baselines need no model fitting.
        if self.subname == "random":
            portfolio = borg.portfolios.RandomPortfolio()
        elif self.subname == "uniform":
            portfolio = borg.portfolios.UniformPortfolio()
        elif self.subname == "baseline":
            portfolio = borg.portfolios.BaselinePortfolio(suite, train_data)
        elif self.subname == "oracle":
            portfolio = borg.portfolios.OraclePortfolio()
        else:
            bins = self._bins
            # Model-based families: the subname suffix picks the estimator.
            if self.subname.endswith("-mul"):
                estimator = borg.models.MulEstimator(**model_kwargs)
            elif self.subname.endswith("-dir"):
                estimator = borg.models.MulDirMatMixEstimator(**model_kwargs)
            elif self.subname.endswith("-log"):
                estimator = borg.models.DiscreteLogNormalMatMixEstimator(**model_kwargs)
            else:
                raise ValueError("unrecognized portfolio subname: {0}".format(self.subname))
            train_data = train_data.only_nontrivial(train_data.common_budget / bins) # XXX ?
            model = estimator(train_data, bins, full_data)
            # ...and the subname prefix picks how the model is consumed.
            if self.subname.startswith("preplanning-"):
                portfolio = borg.portfolios.PreplanningPortfolio(suite, model, planner = planner)
            elif self.subname.startswith("probabilistic-"):
                regress = borg.regression.NearestRTDRegression(model)
                portfolio = borg.portfolios.PureModelPortfolio(suite, model, regress)
            else:
                raise ValueError("unrecognized portfolio subname: {0}".format(self.subname))
        return borg.solver_io.RunningPortfolioFactory(portfolio, suite)
class SolverMaker(object):
    """Factory that simply looks a named solver up in a suite.

    Mirrors PortfolioMaker's calling convention so experiment runs can
    treat single solvers and portfolios uniformly; the training/test data
    arguments are accepted but ignored.
    """
    def __init__(self, solver_name):
        # Both attributes carry the same string, matching the
        # name/subname pair exposed by PortfolioMaker.
        self.name = self.subname = solver_name
    def __call__(self, suite, train_data, test_data = None, model_kwargs = {}):
        """Return the ready-made solver registered under our name."""
        return suite.solvers[self.name]
def simulate_run(run, maker, train_data, test_data):
    """Simulate portfolio execution on a train/test split.

    Builds a fake suite that replays the recorded runs in ``test_data``,
    constructs the solver/portfolio via ``maker``, and "executes" it on
    every test instance under the split's common budget.

    Returns a list of CSV rows:
    [category, solver name, budget, CPU cost, "TRUE"/"FALSE", split UUID].
    """
    # Tag every row from this invocation so folds can be told apart later.
    split_id = uuid.uuid4()
    budget = test_data.common_budget
    #budget = test_data.common_budget / 4
    # FakeSuite replays recorded solver behavior instead of running solvers.
    suite = borg.fake.FakeSuite(test_data)
    solver = maker(suite, train_data, test_data)
    rows = []
    for (i, instance_id) in enumerate(test_data.run_lists):
        logger.info("simulating run %i/%i on %s", i, len(test_data), instance_id)
        with suite.domain.task_from_path(instance_id) as instance:
            with borg.accounting() as accountant:
                answer = solver.start(instance).run_then_stop(budget)
            # Success is judged by the domain, not by mere termination.
            succeeded = suite.domain.is_final(instance, answer)
            cpu_cost = accountant.total.cpu_seconds
            logger.info(
                "%s %s on %s (%.2f CPU s)",
                maker.name,
                "succeeded" if succeeded else "failed",
                os.path.basename(instance),
                cpu_cost,
                )
            success_str = "TRUE" if succeeded else "FALSE"
            rows.append([run["category"], maker.name, budget, cpu_cost, success_str, split_id])
    return rows
@borg.annotations(
    out_path = ("results CSV output path"),
    runs = ("path to JSON runs specification", "positional", None, borg.util.load_json),
    repeats = ("number of times to repeat each run", "option", None, int),
    workers = ("submit jobs?", "option", "w", int),
    local = ("workers are local?", "flag"),
    )
def main(out_path, runs, repeats = 5, workers = 0, local = False):
    """Simulate portfolio and solver behavior.

    For every run spec, builds the requested maker(s) and train/test
    splits, fans the simulations out through condor, and streams result
    rows to a CSV at *out_path*.  (Two corrupted source lines in the
    maker-selection branch are restored here.)
    """
    logger.info("simulating %i runs", len(runs) * repeats)
    # Memoize bundle loading so runs sharing a bundle parse it only once.
    get_run_data = borg.util.memoize(borg.storage.RunData.from_bundle)
    def yield_jobs():
        # One (simulate_run, args) job per maker per train/test fold.
        for run in runs:
            train_data = get_run_data(run["train_bundle"])
            if run.get("only_nontrivial", False):
                train_data = train_data.only_nontrivial()
            if run["test_bundle"] == "-":
                # No separate test bundle: cross-validate within training.
                validation = sklearn.cross_validation.KFold(len(train_data), repeats, indices = False)
                data_sets = [(train_data.masked(v), train_data.masked(e)) for (v, e) in validation]
            else:
                test_data = get_run_data(run["test_bundle"])
                if run.get("only_nontrivial", False):
                    test_data = test_data.only_nontrivial()
                data_sets = [(train_data, test_data)] * repeats
            if run["portfolio_name"] == "-":
                # "-" means: benchmark every individual solver instead.
                makers = map(SolverMaker, train_data.solver_names)
            else:
                makers = [PortfolioMaker(run["portfolio_name"])]
            for maker in makers:
                for (train_fold_data, test_fold_data) in data_sets:
                    yield (simulate_run, [run, maker, train_fold_data, test_fold_data])
    with borg.util.openz(out_path, "wb") as out_file:
        writer = csv.writer(out_file)
        writer.writerow(["category", "solver", "budget", "cost", "success", "split"])
        for (_, rows) in condor.do(yield_jobs(), workers, local):
            writer.writerows(rows)
            # Flush per batch so partial results survive interruption.
            out_file.flush()
# Delegate CLI parsing and dispatch to borg's script wrapper.
if __name__ == "__main__":
    borg.script(main)
|
tejal29/pants | tests/python/pants_test/backend/codegen/tasks/test_antlr_gen.py | Python | apache-2.0 | 4,507 | 0.007544 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import re
from textwrap import dedent
from twitter.common.dirutil.fileset import Fileset
from pants.backend.codegen.tasks.antlr_gen import AntlrGen
from pants.base.address import SyntheticAddress
from pants_test.jvm.nailgun_task_test_base import NailgunTaskTestBase
class AntlrGenTest(NailgunTaskTestBase):
    """Tests for the ANTLR4 code-generation task.

    Restores two source lines corrupted in transit: the call to
    execute_antlr4_test in test_derived_package and the 'sub/not_read.g4'
    source entry in test_derived_package_invalid.
    """

    @classmethod
    def task_type(cls):
        return AntlrGen

    # Template values shared by every test; formatted into paths, grammar
    # text, and BUILD files below.
    PARTS = {'srcroot': 'testprojects/src/antlr',
             'dir': 'this/is/a/directory',
             'name': 'smoke',
             'prefix': 'SMOKE'}

    # Matches a generated-Java "package com.foo.bar;" declaration line.
    PACKAGE_RE = re.compile(r'^\s*package\s+(?P<package_name>[^\s]+)\s*;\s*$')

    def setUp(self):
        super(AntlrGenTest, self).setUp()
        # A minimal but valid ANTLR4 grammar for the task to generate from.
        self.create_file(relpath='{srcroot}/{dir}/{prefix}.g4'.format(**self.PARTS),
                         contents=dedent('''
                           grammar {prefix};
                           ////////////////////
                           start  : letter EOF ;
                           letter : LETTER ;
                           ////////////////////
                           fragment LETTER : [a-zA-Z] ;
                         '''.format(**self.PARTS)))

    def create_context(self):
        # generate a context to contain the build graph for the input target, then execute
        antlr_target = self.target('{srcroot}/{dir}:{name}'.format(**self.PARTS))
        return self.context(target_roots=[antlr_target])

    def execute_antlr4_test(self, expected_package):
        context = self.create_context()
        task = self.execute(context)

        # get the synthetic target from the private graph
        task_outdir = os.path.join(task.workdir, 'antlr4', 'gen-java')
        syn_sourceroot = os.path.join(task_outdir, self.PARTS['srcroot'])
        syn_target_name = ('{srcroot}/{dir}.{name}'.format(**self.PARTS)).replace('/', '.')
        syn_address = SyntheticAddress(spec_path=os.path.relpath(syn_sourceroot, self.build_root),
                                       target_name=syn_target_name)
        syn_target = context.build_graph.get_target(syn_address)

        # verify that the synthetic target's list of sources match what are actually created
        def re_relativize(p):
            """Take a path relative to task_outdir, and make it relative to the build_root"""
            return os.path.relpath(os.path.join(task_outdir, p), self.build_root)

        actual_sources = [re_relativize(s) for s in Fileset.rglobs('*.java', root=task_outdir)]
        self.assertEquals(set(syn_target.sources_relative_to_buildroot()), set(actual_sources))

        # and that the synthetic target has a valid source root and the generated sources have the
        # expected java package
        def get_package(path):
            with open(path) as fp:
                for line in fp:
                    match = self.PACKAGE_RE.match(line)
                    if match:
                        return match.group('package_name')
            return None

        for source in syn_target.sources_relative_to_source_root():
            source_path = os.path.join(syn_sourceroot, source)
            self.assertTrue(os.path.isfile(source_path),
                            "{0} is not the source root for {1}".format(syn_sourceroot, source))
            self.assertEqual(expected_package, get_package(source_path))

    def test_explicit_package(self):
        # An explicit package= on the target wins over the directory layout.
        self.add_to_build_file('{srcroot}/{dir}/BUILD'.format(**self.PARTS), dedent('''
          java_antlr_library(
            name='{name}',
            compiler='antlr4',
            package='this.is.a.package',
            sources=['{prefix}.g4'],
          )
        '''.format(**self.PARTS)))
        self.execute_antlr4_test('this.is.a.package')

    def test_derived_package(self):
        # Without package=, the package is derived from the source directory.
        self.add_to_build_file('{srcroot}/{dir}/BUILD'.format(**self.PARTS), dedent('''
          java_antlr_library(
            name='{name}',
            compiler='antlr4',
            sources=['{prefix}.g4'],
          )
        '''.format(**self.PARTS)))
        self.execute_antlr4_test(self.PARTS['dir'].replace('/', '.'))

    def test_derived_package_invalid(self):
        # Sources spread across multiple directories make package derivation
        # ambiguous, which must be reported as an error.
        self.create_file(relpath='{srcroot}/{dir}/sub/not_read.g4'.format(**self.PARTS),
                         contents='// does not matter')
        self.add_to_build_file('{srcroot}/{dir}/BUILD'.format(**self.PARTS), dedent('''
          java_antlr_library(
            name='{name}',
            compiler='antlr4',
            sources=['{prefix}.g4', 'sub/not_read.g4'],
          )
        '''.format(**self.PARTS)))
        with self.assertRaises(AntlrGen.AmbiguousPackageError):
            self.execute(self.create_context())
|
class Solution:
    def maxProfit(self, prices, fee):
        """Maximum profit from unlimited stock transactions with a per-sale fee.

        Two-state DP over days: dp[0][i] is the best balance while holding a
        share after day i, dp[1][i] the best balance while not holding.
        O(n) time and space (the lists could be collapsed to O(1) space).
        Restores a line corrupted in transit ("pr | ices").
        """
        dp = [[-prices[0]], [0]]
        for i in range(1, len(prices)):
            # Either keep yesterday's state, or trade at today's price.
            dp[0].append(max(dp[0][i-1], dp[1][i-1]-prices[i]))
            dp[1].append(max(dp[0][i-1]+prices[i]-fee, dp[1][i-1]))
        return dp[1][-1]


print(Solution().maxProfit([1, 3, 2, 8, 4, 9], 2))
|
TU-Berlin/mathosphere | pomlp/lib/latex2mathml/setup.py | Python | apache-2.0 | 1,134 | 0.001764 | from distutils.core import setup
# Read version and long description up front, closing the files promptly
# (the original opened them inline and leaked the handles until GC).
with open('VERSION') as version_file:
    VERSION = version_file.read()

with open('README.rst') as readme_file:
    LONG_DESCRIPTION = readme_file.read()

setup(
    name='latex2mathml',
    version=VERSION,
    packages=['latex2mathml'],
    url='https://github.com/Code-ReaQtor/latex2mathml',
    download_url='https://github.com/Code-ReaQtor/latex2mathml/tarball/{}'.format(VERSION),
    license='MIT',
    author='Ronie Martinez',
    author_email='ronmarti18@gmail.com',
    description='Pure Python library for LaTeX to MathML conversion.',
    long_description=LONG_DESCRIPTION,
    keywords=[],
    classifiers=['Development Status :: 4 - Beta',
                 'License :: OSI Approved :: MIT License',
                 'Topic :: Software Development :: Libraries :: Python Modules',
                 'Programming Language :: Python',
                 'Programming Language :: Python :: 2',
                 'Programming Language :: Python :: 3',
                 'Topic :: Scientific/Engineering :: Mathematics',
                 'Topic :: Text Processing :: Markup :: HTML',
                 'Topic :: Text Processing :: Markup :: LaTeX'],
    package_data={'latex2mathml': ['unimathsymbols.txt']}
)
|
simsor/PyWeb | tests/test.py | Python | gpl-3.0 | 260 | 0.038462 | #!/usr/bin/python
from PyWeb import *
from template import *
#from PyWebDynamic import *
class TEST:
    """Smoke test: build a PyWeb page from test.html using myTemplate.

    Restores a constructor call corrupted in transit ("PyWeb | (").
    """

    def __init__(self):
        self.p = PyWeb("test.html")
        self.p.addTemplate(myTemplate)
        self.p.createPage()


if __name__ == "__main__":
    ttestsst = TEST()
|
mysociety/yournextmp-popit | official_documents/urls.py | Python | agpl-3.0 | 376 | 0 | from __future__ import unicode_literals
from django.conf.urls import url
from . import views
# URL routes for the official-documents app: one view to upload a document
# for a given election/post, one to display an uploaded document by pk.
# (Restores the regex and as_view() call corrupted in transit.)
urlpatterns = [
    url(r'^upload/election/(?P<election>[^/]+)/post/(?P<post_id>[^/]+)/$',
        views.CreateDocumentView.as_view(),
        name='upload_document_view'),
    url(r'^(?P<pk>\d+)/$',
        views.DocumentView.as_view(),
        name='uploaded_document_view'),
]
|
cmsdaq/hltd | lib/cx_Oracle-7.1/samples/tutorial/clob_string.py | Python | lgpl-3.0 | 1,342 | 0.005961 | #------------------------------------------------------------------------------
# clob_string.py (Section 7.2)
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
#------------------------------------------------------------------------------
from __future__ import print_function

import cx_Oracle
import db_config

con = cx_Oracle.connect(db_config.user, db_config.pw, db_config.dsn)
cur = con.cursor()

print("Inserting data...")

# Populate testclobs with five rows of growing CLOB payloads (ids 1..5).
# Restores two lines corrupted in transit: chr(...) and the INSERT text.
cur.execute("truncate table testclobs")
longString = ""
for i in range(5):
    char = chr(ord('A') + i)
    longString += char * 250
    cur.execute("insert into testclobs values (:1, :2)",
                (i + 1, "String data " + longString + ' End of string'))
con.commit()

def OutputTypeHandler(cursor, name, defaultType, size, precision, scale):
    """Fetch CLOB columns as long strings instead of LOB locators."""
    if defaultType == cx_Oracle.CLOB:
        return cursor.var(cx_Oracle.LONG_STRING, arraysize = cursor.arraysize)

con.outputtypehandler = OutputTypeHandler

print("Querying data...")

cur.prepare("select * from testclobs where id = :id")
cur.execute(None, {'id': 1})
# Renamed from `id`, which shadowed the builtin of the same name.
(row_id, clobdata) = cur.fetchone()
print("CLOB length:", len(clobdata))
print("CLOB data:", clobdata)
|
AjabWorld/ajabsacco | ajabsacco/wsgi_staging.py | Python | apache-2.0 | 401 | 0.002494 | """
WSGI config for ajabsacco project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os

# Select the staging settings module before Django is imported/configured.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ajabsacco.settings.staging")

# Restores the import corrupted in transit ("wsg | i").
from django.core.wsgi import get_wsgi_application

# Module-level WSGI callable that servers (gunicorn, mod_wsgi, ...) look up.
application = get_wsgi_application()
|
ghchinoy/tensorflow | tensorflow/contrib/stateless/python/kernel_tests/stateless_random_ops_test.py | Python | apache-2.0 | 1,641 | 0.001219 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.contrib.stateless API.
The real tests are in python/kernel_tests/random/stateless_random_ops_test.py.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import stateless
from tensorflow.python.ops import stateless_random_ops
from tensorflow.python.platform import test
class StatelessOpsTest(test.TestCase):
    """Verifies the contrib namespace re-exports the core stateless ops.

    Restores two lines corrupted in transit; behavior unchanged.
    """

    def testAPI(self):
        # assertIs (identity) rather than equality: the contrib symbols must
        # be the exact same function objects, not wrappers around them.
        self.assertIs(stateless.stateless_random_uniform,
                      stateless_random_ops.stateless_random_uniform)
        self.assertIs(stateless.stateless_random_normal,
                      stateless_random_ops.stateless_random_normal)
        self.assertIs(stateless.stateless_truncated_normal,
                      stateless_random_ops.stateless_truncated_normal)
        self.assertIs(stateless.stateless_multinomial,
                      stateless_random_ops.stateless_multinomial)


if __name__ == '__main__':
    test.main()
|
afajl/sy | setup.py | Python | bsd-3-clause | 1,024 | 0.000977 | # -*- coding: utf-8 -*-
import os
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
import sy, sy.path
# Distribution metadata for the "sy" package.
setup(
    name='sy',
    version=sy.__version__,
    url='http://sy.afajl.com',
    license='BSD',
    author='Paul Diaconescu',
    author_email='p@afajl.com',
    description='Simple tools for system administration tasks',
    # Reuse the README as the long description shown on PyPI.
    long_description=sy.path.slurp('README.rst'),
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: Unix',
        'Programming Language :: Python',
        'Topic :: System :: Systems Administration',
        'Topic :: Software Development :: Libraries :: Python Modules'
    ],
    packages=['sy', 'sy.net', 'sy.net.intf'],
    package_data={
        'sy': ['lib/*']
    },
    platforms='Python 2.4 and later on Unix',
    install_requires=['logbook>=0.3', 'ipaddr>=2.0.0']
)
|
wooga/airflow | airflow/secrets/metastore.py | Python | apache-2.0 | 1,888 | 0.00053 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Objects relating to sourcing connections from metastore database
"""
from typing import List
from airflow.models.connection import Connection
from airflow.secrets import BaseSecretsBackend
from airflow.utils.session import provide_session
class MetastoreBackend(BaseSecretsBackend):
    """
    Retrieves Connection object from airflow metastore database.
    """

    @provide_session
    def get_connections(self, conn_id, session=None) -> List[Connection]:
        """Return every Connection row whose conn_id matches, detached from the session."""
        query = session.query(Connection).filter(Connection.conn_id == conn_id)
        connections = query.all()
        # Detach the objects so they stay usable after the session closes.
        session.expunge_all()
        return connections

    @provide_session
    def get_variable(self, key: str, session=None):
        """
        Get Airflow Variable from Metadata DB

        :param key: Variable Key
        :return: Variable Value
        """
        # Imported lazily to avoid a circular import with airflow.models.
        from airflow.models.variable import Variable
        row = session.query(Variable).filter(Variable.key == key).first()
        session.expunge_all()
        return row.val if row else None
|
surekap/fabric-recipes | fabfile/ntp_client.py | Python | gpl-3.0 | 2,215 | 0.008126 | #!/bin/env python
"""
#######################################################################
# #
# Copyright (c) 2012, Prateek Sureka. All Rights Reserved. #
# This module provides an idempotent mechanism to remotely configure #
# ntp sync to a server on a host. #
# #
#######################################################################
"""
from fabric.api import task, run, env
from fabric.colors import red
from utils import reconfigure, is_debian_or_ubuntu
env.warn_only = True
from config import config
NTP_CONF_PATH = "/etc/ntp.conf"
@task
def timezone(timezone=config.get("ntp_client", {}).get("ntp_timezone", "Asia/Calcutta")):
""" Set the timezone. """
if | not is_debian_or_ubuntu():
print red("Cannot deploy to non-debian/ubuntu host: %s" % env.host)
return
import apt
apt.ensure(tzdata="latest")
return run("cp -f /usr/share/zoneinfo/%s /etc/localtime" % timezone)
@task
def | configure(server=config.get("ntp_client", {}).get("ntp_server", "")):
""" Configure NTP sync to server. """
if not is_debian_or_ubuntu():
print red("Cannot deploy to non-debian/ubuntu host: %s" % env.host)
return
# Upload configuration
params = {'server':server}
reconfigure("ntp_client.conf.template", params, NTP_CONF_PATH)
@task
def deploy(server=config.get("ntp_client", {}).get("ntp_server", "")):
""" Install, configure and start ntp sync and timezone. """
if not is_debian_or_ubuntu():
print red("Cannot deploy to non-debian/ubuntu host: %s" % env.host)
return
import apt, service
packages = {"ntp":"latest", "ntpdate":"latest"}
apt.ensure(**packages)
configure()
# Sync with server
run("ntpdate %s" % server)
# Sync hardware clock to correct time
run("hwclock --systohc")
service.ensure(ntp="restarted")
timezone()
@task
def status():
""" List the servers with which the host is synchronized. """
print run("ntpq -p")
print run("ntpdc -p") |
CLPeters/DeFFNetIzer | rClickclass.py | Python | apache-2.0 | 12,183 | 0.019617 | #m04405a12:02 class to provide Tk right click menu
#import rClickclass as rC
#then do something like u = rC.rClick(root, "Pmwdemo")
#before root.mainlooop()
#add ,name= 'lbb' to any listbox to avoid an error with paste on those
#later versions will detect and disable those menu items
#should be a way to create the menu once and just config it at post time
#add sample manual _test for PMW, Tix & Tk
#explore usage in Qt, wx, GTK if they like Qt have rclick can items be added?
#add ini reading of options and better naming of widgets maybe interactively
#produce patches for many of the popular Tk tools adding rClick
#easygui patch, anygui patch for Tk backend at least.
import Tkinter as Tk
__version__ = '0.102'
#
# + helpers
# - get_selection_indices
# - has_selection
#
# AtEnd()
# refers to the last position in the text
#
# AtInsert()
# refers to the point where the text cursor is
#
# AtSelFirst()
# indicates the beginning point of the selected text
#
# AtSelLast()
# denotes the last point of the selected text and finally
#
# At(x[, y])
# refers to the character at pixel location x, y (with y not used in the case
# of a text entry widget, which contains a single line of text).
def get_selection_indices(e): #from Idle
    """Return the (first, last) indices of the widget's selection, or (None, None)."""
    widget = e.widget
    try:
        return widget.index("sel.first"), widget.index("sel.last")
    except Tk.TclError:
        # No active selection in this widget.
        return None, None
def has_selection(e): #kluge
    """Return the currently selected text, or None when nothing is selected."""
    try:
        try:
            # Most widgets expose selection_get; older ones only select_get.
            getter = e.widget.selection_get
        except AttributeError:
            getter = e.widget.select_get
        #if e.widget.selection_present(): noavail in Text
        return getter()  # eventually find a better way
    except Tk.TclError:
        return None
class rClick:
    '''
    default right click context menu for all Tk Entry and Text widgets
    this could be a sourceforge project on its own
    planning to be a module, you import then maybe subclass
    override some or all of the methods
    instantiate it. it binds B3
    future plans keep track of instances so each leo can have its own
    for simple Tk apps they only need one and usually only c&p
    maybe a submenu of specilized insert strings
    this is definatly not the final refactor
    I should study some other menu classes and add some testing
    '''
    #
    # + rClick
    # + Methods
    # - __init__
    # - rClick_Copy
    # - rClick_Cut
    # - rClick_Paste
    # - rClick_Del
    # - rClick_insert
    # - rC_nclst
    # - rC_menu
    # - rC_config
    # - rClicker
    # - rClickbinder
    def __init__(self, tKrooT, label= '',
        toggleVar= 1, toggle_label= 'toggle' ):
        # Binds <Button-3> on all Text/Entry/Listbox/Label widgets of tKrooT.
        #tKrooT=Tk.TopLevel()?
        self.toggleVar = toggleVar #not sure what this might accomplish
        self.toggle_label = toggle_label
        self.label = label  # text shown (disabled) as the menu's first entry
        self.rClickbinder(tKrooT)
        self.e = None  # last <Button-3> event; set by rClicker
    def rClick_Copy(self, apnd= 0 ):
        # Forward to the widget's own clipboard-copy binding.
        # NOTE(review): `apnd` is accepted but never used.
        self.e.widget.event_generate('<Control-c>' )
    def rClick_Cut(self ):
        # Forward to the widget's clipboard-cut binding.
        self.e.widget.event_generate('<Control-x>' )
    def rClick_Paste(self ):
        # Forward to the widget's clipboard-paste binding.
        self.e.widget.event_generate('<Control-v>' )
    def rClick_Del(self ):
        # Delete the current selection, if any, from the widget.
        s = has_selection(self.e )
        if s:
            first, last = get_selection_indices(self.e )
            self.e.widget.delete(first, last )
    def rClick_insert(self, s ): #insert s at the current curser position
        self.e.widget.insert("insert",s )
    def rC_nclst(self ):
        # Return the (label, callback) list the menu is built from;
        # subclasses override this to customise the menu.
        nclst=[
            ('------',None),
            (' Cut', self.rClick_Cut ),
            (' Copy', self.rClick_Copy ),
            (' Paste', self.rClick_Paste ),
            #(' Delete', self.rClick_Del ),
            #('------',None),
            ]
        return nclst
    def rC_menu(self, nclst):
        # Build a Tk.Menu from nclst. Labels starting with '>>>>>>' open and
        # later close cascaded submenus; ' ------ ' inserts a separator.
        #add a preparse to ensure no infinate recursion and well formed
        #text,commands in menu tupples.
        try:
            rmenu = Tk.Menu(None, tearoff= 0, takefocus= 0 )
            cas = {}     # cascade name -> Tk.Menu
            cascade = 0  # truthy while a cascade is being filled
            for (txt, cmd) in nclst:
                if txt.startswith('>>>>>>') or cascade:
                    if txt.startswith('>>>>>>') and cascade and cmd == None:
                        #done cascade
                        cascade = 0
                        rmenu.add_cascade(label= icmd, menu= cas[icmd] )
                    elif cascade:
                        if txt == ' ------ ':
                            cas[icmd].add_separator()
                        else: cas[icmd].add_command(label= txt, command= cmd )
                    else: # start cascade
                        cascade = 1
                        icmd = cmd[:]
                        cas[icmd] = Tk.Menu(rmenu, tearoff= 0, takefocus= 0 )
                else:
                    if txt == ' ------ ':
                        rmenu.add_separator()
                    else: rmenu.add_command(label= txt, command= cmd )
        except (Tk.TclError, TypeError, AttributeError):
            # Malformed nclst: fall back to a minimal copy-only menu.
            rmenu = Tk.Menu(None, tearoff= 0, takefocus= 0 )
            rmenu.add_command(label= ' Copy', command= self.rClick_Copy )
        return rmenu
    def rC_config(self, rmenu ):
        # Post-process the menu before posting: show self.label as a disabled
        # header and restrict widgets named 'lbb' (listboxes) to copy only.
        try:
            rmenu.entryconfigure(0, state= 'disabled' )
            rmenu.entryconfigure(0, label= self.label )
            #kindof hardwired but its just a demo
            #the better way is to ask the menu for a list
            #or only create a list with copy in it...
            if self.e.widget._name == 'lbb':
                #active, normal, or disabled
                rmenu.activate("none" )
                rmenu.entryconfigure(' Copy', state= 'normal' )
                rmenu.entryconfigure('Copy', state= 'disabled' )
                rmenu.entryconfigure('Cut', state= 'disabled' )
                #for i in range(1,len(nclst)-3 ):
                # if i == 2: continue #allow copy, disable the others
                # rmenu.entryconfigure(i, state= 'disabled' )
            # rmenu.add_checkbutton(label= self.toggle_label, variable= self.toggleVar )
        except (Tk.TclError, TypeError, AttributeError):
            pass
    def rClicker(self, e ):
        # <Button-3> handler: remember the event, build and configure the
        # menu, then pop it up just next to the pointer.
        self.e = e
        e.widget.focus()
        #how to abstract this if it needs e for its functions?
        nclst = self.rC_nclst()
        #pass it the command list and return the Tk menu
        #not sure how this worked outside the class parsing self.command
        rmenu = self.rC_menu(nclst )
        #pass it the command list & event and possibly modify Tk menu
        #users can override this to provide per widget options
        #like greying out options etc
        self.rC_config(rmenu )
        #possibly need an unpost if loose focus
        rmenu.tk_popup(e.x_root-3, e.y_root+3,entry= "0" )
        return "break"  # stop further Tk event processing
    def rClickbinder(self, r ):
        # Class-level bind: every widget of these Tk classes gets the menu.
        for b in [ 'Text', 'Entry', 'Listbox', 'Label']: #
            r.bind_class(b, sequence= '<Button-3>',
                func= self.rClicker, add= '' )
#
# I wonder can I spawnl each of these to test all at once?
# + test status
# need to add Entry, Text, Label & Listbox to all
# and if possible automate the test somehow
# also have to show how to subclass and change nclst & config
#
# at least to prove all the binds are taking place.
#
# tested with win98 py2.33 from python.org
# - Tix_test() Tix is in the help, I can import but error's
# - PMW_test() works
# - anygui_test() sets Tk backend, rClick up but copy ineffective
# probably it does something with the clicpboard
# or forgets to set the ^c binds on listbox?
# - TK_test() works
def Tix_test(): #http://Tix.sourceforge.net
import Tix
import rClickclass as rC
root = |
google/init2winit | init2winit/model_lib/models.py | Python | apache-2.0 | 4,094 | 0.00171 | # coding=utf-8
# Copyright 2022 The init2winit Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Registry for the available models we can train."""
from init2winit.model_lib import adabelief_densenet
from init2winit.model_lib import adabelief_resnet
from init2winit.model_lib import adabelief_vgg
from init2winit.model_lib import autoencoder
from init2winit.model_lib import convolutional_autoencoder
from init2winit.model_lib import dlrm
from init2winit.model_lib import fully_connected
from init2winit.model_lib import gnn
from init2winit.model_lib import max_pooling_cnn
from init2winit.model_lib import mlperf_resnet
from init2winit.model_lib import nqm
from init2winit.model_lib import resnet
from init2winit.model_lib import simple_cnn
from init2winit.model_lib import transformer_lm
from init2winit.model_lib import vit
from init2winit.model_lib import wide_resnet
from init2winit.model_lib import xformer_translate
# Registry: model name -> (model constructor, default hyperparameters).
_ALL_MODELS = {
    'fully_connected':
        (fully_connected.FullyConnectedModel, fully_connected.DEFAULT_HPARAMS),
    'simple_cnn': (simple_cnn.SimpleCNNModel, simple_cnn.DEFAULT_HPARAMS),
    'max_pooling_cnn':
        (max_pooling_cnn.MaxPoolingCNNModel, max_pooling_cnn.DEFAULT_HPARAMS),
    'wide_resnet': (wide_resnet.WideResnetModel, wide_resnet.DEFAULT_HPARAMS),
    'resnet': (resnet.ResnetModel, resnet.DEFAULT_HPARAMS),
    'adabelief_densenet': (adabelief_densenet.AdaBeliefDensenetModel,
                           adabelief_densenet.DEFAULT_HPARAMS),
    'adabelief_resnet': (adabelief_resnet.AdaBeliefResnetModel,
                         adabelief_resnet.DEFAULT_HPARAMS),
    'adabelief_vgg':
        (adabelief_vgg.AdaBeliefVGGModel, adabelief_vgg.DEFAULT_HPARAMS),
    'autoencoder': (autoencoder.AutoEncoderModel, autoencoder.DEFAULT_HPARAMS),
    'convolutional_autoencoder':
        (convolutional_autoencoder.ConvAutoEncoderModel,
         convolutional_autoencoder.DEFAULT_HPARAMS),
    'fake_resnet':
        (mlperf_resnet.FakeModel, mlperf_resnet.FAKE_MODEL_DEFAULT_HPARAMS),
    'mlperf_resnet':
        (mlperf_resnet.ResnetModelMLPerf, mlperf_resnet.MLPERF_DEFAULT_HPARAMS),
    'transformer':
        (transformer_lm.TransformerLM1B, transformer_lm.DEFAULT_HPARAMS),
    'nqm': (nqm.NQM, nqm.DEFAULT_HPARAMS),
    'xformer_translate': (xformer_translate.TransformerTranslate,
                          xformer_translate.DEFAULT_HPARAMS),
    'gnn': (gnn.GNNModel, gnn.DEFAULT_HPARAMS),
    'dlrm': (dlrm.DLRMModel, dlrm.DEFAULT_HPARAMS),
    'vit': (vit.ViTModel, vit.DEFAULT_HPARAMS),
}
def get_model(model_name):
  """Get the corresponding model class based on the model string.

  API:
    model_builder = get_model("fully_connected")
    hps = get_model_hparams("fully_connected")
    ... modify/parse hparams
    model = model_builder(hps, num_classes)

  Args:
    model_name: (str) e.g. fully_connected.

  Returns:
    The model class (currently a flax Model) registered under `model_name`.

  Raises:
    ValueError if model is unrecognized.
  """
  try:
    return _ALL_MODELS[model_name][0]
  except KeyError:
    raise ValueError('Unrecognized model: {}'.format(model_name)) from None
def get_model_hparams(model_name):
  """Get the default hyperparameters registered for the given model name.

  Args:
    model_name: (str) e.g. fully_connected.

  Returns:
    The default hparams registered alongside the model class.

  Raises:
    ValueError if model is unrecognized.
  """
  if model_name not in _ALL_MODELS:
    raise ValueError('Unrecognized model: {}'.format(model_name))
  return _ALL_MODELS[model_name][1]
|
NateV/GrammarDev | grammar_dev/grammars/case_info_grammar.py | Python | gpl-2.0 | 2,165 | 0.005548 | from parsimonious import Grammar
from parsimonious import NodeVisitor
from grammar_dev.grammars.CustomNodeVisitorFactory import CustomVisitorFactory
# PEG grammars for parsimonious. grammars[0] is the strict case-info layout;
# grammars[1] is a permissive fallback that accepts any block of lines.
grammars = [
    r"""
    # Nonterminals
    case_info = (new_line? assigned_filed_initiated the_rest) /
        (new_line? line assigned_filed_initiated the_rest) /
        (new_line? line line assigned_filed_initiated the_rest)
    assigned_filed_initiated = ws* judge_assigned ws ws ws+ date_filed ws ws ws+ date_initiated ws* new_line
    judge_assigned = judge_assigned_label ws judge_assigned_name?
    judge_assigned_name = (single_content_char !(ws ws))+ single_content_char
    date_filed = date_filed_label ws date_filed_date #"Date Filed: 01/03/2011"
    date_filed_date = date &ws
    date_initiated = date_initiated_label ws date_initiated_date #"Initiation Date: 01/03/2011"
    date_initiated_date = date &new_line
    the_rest = line*
    # Silent helper nonterminals (don't include in list of terminals)
    line = single_content_char* new_line?
    date = number forward_slash number forward_slash number
    # Silent Terminals (should be consumed and not returned. Don't include
    # in list of terminals.)
    judge_assigned_label = "Judge Assigned:"
    date_filed_label = "Date Filed:"
    date_initiated_label = "Initiation Date:"
    # Loud Terminals (include in list of terminals)
    number = ~"[0-9]"+
    forward_slash = "/"
    single_content_char = ~"[a-z0-9`\ \"=_\.,\-\(\)\'\$\?\*%;:#&\[\]/@§]"i
    new_line = "\n"
    ws = " "
    """,
    r"""
    # Nonterminals
    case_info = new_line? line* new_line*
    line = single_content_char* new_line
    # Terminals
    single_content_char = ~"[a-z0-9`\ \"=_\.,\-\(\)\'\$\?\*%;:#&\[\]/@§]"i
    new_line = "\n"
    """
]
# Rule names the custom visitor should treat as nodes vs. leaf tokens.
nonterminals = ["case_info", "judge_assigned",
    "date_filed", "date_initiated", "the_rest"]
terminals = ["single_content_char", "new_line", "judge_assigned_name",
    "number", "forward_slash"]
def parse(section_text):
    """Parse a case-info section with the strict grammar and return it as XML."""
    visitor = CustomVisitorFactory(terminals, nonterminals, dict()).create_instance()
    tree = Grammar(grammars[0]).parse(section_text)
    return visitor.visit(tree)
|
cjaymes/pyscap | src/scap/model/oval_5/VariableIdPattern.py | Python | gpl-3.0 | 978 | 0.001022 | # Copyright 2016 Casey Jaym | es
# This file is part of PySCAP.
#
# PySCAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PySCAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PySCAP. If not, see <http://www.gnu.org/licenses/>.
import logging
from scap.model.xs.StringType import StringType
logger = logging.getLogger(__name__)
class VariableIdPattern(StringType):
    """String restricted to the OVAL variable-id format."""
    # Mirrors the schema's <xsd:pattern value="oval:[A-Za-z0-9_\-\.]+:var:[1-9][0-9]*"/>

    def get_value_pattern(self):
        """Return the regex that valid OVAL variable ids must match."""
        return r'oval:[A-Za-z0-9_\-\.]+:var:[1-9][0-9]*'
|
TEDICpy/write-it | nuntium/migrations/0010_auto__chg_field_outboundmessage_status.py | Python | gpl-3.0 | 4,921 | 0.007519 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    # Widens OutboundMessage.status from 4 to 10 characters so longer status
    # labels fit; `backwards` restores the previous width.

    def forwards(self, orm):
        # Changing field 'OutboundMessage.status'
        db.alter_column(u'nuntium_outboundmessage', 'status', self.gf('django.db.models.fields.CharField')(max_length='10'))

    def backwards(self, orm):
        # Changing field 'OutboundMessage.status'
        db.alter_column(u'nuntium_outboundmessage', 'status', self.gf('django.db.models.fields.CharField')(max_length='4'))

    # Frozen ORM snapshot generated by South; used to build `orm` above.
    # Do not edit by hand.
    models = {
        u'contactos.contact': {
            'Meta': {'object_name': 'Contact'},
            'contact_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contactos.ContactType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'person': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['popit.Person']"}),
            'value': ('django.db.models.fields.CharField', [], {'max_length': '512'})
        },
        u'contactos.contacttype': {
            'Meta': {'object_name': 'ContactType'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'label_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'nuntium.message': {
            'Meta': {'object_name': 'Message'},
            'content': ('django.db.models.fields.TextField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'subject': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
            'writeitinstance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['nuntium.WriteItInstance']"})
        },
        u'nuntium.messagerecord': {
            'Meta': {'object_name': 'MessageRecord'},
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            'datetime': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2013, 5, 2, 0, 0)'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'status': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        u'nuntium.outboundmessage': {
            'Meta': {'object_name': 'OutboundMessage'},
            'contact': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contactos.Contact']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'message': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['nuntium.Message']"}),
            'status': ('django.db.models.fields.CharField', [], {'default': "'ready'", 'max_length': "'10'"})
        },
        u'nuntium.writeitinstance': {
            'Meta': {'object_name': 'WriteItInstance'},
            'api_instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['popit.ApiInstance']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'slug': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        u'popit.apiinstance': {
            'Meta': {'object_name': 'ApiInstance'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'url': ('popit.fields.ApiInstanceURLField', [], {'unique': 'True', 'max_length': '200'})
        },
        u'popit.person': {
            'Meta': {'object_name': 'Person'},
            'api_instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['popit.ApiInstance']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'popit_url': ('popit.fields.PopItURLField', [], {'default': "''", 'max_length': '200', 'unique': 'True', 'null': 'True', 'blank': 'True'})
        }
    }

    complete_apps = ['nuntium']
dstufft/bcrypt | tests/test_bcrypt.py | Python | apache-2.0 | 6,744 | 0.006228 | import os
import mock
import pytest
import bcrypt
def test_gensalt_basic(monkeypatch):
    """With a fixed 16-byte RNG output the generated salt is deterministic."""
    fixed_rng = mock.Mock(return_value=b"0000000000000000")
    monkeypatch.setattr(os, "urandom", fixed_rng)
    assert bcrypt.gensalt() == b"$2a$12$KB.uKB.uKB.uKB.uKB.uK."
# The salt body is fixed by the patched RNG, so only the two-digit work
# factor varies; generate the 21 expected values instead of listing them.
@pytest.mark.parametrize(("rounds", "expected"), [
    (r, ("$2a$%02d$KB.uKB.uKB.uKB.uKB.uK." % r).encode("ascii"))
    for r in range(4, 25)
])
def test_gensalt_rounds_valid(rounds, expected, monkeypatch):
    """Every supported work factor is embedded verbatim in the salt."""
    monkeypatch.setattr(os, "urandom", mock.Mock(return_value=b"0000000000000000"))
    assert bcrypt.gensalt(rounds) == expected
@pytest.mark.parametrize("rounds", [1, 2, 3])
def test_gensalt_rounds_invalid(rounds, monkeypatch):
    """Work factors below 4 must be rejected with ValueError."""
    monkeypatch.setattr(os, "urandom", mock.Mock(return_value=b"0000000000000000"))
    with pytest.raises(ValueError):
        bcrypt.gensalt(rounds)
# Known-answer vectors (password, salt, expected hash) at work factor 4.
@pytest.mark.parametrize(("password", "salt", "expected"), [
    (b"Kk4DQuMMfZL9o", b"$2a$04$cVWp4XaNU8a4v1uMRum2SO", b"$2a$04$cVWp4XaNU8a4v1uMRum2SO026BWLIoQMD/TXg5uZV.0P.uO8m3YEm"),
    (b"9IeRXmnGxMYbs", b"$2a$04$pQ7gRO7e6wx/936oXhNjrO", b"$2a$04$pQ7gRO7e6wx/936oXhNjrOUNOHL1D0h1N2IDbJZYs.1ppzSof6SPy"),
    (b"xVQVbwa1S0M8r", b"$2a$04$SQe9knOzepOVKoYXo9xTte", b"$2a$04$SQe9knOzepOVKoYXo9xTteNYr6MBwVz4tpriJVe3PNgYufGIsgKcW"),
    (b"Zfgr26LWd22Za", b"$2a$04$eH8zX.q5Q.j2hO1NkVYJQO", b"$2a$04$eH8zX.q5Q.j2hO1NkVYJQOM6KxntS/ow3.YzVmFrE4t//CoF4fvne"),
    (b"Tg4daC27epFBE", b"$2a$04$ahiTdwRXpUG2JLRcIznxc.", b"$2a$04$ahiTdwRXpUG2JLRcIznxc.s1.ydaPGD372bsGs8NqyYjLY1inG5n2"),
    (b"xhQPMmwh5ALzW", b"$2a$04$nQn78dV0hGHf5wUBe0zOFu", b"$2a$04$nQn78dV0hGHf5wUBe0zOFu8n07ZbWWOKoGasZKRspZxtt.vBRNMIy"),
    (b"59je8h5Gj71tg", b"$2a$04$cvXudZ5ugTg95W.rOjMITu", b"$2a$04$cvXudZ5ugTg95W.rOjMITuM1jC0piCl3zF5cmGhzCibHZrNHkmckG"),
    (b"wT4fHJa2N9WSW", b"$2a$04$YYjtiq4Uh88yUsExO0RNTu", b"$2a$04$YYjtiq4Uh88yUsExO0RNTuEJ.tZlsONac16A8OcLHleWFjVawfGvO"),
    (b"uSgFRnQdOgm4S", b"$2a$04$WLTjgY/pZSyqX/fbMbJzf.", b"$2a$04$WLTjgY/pZSyqX/fbMbJzf.qxCeTMQOzgL.CimRjMHtMxd/VGKojMu"),
    (b"tEPtJZXur16Vg", b"$2a$04$2moPs/x/wnCfeQ5pCheMcu", b"$2a$04$2moPs/x/wnCfeQ5pCheMcuSJQ/KYjOZG780UjA/SiR.KsYWNrC7SG"),
    (b"vvho8C6nlVf9K", b"$2a$04$HrEYC/AQ2HS77G78cQDZQ.", b"$2a$04$HrEYC/AQ2HS77G78cQDZQ.r44WGcruKw03KHlnp71yVQEwpsi3xl2"),
    (b"5auCCY9by0Ruf", b"$2a$04$vVYgSTfB8KVbmhbZE/k3R.", b"$2a$04$vVYgSTfB8KVbmhbZE/k3R.ux9A0lJUM4CZwCkHI9fifke2.rTF7MG"),
    (b"GtTkR6qn2QOZW", b"$2a$04$JfoNrR8.doieoI8..F.C1O", b"$2a$04$JfoNrR8.doieoI8..F.C1OQgwE3uTeuardy6lw0AjALUzOARoyf2m"),
    (b"zKo8vdFSnjX0f", b"$2a$04$HP3I0PUs7KBEzMBNFw7o3O", b"$2a$04$HP3I0PUs7KBEzMBNFw7o3O7f/uxaZU7aaDot1quHMgB2yrwBXsgyy"),
    (b"I9VfYlacJiwiK", b"$2a$04$xnFVhJsTzsFBTeP3PpgbMe", b"$2a$04$xnFVhJsTzsFBTeP3PpgbMeMREb6rdKV9faW54Sx.yg9plf4jY8qT6"),
    (b"VFPO7YXnHQbQO", b"$2a$04$WQp9.igoLqVr6Qk70mz6xu", b"$2a$04$WQp9.igoLqVr6Qk70mz6xuRxE0RttVXXdukpR9N54x17ecad34ZF6"),
    (b"VDx5BdxfxstYk", b"$2a$04$xgZtlonpAHSU/njOCdKztO", b"$2a$04$xgZtlonpAHSU/njOCdKztOPuPFzCNVpB4LGicO4/OGgHv.uKHkwsS"),
    (b"dEe6XfVGrrfSH", b"$2a$04$2Siw3Nv3Q/gTOIPetAyPr.", b"$2a$04$2Siw3Nv3Q/gTOIPetAyPr.GNj3aO0lb1E5E9UumYGKjP9BYqlNWJe"),
    (b"cTT0EAFdwJiLn", b"$2a$04$7/Qj7Kd8BcSahPO4khB8me", b"$2a$04$7/Qj7Kd8BcSahPO4khB8me4ssDJCW3r4OGYqPF87jxtrSyPj5cS5m"),
    (b"J8eHUDuxBB520", b"$2a$04$VvlCUKbTMjaxaYJ.k5juoe", b"$2a$04$VvlCUKbTMjaxaYJ.k5juoecpG/7IzcH1AkmqKi.lIZMVIOLClWAk."),
])
def test_hashpw_new(password, salt, expected):
    # hashpw must reproduce the precomputed digest for each vector.
    assert bcrypt.hashpw(password, salt) == expected
# Same vectors as test_hashpw_new, but feeding the full stored hash back in
# as the salt: hashpw must reproduce the hash exactly.
@pytest.mark.parametrize(("password", "hashed"), [
    (b"Kk4DQuMMfZL9o", b"$2a$04$cVWp4XaNU8a4v1uMRum2SO026BWLIoQMD/TXg5uZV.0P.uO8m3YEm"),
    (b"9IeRXmnGxMYbs", b"$2a$04$pQ7gRO7e6wx/936oXhNjrOUNOHL1D0h1N2IDbJZYs.1ppzSof6SPy"),
    (b"xVQVbwa1S0M8r", b"$2a$04$SQe9knOzepOVKoYXo9xTteNYr6MBwVz4tpriJVe3PNgYufGIsgKcW"),
    (b"Zfgr26LWd22Za", b"$2a$04$eH8zX.q5Q.j2hO1NkVYJQOM6KxntS/ow3.YzVmFrE4t//CoF4fvne"),
    (b"Tg4daC27epFBE", b"$2a$04$ahiTdwRXpUG2JLRcIznxc.s1.ydaPGD372bsGs8NqyYjLY1inG5n2"),
    (b"xhQPMmwh5ALzW", b"$2a$04$nQn78dV0hGHf5wUBe0zOFu8n07ZbWWOKoGasZKRspZxtt.vBRNMIy"),
    (b"59je8h5Gj71tg", b"$2a$04$cvXudZ5ugTg95W.rOjMITuM1jC0piCl3zF5cmGhzCibHZrNHkmckG"),
    (b"wT4fHJa2N9WSW", b"$2a$04$YYjtiq4Uh88yUsExO0RNTuEJ.tZlsONac16A8OcLHleWFjVawfGvO"),
    (b"uSgFRnQdOgm4S", b"$2a$04$WLTjgY/pZSyqX/fbMbJzf.qxCeTMQOzgL.CimRjMHtMxd/VGKojMu"),
    (b"tEPtJZXur16Vg", b"$2a$04$2moPs/x/wnCfeQ5pCheMcuSJQ/KYjOZG780UjA/SiR.KsYWNrC7SG"),
    (b"vvho8C6nlVf9K", b"$2a$04$HrEYC/AQ2HS77G78cQDZQ.r44WGcruKw03KHlnp71yVQEwpsi3xl2"),
    (b"5auCCY9by0Ruf", b"$2a$04$vVYgSTfB8KVbmhbZE/k3R.ux9A0lJUM4CZwCkHI9fifke2.rTF7MG"),
    (b"GtTkR6qn2QOZW", b"$2a$04$JfoNrR8.doieoI8..F.C1OQgwE3uTeuardy6lw0AjALUzOARoyf2m"),
    (b"zKo8vdFSnjX0f", b"$2a$04$HP3I0PUs7KBEzMBNFw7o3O7f/uxaZU7aaDot1quHMgB2yrwBXsgyy"),
    (b"I9VfYlacJiwiK", b"$2a$04$xnFVhJsTzsFBTeP3PpgbMeMREb6rdKV9faW54Sx.yg9plf4jY8qT6"),
    (b"VFPO7YXnHQbQO", b"$2a$04$WQp9.igoLqVr6Qk70mz6xuRxE0RttVXXdukpR9N54x17ecad34ZF6"),
    (b"VDx5BdxfxstYk", b"$2a$04$xgZtlonpAHSU/njOCdKztOPuPFzCNVpB4LGicO4/OGgHv.uKHkwsS"),
    (b"dEe6XfVGrrfSH", b"$2a$04$2Siw3Nv3Q/gTOIPetAyPr.GNj3aO0lb1E5E9UumYGKjP9BYqlNWJe"),
    (b"cTT0EAFdwJiLn", b"$2a$04$7/Qj7Kd8BcSahPO4khB8me4ssDJCW3r4OGYqPF87jxtrSyPj5cS5m"),
    (b"J8eHUDuxBB520", b"$2a$04$VvlCUKbTMjaxaYJ.k5juoecpG/7IzcH1AkmqKi.lIZMVIOLClWAk."),
])
def test_hashpw_existing(password, hashed):
    assert bcrypt.hashpw(password, hashed) == hashed
def test_hashpw_invalid():
    """An unknown hash prefix ($2z$) must be rejected with ValueError."""
    bad_salt = b"$2z$04$cVWp4XaNU8a4v1uMRum2SO"
    with pytest.raises(ValueError):
        bcrypt.hashpw(b"password", bad_salt)
def test_hashpw_str_password():
    """Unicode (non-bytes) passwords must be rejected with TypeError."""
    salt = b"$2a$04$cVWp4XaNU8a4v1uMRum2SO"
    with pytest.raises(TypeError):
        bcrypt.hashpw(bcrypt.text_type("password"), salt)
def test_hashpw_str_salt():
    """Unicode (non-bytes) salts must be rejected with TypeError."""
    with pytest.raises(TypeError):
        bcrypt.hashpw(b"password", bcrypt.text_type("$2a$04$cVWp4XaNU8a4v1uMRum2SO"))
|
dariomangoni/chrono | src/demos/python/solidworks/demo_SW_irrlicht.py | Python | bsd-3-clause | 3,109 | 0.013509 | #------------------------------------------------------------------------------
# Name: pychrono example
# Purpose:
#
# Author: Alessandro Tasora
#
# Created: 1/01/2019
# Copyright: (c) ProjectChrono 2019
#------------------------------------------------------------------------------
import pychrono as chrono
import pychrono.irrlicht as chronoirr

# The path to the Chrono data directory containing various assets (meshes, textures, data files)
# is automatically set, relative to the default location of this demo.
# If running from a different directory, you must change the path to the data directory with:
#chrono.SetChronoDataPath('path/to/data')

# ---------------------------------------------------------------------
#
# Create the simulation system.
# (Do not create parts and constraints programmatically here, we will
# load a mechanism from file)

my_system = chrono.ChSystemNSC()

# Set the collision margins. This is expecially important for very large or
# very small objects (as in this example)! Do this before creating shapes.
chrono.ChCollisionModel.SetDefaultSuggestedEnvelope(0.001);
chrono.ChCollisionModel.SetDefaultSuggestedMargin(0.001);

# ---------------------------------------------------------------------
#
# load the file generated by the SolidWorks CAD plugin
# and add it to the system
#

print ("Loading C::E scene...");
exported_items = chrono.ImportSolidWorksSystem(chrono.GetChronoDataFile('solid_works/swiss_escapement'))
print ("...done!");

# Print exported items
for my_item in exported_items:
    print (my_item.GetName())

# Add items to the physical system
for my_item in exported_items:
    my_system.Add(my_item)

# ---------------------------------------------------------------------
#
# Create an Irrlicht application to visualize the system
#

myapplication = chronoirr.ChIrrApp(my_system, 'Test: using data exported by Chrono::Solidworks', chronoirr.dimension2du(1024,768));
myapplication.AddTypicalSky()
myapplication.AddTypicalLogo(chrono.GetChronoDataFile('logo_pychrono_alpha.png'))
myapplication.AddTypicalCamera(chronoirr.vector3df(0.3,0.3,0.4))
myapplication.AddTypicalLights()

# ==IMPORTANT!== Use this function for adding a ChIrrNodeAsset to all items
# in the system. These ChIrrNodeAsset assets are 'proxies' to the Irrlicht meshes.
# If you need a finer control on which item really needs a visualization proxy in
# Irrlicht, just use application.AssetBind(myitem); on a per-item basis.

myapplication.AssetBindAll();

# ==IMPORTANT!== Use this function for 'converting' into Irrlicht meshes the assets
# that you added to the bodies into 3D shapes, they can be visualized by Irrlicht!

myapplication.AssetUpdateAll();

# ---------------------------------------------------------------------
#
# Run the simulation
#

myapplication.GetSystem().SetMaxPenetrationRecoverySpeed(0.002);
myapplication.SetTimestep(0.002)

# Main loop: render a frame and advance the physics by one timestep.
while(myapplication.GetDevice().run()):
    myapplication.BeginScene()
    myapplication.DrawAll()
    myapplication.DoStep()
    myapplication.EndScene()
havard024/prego | venv/bin/pilfont.py | Python | mit | 1,043 | 0.003835 | #!/var/www/django/treeio/venv/bin/python
#
# The Python Imaging Library
# $Id$
#
# PIL raster font compiler
#
# history:
# 1997-08-25 fl created
# 2002-03-10 fl use "from PIL import"
#
from __future__ import print_function
VERSION = "0.4"
import glob, sys
# drivers
from PIL import BdfFontFile
from PIL import PcfFontFile
if len(sys.argv) <= 1:
print("PILFONT", VERSION, "-- PIL font compiler.")
print()
print("Usage: pilfont fontfiles...")
print()
print("Convert given font files to the PIL raster font format.")
print("This version of pilfont supports X BDF and PCF fonts.")
sys.exit(1)
files = []
for f in sys.argv[1:]:
files = files + glob.glob(f)
for f in files:
print(f + "...", end=' ')
try:
fp = open(f, "rb")
try:
p = PcfFontFile.PcfFontFile(fp)
except SyntaxError:
fp.seek | (0)
p = BdfFontFile.BdfFontFile(fp) |
p.save(f)
except (SyntaxError, IOError):
print("failed")
else:
print("OK")
|
JonnaStalring/AZOrange | azorange/trainingMethods/AZorngConsensus.py | Python | lgpl-3.0 | 34,765 | 0.010326 | """
AZorngConsensus
Module to build a Consensus model.
"""
import glob
import string
import os,sys
import AZBaseClasses
import orange,Orange
import sys
import re
import pickle
from cStringIO import StringIO
from tokenize import generate_tokens
from AZutilities import dataUtilities
#from AZutilities import miscUtilities
import AZOrangeConfig as AZOC
def IF0(expr, ifTrue):
    """Return *ifTrue* when *expr* equals zero; otherwise return *expr* unchanged."""
    return ifTrue if expr == 0 else expr
class ConsensusLearner(AZBaseClasses.AZLearner):
"""
Creates a Consensus as an Orange type of learner instance.
"""
    def __new__(cls, trainingData = None, name = "Consensus learner", **kwds):
        # Orange-style learner construction: when training data is supplied,
        # initialise and immediately return a trained classifier; otherwise
        # return the configured (untrained) learner instance.
        self = AZBaseClasses.AZLearner.__new__(cls, **kwds)
        if trainingData:
            self.__init__(name, **kwds)
            return self.__call__(trainingData)
        else:
            self.__dict__.update(kwds)
            self.name = name
            return self
    def __init__(self, name = "Consensus learner", **kwds):
        """
        Set default values of the model parameters if they are not given as inputs.
        Any keyword argument overrides the matching default below.
        """
        self.basicStat = None    # training-data statistics (taken from a sub-model on train)
        self.expression = None   # expression combining predictions (dict-of-learners mode)
        self.imputeData = None   # example used to impute missing attribute values
        self.learners = None     # list of learners, or dict mapping names -> learners
        self.name = name
        self.NTrainEx = 0        # number of training examples seen
        self.verbose = 0
        self.weights = None      # optional weights forwarded to the classifier (expression mode)
        # Append arguments to the __dict__ member variable
        self.__dict__.update(kwds)
def __call__(self, trainingData, weight = None):
"""Creates a Consensus model from the data in trainingData. """
if not AZBaseClasses.AZLearner.__call__(self,trainingData, weight) or not trainingData:
return None
# Make sure the correct number of arguments are supplied
if not self.learners:
return None
if len(self.learners) <= 1:
print "ERROR: The Consensus model needs at least 2 valid learners.\n" + "Learners: " + str(self.learners)
return None
if type(self.learners).__name__ == 'dict' and not self.expression:
print "ERROR: Missing expression! You must provide an expression together with the learner mapping."
return None
# Call the train method
if type(self.learners).__name__ == 'list':
if trainingData.domain.classVar.varType == orange.VarTypes.Discrete and len(trainingData.domain.classVar.values) != 2:
print "ERROR: The Consensus model only supports binary classification or regression problems."
return None
# Default behaviour, no expression defined.
classifiers = []
for learner in self.learners:
classifiers.append(learner(trainingData))
if not classifiers[-1]:
if self.verbose > 0:
print "ERROR: Could not create the model ",str(learner)
| return None
else:
#Try to get the imputeData, basicStat from a model that have it!
if hasattr(classifiers[-1], "basicStat") and classifiers[-1].basicStat and not self.basicStat:
self.basicStat = classifiers[-1].basicStat
if hasattr(classifiers[-1], "NTrainEx") and classifiers[-1].basicStat and not self.NTrainEx:
| self.NTrainEx = len(trainingData)
if hasattr(classifiers[-1], "imputeData") and classifiers[-1].imputeData and not self.imputeData:
self.imputeData = classifiers[-1].imputeData
return ConsensusClassifier(classifiers = classifiers,
classVar = trainingData.domain.classVar,
verbose = self.verbose,
domain = trainingData.domain,
varNames = [attr.name for attr in trainingData.domain.attributes],
NTrainEx = self.NTrainEx,
basicStat = self.basicStat,
imputeData = self.imputeData)
else:
classifiers = {}
for learner in self.learners:
newClassifier = self.learners[learner](trainingData)
if not newClassifier:
if self.verbose > 0:
print "ERROR: Could not create the model ",str(learner)
return None
else:
classifiers[learner] = newClassifier
#Try to get the imputeData, basicStat from a model that have it!
if hasattr(newClassifier, "basicStat") and newClassifier.basicStat and not self.basicStat:
self.basicStat = newClassifier.basicStat
if hasattr(newClassifier, "NTrainEx") and newClassifier.basicStat and not self.NTrainEx:
self.NTrainEx = len(trainingData)
if hasattr(newClassifier, "imputeData") and newClassifier.imputeData and not self.imputeData:
self.imputeData = newClassifier.imputeData
return ConsensusClassifier(classifiers = classifiers,
expression = self.expression,
weights = self.weights,
classVar = trainingData.domain.classVar,
verbose = self.verbose,
domain = trainingData.domain,
varNames = [attr.name for attr in trainingData.domain.attributes],
NTrainEx = self.NTrainEx,
basicStat = self.basicStat,
imputeData = self.imputeData)
class ConsensusClassifier(AZBaseClasses.AZClassifier):
    def __new__(cls, name = "Consensus classifier", **kwds):
        # Delegate construction to the AZ base classifier; state is set in __init__.
        self = AZBaseClasses.AZClassifier.__new__(cls, name = name, **kwds)
        return self
    def getTopImportantVars(self, inEx, nVars = 1, gradRef = None, absGradient = True, c_step = None, getGrad = False):
        # Variable importance is undefined for a consensus of heterogeneous
        # models (no harmonized decision-function value), so always report N/A.
        return {"NA":"Not aplicable: No harmonized DFV"}
    def __init__(self, name = "Consensus classifier", **kwds):
        """Apply defaults, then keyword overrides, then validate the setup.

        Required keyword: ``classifiers`` (a list, or a dict whose keys are the
        names referenced by ``expression``). Missing domain/class/statistics
        information is recovered from the individual classifiers.
        """
        #Optional inputs
        #   name
        self.expression = None
        self.verbose = 0
        self.varNames = None
        self.domain = None
        self.classVar = None
        self.NTrainEx = None
        self.basicStat = None
        self.imputeData = None
        self.weights = None
        #Required Inputs:
        self.classifiers = None
        self.__dict__.update(kwds)
        self._isRealProb = False
        self.name = name
        self.status = ""
        # Recover any metadata not passed explicitly from the sub-classifiers.
        if not self.classVar or not self.domain or not self.varNames:
            self._setDomainAndClass()
        if not self.NTrainEx or not self.basicStat or not self.imputeData:
            self._setStatData()
        # List-of-classifiers mode only supports binary classification or regression.
        if type(self.classifiers).__name__ == 'list' and self.domain.classVar.varType == orange.VarTypes.Discrete and len(self.domain.classVar.values) != 2:
            raise Exception("ERROR: The Consensus model only supports binary classification or regression problems.")
def _singlePredict(self, origExample = None, resultType = orange.GetValue, returnDFV = False):
"""
orange.GetBoth - <type 'tuple'> -> (<orange.Value 'Act'='3.44158792'>, <3.442: 1.000>)
orange.GetValue - <type 'orange.Value'> -> <orange.Value 'Act'='3.44158792'>
orange.GetProbabilities - <type 'orange.DiscDistribution'> -> <0.000, 0.000>
returnDFV - Flag indicating to return the Decision Function Value. If set to True, it will encapsulate the ori |
TheOtherDays/todmisc-songops | setup.py | Python | gpl-3.0 | 471 | 0.002123 | f | rom setuptools import setup
# Packaging metadata for the LSDJ song-operations distribution.
# NOTE(review): `packages` lists a hyphenated name, which is not importable
# as a Python identifier -- confirm it matches the on-disk package directory.
setup(name='todmisc-songops',
      version='0.1',
      description='The Other Days Music and Instrument Sound Converter, LSDJ song operations',
      url='https://github.com/TheOtherDays/todmisc-songops',
      author='The Other Days (http://theotherdays.net)',
      author_email='theotherdays@users.noreply.github.com',
      license='GPL v2',
      packages=['todmisc-songops'],
      install_requires=['pylsdj'],
      zip_safe=False)
| |
DomBennett/pG-lt | tests/test_tools_download.py | Python | gpl-2.0 | 8,457 | 0.000355 | #! /bin/usr/env python
# D.J. Bennett
# 26/05/2014
"""
Tests for download tools.
"""
import unittest
import pickle
import logging
import os
import pglt.tools.download_tools as dtools
# DIRS
working_dir = os.path.dirname(__file__)
# DUMMIES
# expected terms and term search results
t1_term = '(txid1[PORGN]) OR txid2[PORGN] AND (("name1"[GENE]) \
OR "name2"[GENE]) NOT predicted[TI] NOT genome[TI] NOT unverified[TI]'
t2_term = '(txid1[PORGN]) OR txid2[PORGN] AND (("name1"[TI]) \
OR "name2"[TI]) NOT predicted[TI] NOT genome[TI] NOT unverified[TI]'
t3_term = '(txid1[PORGN]) OR txid2[PORGN] AND (("name1") \
OR "name2") NOT predicted[TI] NOT shotgun[TI] NOT scaffold[TI] \
NOT assembly[TI] NOT unverified[TI]'
t1_search_res = {'Count': 0}
t2_search_res = {'Count': 2, 'IdList': ['seq1', 'seq2']}
t3_search_res = {'Count': 3, 'IdList': ['seq1', 'seq2', 'seq3']}
outgroup_res = {'Count': 3, 'IdList': ['seq4', 'seq5', 'seq6']}
# Example seqrecord for findgeneinseq
with open(os.path.join(working_dir, 'data', "test_findgeneinseq_examplesequence\
.p"), "r") as file:
sequence = pickle.load(file)
# Dummy seq records for download
class dummy_Seq(object):
    """Minimal stand-in for a sequence object: only str() conversion is used."""

    def __str__(self):
        # The code under test only parses the sequence text, so a fixed
        # 500-character poly-A string is sufficient.
        return 500 * "A"
class dummy_SeqRecord(object):
    """Minimal SeqRecord stand-in: a description, a dummy seq and a length."""

    def __init__(self, description, length=500):
        self.features = None        # never populated by these tests
        self.seq = dummy_Seq()      # fixed 500-base dummy sequence
        self.description = description
        self.length = length

    def __len__(self):
        # Report the configured length, independent of the dummy sequence.
        return self.length
seq1 = dummy_SeqRecord(description="A sequence of NAME1")
seq2 = dummy_SeqRecord(description="A sequence of NAME2")
seq3 = [dummy_SeqRecord(description="A sequence of NAME3"),
dummy_SeqRecord(description="A sequence of NAME4"),
dummy_SeqRecord(description="A sequence of NAME5"),
dummy_SeqRecord(description="A sequence of NAME1")]
# Sequences -- just Ts and Fs -- for testing filter
sequences = [True for i in range(80)]
sequences.extend([False for i in range(20)])
# Dependent stubs
def dummy_eSearch(term, logger, retStart=0, retMax=1, usehistory="n",
                  db="nucleotide"):
    """Stubbed etools.eSearch: map the known search terms to canned results."""
    canned = {
        t1_term: t1_search_res,
        t2_term: t2_search_res,
        t3_term: t3_search_res,
    }
    # Any unrecognised term is treated as an outgroup search.
    return canned.get(term, outgroup_res)
def dummy_eFetch(ncbi_id, logger, db="nucleotide"):
    """Stubbed etools.eFetch: return the canned record(s) for *ncbi_id*."""
    records = {'seq1': seq1, 'seq2': seq2, 'seq3': seq3}
    if ncbi_id in records:
        return records[ncbi_id]
    # Unknown id: return every canned record as a list.
    return [seq1, seq2, seq3]
def dummy_blast(query, subj, minoverlap, logger, wd, threads):
    """Stubbed atools.blast: pretend every query matched base positions 0-100."""
    matched_positions = [0, 100]
    return query, matched_positions
def dummy_checkAlignment(alignment, maxgaps, minoverlap, minlen, logger):
    """Stubbed atools.checkAlignment: accept every alignment unchanged."""
    return alignment
# downloader init variables
taxids = ['1', '2']
gene_names = ['name1', 'name2']
nseqs = 2
thoroughness = 3
maxpn = 0.1
votesize = 3
maxgaps = 0.01
minoverlap = 200
maxtrys = 100
maxlen = 2000
minlen = 300
# dictionary variables
namesdict = {"species1": {'txids': ['1', '2']}, 'outgroup': {'txids': ['4']}}
allrankids = [1, 2, 3]
genedict = {'gene1': {'taxid': '3', 'names': ['name1', 'name2'],
'type': 'deep'}}
class DownloadTestSuite(unittest.TestCase):
    def setUp(self):
        """Patch network-facing helpers with local stubs and build a Downloader."""
        self.logger = logging.getLogger()
        self.wd = os.getcwd()
        # Keep references to the real implementations so tearDown can restore them.
        self.true_eSearch = dtools.etools.eSearch
        self.true_eFetch = dtools.etools.eFetch
        self.true_blast = dtools.atools.blast
        self.true_checkAlignment = dtools.atools.checkAlignment
        # Monkey-patch the Entrez/BLAST entry points: no network or BLAST
        # binary is needed to run these tests.
        dtools.etools.eSearch = dummy_eSearch
        dtools.etools.eFetch = dummy_eFetch
        dtools.atools.blast = dummy_blast
        dtools.atools.checkAlignment = dummy_checkAlignment
        # mock Downloader instance
        self.downloader = dtools.Downloader(gene_names=gene_names,
                                            nseqs=nseqs,
                                            thoroughness=thoroughness,
                                            maxpn=maxpn, votesize=votesize,
                                            maxtrys=maxtrys,
                                            minoverlap=minoverlap,
                                            maxlen=maxlen, minlen=minlen,
                                            logger=self.logger, wd=self.wd)
        # expected search terms at different thoroughnesses
        self.t1_term = t1_term
        self.t2_term = t2_term
        self.t3_term = t3_term
        self.seqids = ['seq1', 'seq2', 'seq3']
        self.seq1 = seq1
        self.seq2 = seq2
        self.seq3 = seq3
        self.sequences = sequences
        self.taxids = taxids
        self.record = sequence
        self.namesdict = namesdict
        self.allrankids = allrankids
        self.genedict = genedict
    def tearDown(self):
        # repatch: restore the real implementations saved in setUp so later
        # test modules see unpatched download tools.
        dtools.etools.eSearch = self.true_eSearch
        dtools.etools.eFetch = self.true_eFetch
        dtools.atools.blast = self.true_blast
        dtools.atools.checkAlignment = self.true_checkAlignment
    def test_downloader_private_buildsearchterm_thoroughness1(self):
        """Thoroughness 1 restricts gene names to the [GENE] field."""
        res = self.downloader._buildSearchTerm(self.taxids, 1)
        self.assertEqual(res, self.t1_term)
    def test_downloader_private_buildsearchterm_thoroughness2(self):
        """Thoroughness 2 searches gene names in the title ([TI]) field."""
        res = self.downloader._buildSearchTerm(self.taxids, 2)
        self.assertEqual(res, self.t2_term)
    def test_downloader_private_buildsearchterm_thoroughness3(self):
        """Thoroughness 3 searches unrestricted, with extra NOT filters."""
        res = self.downloader._buildSearchTerm(self.taxids, 3)
        self.assertEqual(res, self.t3_term)
    def test_downloader_private_search(self):
        """Repeated searches escalate thoroughness until nothing new is found."""
        # expect to only find 2, 1 and 0 sequences
        # it should search until it finds two sequences (nseqs = 2),
        # and then on the next search after raising its thoroughness
        # it should find the last sequence. Searching again will
        # find no more.
        res1 = self.downloader._search(self.taxids)
        res2 = self.downloader._search(self.taxids)
        res3 = self.downloader._search(self.taxids)
        self.assertEqual([len(res1), len(res2), len(res3)], [2, 1, 0])
    def test_downloader_private_filter(self):
        """_filter splits the 80 passing (True) from the 20 failing (False) entries."""
        # weeds out Falses from sequences, should not be any Falses
        # self.sequences is 80 Ts and 20 Fs
        sequences = self.sequences[:]
        res_filtered, res_downloaded = self.downloader._filter(sequences)
        self.assertEqual(len(res_filtered), 80)
        self.assertEqual(len(res_downloaded), 20)
def test_downloader_priv | ate_findgeneinseq(self):
# change gene names for test
gene_names = self.downloader.gene_names
self.downloader.gene_names = ['COI']
res = self.downloader._findGeneInSeq(self.record)
self.downloader.gene_names = gene_name | s
# I know that the COI sequence is 1545bp (5350..6894)
# (http://www.ncbi.nlm.nih.gov/nuccore/AM711897.1)
self.assertEqual(len(res), 1545)
    def test_downloader_private_parse(self):
        """_parse should pick a usable record from a list of candidates."""
        # seq3, a list of SeqRecords of which the third is the right
        # size and contains no Ns
        res = self.downloader._parse(self.seq3)
        self.assertIsNotNone(res)
    def test_downloader_private_download(self):
        """Three seq ids yield three results via the stubbed eFetch."""
        res = self.downloader._download(self.seqids)
        self.assertEqual(len(res), 3)
    def test_downloader_run(self):
        """A full run over both taxids finds all three stubbed sequences."""
        # reset thoroughness and deja_vues
        self.downloader.thoroughness = 1
        self.downloader.deja_vues = []
        res = self.downloader.run(self.taxids)
        self.assertEqual(len(res), 3)
    def test_findbestgenes(self):
        """findBestGenes selects 'gene1' given the minimal names/gene dicts."""
        res = dtools.findBestGenes(self.namesdict, self.genedict, 3,
                                   self.allrankids, logger=self.logger,
                                   minnseq=1, target=1, minnspp=0)
        self.assertEqual(res[0], 'gene1')
def test_get_clusters(self):
# make a gene_sequences: [(name, sequence), ...]
# should return 80 sequences
names = ['sp1', 'sp2', 'sp3', 'sp4', 'sp5' |
CharlesZhong/hupu_miner | miner/miner/settings.py | Python | apache-2.0 | 821 | 0.003654 | # -*- coding: utf-8 -*-
# Scrapy settings for miner project
#
# For simplicity, this file contains only the most important settings by
# default. All the other settings are documented here:
#
#     http://doc.scrapy.org/en/latest/topics/settings.html
#
BOT_NAME = 'miner'
SPIDER_MODULES = ['miner.spiders']
NEWSPIDER_MODULE = 'miner.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'miner (+http://www.yourdomain.com)'
# Pipeline chain (lower number runs earlier): digest daily posts, drop
# duplicates, then persist to JSON and MongoDB.
ITEM_PIPELINES = {
    BOT_NAME+'.pipelines.BXJDailyPostDigestPipeline': 300,
    BOT_NAME+'.pipelines.BXJPostDigestDuplicatesPipeline': 301,
    BOT_NAME+'.pipelines.BXJPostDigestJsonWirterPipeline': 302,
    BOT_NAME+'.pipelines.BXJPostDigestMongoPipeline': 303,
}
# MongoDB connection settings -- presumably consumed by the Mongo pipeline;
# verify against pipelines.py.
MONGODB_URI = "localhost"
MONGODB_PORT = 27017
MONGODB_DB = "HupuMiner"
|
thespacedoctor/fundamentals | fundamentals/mysql/sqlite2mysql.py | Python | gpl-3.0 | 10,966 | 0.002462 | #!/usr/local/bin/python
# encoding: utf-8
"""
Take a sqlite database file and copy the tables within it to a MySQL database
Usage:
sqlite2mysql -s <pathToSettingsFile> <pathToSqliteDB> [<tablePrefix>]
Options:
pathToSqliteDB path to the sqlite database file
tablePrefix a string to prefix the table names when copying to mysql database
pathToSettingsFile path to a settings file with logging and database information (yaml file)
-h, --help show this help message
-v, --version show version
-s, --settings the settings file
"""
from builtins import object
import sys
import os
import sqlite3 as lite
os.environ['TERM'] = 'vt100'
from fundamentals import tools
from fundamentals.mysql import writequery
from datetime import datetime, date, time
def main(arguments=None):
    """The main function, used when ``sqlite2mysql.py`` is run as a command-line tool."""
    # setup the command-line util settings
    su = tools(
        arguments=arguments,
        docString=__doc__,
        logLevel="WARNING",
        options_first=False,
        projectName=False
    )
    arguments, settings, log, dbConn = su.setup()
    # unpack remaining cl arguments using `exec` to setup the variable names
    # automatically
    # NOTE(review): exec on raw argument values is fragile (breaks on quotes)
    # and unsafe with untrusted input -- a dict of parsed options would be safer.
    for arg, val in list(arguments.items()):
        if arg[0] == "-":
            varname = arg.replace("-", "") + "Flag"  # e.g. "--settings" -> "settingsFlag"
        else:
            varname = arg.replace("<", "").replace(">", "")  # "<tablePrefix>" -> "tablePrefix"
        if isinstance(val, str):
            exec(varname + " = '%s'" % (val,))
        else:
            exec(varname + " = %s" % (val,))
        if arg == "--dbConn":
            dbConn = val
        log.debug('%s = %s' % (varname, val,))
    # Import here to avoid a circular import at module load time.
    from fundamentals.mysql import sqlite2mysql
    converter = sqlite2mysql(
        log=log,
        settings=settings,
        pathToSqlite=pathToSqliteDB,
        tablePrefix=tablePrefix,
        dbConn=dbConn
    )
    converter.convert_sqlite_to_mysql()
    return
class sqlite2mysql(object):
"""
*Take a sqlite database file and copy the tables within it to a MySQL database*
**Key Arguments**
- ``log`` -- logger
- ``settings`` -- the settings dictionary
- ``pathToSqlite`` -- path to the sqlite database to transfer into the MySQL database
- ``tablePrefix`` -- a prefix to add to all the tablename when converting to mysql. Default *""*
- ``dbConn`` -- mysql database connection
**Usage**
To setup your logger, settings and database connections, please us | e the ``fundamentals`` package (`see tutorial here <http://fundamentals.readthedocs.io/en/latest/#tutorial>`_).
To convert and import the content of a sqlite database into MySQL run the following:
.. todo::
- add a tutorial about ``sqlite2mysql`` to documentation
```python
from fundamentals.mysql import sqlite2mysql
converter = sqlite2mysql(
log=log,
settings=settings,
pathToSqlite="/path/to/sqlite.db",
tablePref | ix="external"
)
converter.convert_sqlite_to_mysql()
```
"""
    # Initialisation
    def __init__(
            self,
            log,
            pathToSqlite,
            tablePrefix="",
            settings=False,
            dbConn=False
    ):
        self.log = log
        log.debug("instansiating a new 'sqlite2mysql' object")
        self.settings = settings
        self.pathToSqlite = pathToSqlite
        self.tablePrefix = tablePrefix
        self.dbConn = dbConn
        # Normalise the prefix: a falsy value means no prefix; otherwise an
        # underscore separator is appended (e.g. "external" -> "external_").
        if not self.tablePrefix:
            self.tablePrefix = ""
        if len(self.tablePrefix):
            self.tablePrefix = self.tablePrefix + "_"
        # xt-self-arg-tmpx
        return None
def convert_sqlite_to_mysql(
self):
"""*copy the contents of the sqlite database into the mysql database*
See class docstring for usage
"""
from fundamentals.renderer import list_of_dictionaries
from fundamentals.mysql import directory_script_runner
self.log.debug('starting the ``convert_sqlite_to_mysql`` method')
con = lite.connect(self.pathToSqlite)
con.row_factory = lite.Row
cur = con.cursor()
# GET ALL TABLE NAMES
cur.execute("SELECT name FROM sqlite_master WHERE type='table';")
tables = cur.fetchall()
createStatements = []
inserts = []
for table in tables:
table = table['name']
if table == "sqlite_sequence":
continue
# CREATE TABLE collection_books (folder_id, fingerprint, primary key(folder_id, fingerprint));
# GENEREATE THE MYSQL CREATE STATEMENTS FOR EACH TABLE
cur.execute(
"SELECT sql FROM sqlite_master WHERE name = '%(table)s';" % locals())
createStatement = cur.fetchone()
createStatement = createStatement[0].replace('"', '`') + ";"
if "DEFAULT" not in createStatement:
if "primary key(" in createStatement:
tmp = createStatement.split("primary key(")
tmp[0] = tmp[0].replace(
",", " varchar(150) DEFAULT NULL,")
createStatement = ("primary key(").join(tmp)
if "primary key," in createStatement:
tmp = createStatement.split("primary key,")
tmp[1] = tmp[1].replace(
",", " varchar(150) DEFAULT NULL,")
tmp[1] = tmp[1].replace(
");", " varchar(150) DEFAULT NULL);")
createStatement = ("primary key,").join(tmp)
createStatement = createStatement.replace(
"INTEGER PRIMARY KEY", "INTEGER AUTO_INCREMENT PRIMARY KEY")
createStatement = createStatement.replace(
"AUTOINCREMENT", "AUTO_INCREMENT")
createStatement = createStatement.replace(
"DEFAULT 't'", "DEFAULT '1'")
createStatement = createStatement.replace(
"DEFAULT 'f'", "DEFAULT '0'")
createStatement = createStatement.replace(",'t'", ",'1'")
createStatement = createStatement.replace(",'f'", ",'0'")
if "CREATE TABLE `" in createStatement:
createStatement = createStatement.replace(
"CREATE TABLE `", "CREATE TABLE IF NOT EXISTS `" + self.tablePrefix)
else:
createStatement = createStatement.replace(
"CREATE TABLE ", "CREATE TABLE IF NOT EXISTS " + self.tablePrefix)
if ", primary key(" in createStatement:
createStatement = createStatement.replace(", primary key(", """,
`dateCreated` datetime DEFAULT CURRENT_TIMESTAMP,
`dateLastModified` datetime DEFAULT CURRENT_TIMESTAMP,
`updated` tinyint(4) DEFAULT '0',
primary key(""")
else:
createStatement = createStatement.replace(");", """,
`dateCreated` datetime DEFAULT CURRENT_TIMESTAMP,
`dateLastModified` datetime DEFAULT CURRENT_TIMESTAMP,
`updated` tinyint(4) DEFAULT '0');
""")
createStatement = createStatement.replace(
" text primary key", " varchar(100) primary key")
createStatement = createStatement.replace(
"`EntryText` TEXT NOT NULL,", "`EntryText` TEXT,")
createStatement = createStatement.replace(
"`SelectionText` TEXT NOT NULL", "`SelectionText` TEXT")
createStatement = createStatement.replace(
"`Filename` INTEGER NOT NULL,", "`Filename` TEXT NOT NULL,")
createStatement = createStatement.replace(
"`SessionPartUUID` TEXT NOT NULL UNIQUE,", "`SessionPartUUID` VARCHAR(100) NOT NULL UNIQUE,")
createStatement = createStatement.replace(
"`Name` TEXT PRIMARY KEY NOT NULL", "`Name` VARCHAR(100) PRIMARY KEY NOT NULL")
createStatement = createStatement.replace(
" VARCHAR ", " VARCHAR(100) ")
createStatement = createStatement.replace(
" VARCH |
google-research/adamatch | semi_supervised_domain_adaptation/baseline.py | Python | apache-2.0 | 7,016 | 0.002851 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""FixMatch with Distribution Alignment and Adaptative Confidence Ratio.
"""
import os
import sys
from typing import Callable
import jax
import jax.numpy as jn
import objax
from absl import app
from absl import flags
from absl.flags import FLAGS
from objax.typing import JaxArray
from semi_supervised_domain_adaptation.lib.data import MixData, CTAData
from semi_supervised_domain_adaptation.lib.train import TrainableSSDAModule
from shared.data.fsl import DATASETS as FSL_DATASETS
from shared.data.ssl import DATASETS as SSL_DATASETS, DataSetSSL
from shared.train import ScheduleCos
from shared.util import setup_tf, MyParallel
from shared.zoo.models import network, ARCHS
class Baseline(TrainableSSDAModule):
def __init__(self, nclass: int, model: Callable, **kwargs):
super().__init__(nclass, kwargs)
self.model: objax.Module = model(colors=3, nclass=nclass, **kwargs)
self.model_ema = objax.optimizer.ExponentialMovingAverageModule(self.model, momentum=0.999)
if FLAGS.arch.endswith('pretrain'):
# Initialize weights of EMA with pretrained model's weights.
self.model_ema.ema.momentum = 0
self.model_ema.update_ema()
self.model_ema.ema.momentum = 0.999
self.stats = objax.Module()
self.stats.keygen = objax.random.DEFAULT_GENERATOR
self.stats.p_labeled = objax.nn.ExponentialMovingAverage((nclass,), init_value=1 / nclass)
self.stats.p_unlabeled = objax.nn.MovingAverage((nclass,), buffer_size=128, init_value=1 / nclass)
train_vars = self.model.vars() + self.stats.vars()
self.opt = objax.optimizer.Momentum(train_vars)
self.lr = ScheduleCos(self.params.lr, self.params.lr_decay)
@objax.Function.with_vars(self.model_ema.vars())
def eval_op(x: JaxArray, domain: int) -> JaxArray:
return objax.functional.softmax(self.model_ema(x, training=False, domain=domain))
def loss_function(sx, sy, tu):
c, h, w = sx.shape[-3:]
xu = jn.concatenate((sx, tu)).reshape((-1, c, h, w))
logit = self.model(xu, training=True)
logit_sx = jn.split(logit, (2 * sx.shape[0],))[0]
| logit_sx_weak, logit_sx_strong = logit_sx[::2], logit_sx[1::2]
xe = 0.5 * (objax.functional.loss.cross_entropy_logits(logit_sx_weak, sy).mean() +
objax.functional.loss.c | ross_entropy_logits(logit_sx_strong, sy).mean())
wd = 0.5 * sum((v.value ** 2).sum() for k, v in train_vars.items() if k.endswith('.w'))
loss = xe + self.params.wd * wd
return loss, {'losses/xe': xe, 'losses/wd': wd}
gv = objax.GradValues(loss_function, train_vars)
@objax.Function.with_vars(self.vars())
def train_op(step, sx, sy, tx, ty, tu, probe=None):
y_probe = eval_op(probe, 1) if probe is not None else None
p = step / (FLAGS.train_mimg << 20)
lr = self.lr(p)
g, v = gv(jn.concatenate((sx, tx)), jn.concatenate((sy, ty)), tu)
self.opt(lr, objax.functional.parallel.pmean(g))
self.model_ema.update_ema()
return objax.functional.parallel.pmean({'monitors/lr': lr, **v[1]}), y_probe
self.train_op = MyParallel(train_op, reduce=lambda x: x)
self.eval_op = MyParallel(eval_op, static_argnums=(1,))
def main(argv):
    """Build the SSDA datasets and the Baseline model, then run training."""
    del argv
    print('JAX host: %d / %d' % (jax.host_id(), jax.host_count()))
    print('JAX devices:\n%s' % '\n'.join(str(d) for d in jax.devices()), flush=True)
    setup_tf()
    # Source domain is fully labeled; the target domain contributes a small
    # labeled split plus the full unlabeled set.
    source = FSL_DATASETS()[f'{FLAGS.dataset}_{FLAGS.source}-0']()
    target_name, target_samples_per_class, target_seed = DataSetSSL.parse_name(f'{FLAGS.dataset}_{FLAGS.target}')
    target_labeled = SSL_DATASETS()[target_name](target_samples_per_class, target_seed)
    target_unlabeled = FSL_DATASETS()[f'{target_name}-0']()
    testsets = [target_unlabeled.test, source.test]  # Ordered by domain (unlabeled always first)
    module = Baseline(source.nclass, network(FLAGS.arch),
                      lr=FLAGS.lr,
                      lr_decay=FLAGS.lr_decay,
                      wd=FLAGS.wd,
                      arch=FLAGS.arch,
                      batch=FLAGS.batch,
                      uratio=FLAGS.uratio)
    logdir = f'SSDA/{FLAGS.dataset}/{FLAGS.source}/{FLAGS.target}/{FLAGS.augment}/{module.__class__.__name__}/'
    logdir += '_'.join(sorted('%s%s' % k for k in module.params.items()))
    logdir = os.path.join(FLAGS.logdir, logdir)
    test = {}
    for domain, testset in enumerate(testsets):
        # Bind `domain` as a default argument: the mapped function runs lazily,
        # and a plain closure over the loop variable would tag every test set
        # with the final domain index (classic late-binding bug).
        test.update((k, v.parse().batch(FLAGS.batch).nchw().map(lambda d, domain=domain: {**d, 'domain': domain}).prefetch(16))
                    for k, v in testset.items())
    if FLAGS.augment.startswith('('):
        train = MixData(source.train, target_labeled.train, target_unlabeled.train, source.nclass, FLAGS.batch,
                        FLAGS.uratio)
    elif FLAGS.augment.startswith('CTA('):
        train = CTAData(source.train, target_labeled.train, target_unlabeled.train, source.nclass, FLAGS.batch,
                        FLAGS.uratio)
    else:
        raise ValueError(f'Augment flag value {FLAGS.augment} not supported.')
    module.train(FLAGS.train_mimg << 10, FLAGS.report_kimg, train, test, logdir, FLAGS.keep_ckpts)
    train.stop()
if __name__ == '__main__':
    # Command-line flags consumed by main() above.
    flags.DEFINE_enum('arch', 'wrn28-2', ARCHS, 'Model architecture.')
    flags.DEFINE_float('lr', 0.03, 'Learning rate.')
    flags.DEFINE_float('lr_decay', 0.25, 'Learning rate decay.')
    flags.DEFINE_float('wd', 0.001, 'Weight decay.')
    flags.DEFINE_integer('batch', 64, 'Batch size')
    flags.DEFINE_integer('uratio', 3, 'Unlabeled batch size ratio')
    flags.DEFINE_integer('report_kimg', 64, 'Reporting period in kibi-images.')
    flags.DEFINE_integer('train_mimg', 8, 'Training duration in mega-images.')
    flags.DEFINE_integer('keep_ckpts', 5, 'Number of checkpoints to keep (0 for all).')
    flags.DEFINE_string('logdir', 'experiments', 'Directory where to save checkpoints and tensorboard data.')
    flags.DEFINE_string('dataset', 'domainnet32', 'Source data to train on.')
    flags.DEFINE_string('source', 'clipart', 'Source data to train on.')
    flags.DEFINE_string('target', 'infograph(10,seed=1)', 'Target data to train on.')
    FLAGS.set_default('augment', 'CTA(sm,sm,probe=1)')
    FLAGS.set_default('para_augment', 8)
    app.run(main)
|
jaredly/codetalker | tests/parse/optional.py | Python | mit | 720 | 0.009722 | #!/usr/bin/env python
from codetalker.pgm import Grammar
from codetalker.pgm.tokens import *
from codetalker.pgm.special import *
def start(rule):
    # Grammar start rule: one or more `value` nodes.
    rule | plus(value)
def value(rule):
    # A value matches either the sequence `([ID], STRING)` or a bare STRING --
    # presumably the [...] marks ID as optional (codetalker rule syntax;
    # the tests below show bare IDs and strings both parse). TODO confirm.
    rule | ([ID], STRING)
g = Grammar(start=start, tokens=[ID, STRING, WHITE], ignore=[WHITE])
def test_one():
    # Three quoted strings -> three `value` children under start.
    tree = g.get_ast('"string" "string" "strin" ')
    assert len(tree) == 3
def test_onother():
    # Round trip: parsing then stringifying preserves the input exactly.
    st = '"string" "string" "strin" '
    tree = g.process(st)
    assert str(tree) == st
def test_two():
    # Each ID pairs with the following string, so six tokens make three values.
    tree = g.get_ast('one "two" three "four" five "six"')
    assert len(tree) == 3
def test_three():
    # A string not preceded by an ID still forms its own value: four here.
    tree = g.get_ast('"one" two "three" "four" five "six"')
    assert len(tree) == 4
# vim: et sw=4 sts=4
|
ngageoint/scale | scale/source/test/configuration/test_source_data_file.py | Python | apache-2.0 | 4,106 | 0.004384 | from __future__ import unicode_literals
import datetime
import django
from django.utils.timezone import now
from django.test import TransactionTestCase
from mock import call, patch
from job.seed.metadata import SeedMetadata
from source.configuration.source_data_file import SourceDataFileParseSaver
from storage.models import ScaleFile, Workspace
from util.parse import parse_datetime
class TestSourceDataFileParseSaverSaveParseResults(TransactionTestCase):
    def setUp(self):
        """Create two SOURCE files (text and JSON) plus one id with no model."""
        django.setup()
        self.workspace = Workspace.objects.create(name='Test workspace')
        self.file_name_1 = 'my_file.txt'
        self.media_type_1 = 'text/plain'
        self.source_file_1 = ScaleFile.objects.create(file_name=self.file_name_1, file_type='SOURCE',
                                                      media_type=self.media_type_1, file_size=10, data_type_tags=['Dummy'],
                                                      file_path='the_path', workspace=self.workspace)
        self.file_name_2 = 'my_file.json'
        self.media_type_2 = 'application/json'
        self.source_file_2 = ScaleFile.objects.create(file_name=self.file_name_2, file_type='SOURCE',
                                                      media_type=self.media_type_2, file_size=10, data_type_tags=['Dummy'],
                                                      file_path='the_path', workspace=self.workspace)
        # Deliberately matches no ScaleFile row: exercises the code path that
        # skips ids without a corresponding source file.
        self.extra_source_file_id = 99999
    @patch('source.configuration.source_data_file.SourceFile.objects.save_parse_results')
    def test_successful(self, mock_save):
        """Tests calling SourceDataFileParseSaver.save_parse_results() successfully"""
        geo_json = {'type': 'Feature'}
        started = now()
        ended = started + datetime.timedelta(days=1)
        # quick hack to give these a valid timezone. Easier than creating a TZ object since we don't really care about the time for this test.
        started = parse_datetime(started.isoformat() + "Z")
        ended = parse_datetime(ended.isoformat() + "Z")
        file_ids = [self.source_file_1.id, self.source_file_2.id, self.extra_source_file_id]
        parse_results = {self.file_name_1: (geo_json, started, None, [], None),
                         self.file_name_2: (None, None, ended, [], None),
                         'FILE_WITH_NO_SOURCE_FILE_MODEL': (None, None, None, None, None)}
        SourceDataFileParseSaver().save_parse_results(parse_results, file_ids)
        # Only the two names with real ScaleFile rows are persisted; the
        # unknown name and the dangling extra id are silently skipped.
        calls = [call(self.source_file_1.id, geo_json, started, None, [], None),
                 call(self.source_file_2.id, None, None, ended, [], None)]
        self.assertEqual(mock_save.call_count, 2)
        mock_save.assert_has_calls(calls, any_order=True)
@patch('source.configuration.source_data_file.SourceFile.objects.save_parse_results')
def test_successful_v6(self, mock_save):
"""Tests calling SourceDataFileParseSaver.save_parse_results_v6() successfully"""
started = '2018-06-01T00:00:00Z'
ended = '2018-06-01T01:00:00Z'
types = ['one', 'two', 'three']
new_workspace_path = 'awful/path'
data = {
'type': 'Feature',
'geometry': {
'type': 'Point',
'coordinates': [0, 1]
},
'properties':
{
'dataStarted': started,
'dataEnded': ended,
'dataTypes': types,
'newWorkspacePath': new_workspace_path
}
}
metadata = {self.source_file_1.id: SeedMetadata.metadata_from_json(data, do_validate= | False)}
calls = [call(self.source_file_1.id, data, parse_datetime(started), parse_datetime(ended), types, new_workspace_path)]
SourceDataFileParseSaver().save_parse_results_v6(metadata)
self.assertEqual(mock_save.call_count, 1)
mock_save.assert_has_calls(calls, any_order=Tr | ue)
|
google/prog-edu-assistant | python/colab/preamble.py | Python | apache-2.0 | 4,093 | 0.012949 | # This is helper cell that defines code checking (testing) function Check()
# Please run it once, but you do not need to understand it.
import re
import sys
import jinja2
from IPython.core import display
from prog_edu_assistant_tools.magics import report, autotest, CaptureOutput
from google.colab import _message as google_message
def GetNotebook():
    """Downloads the ipynb source of Colab notebook"""
    # Blocking RPC to the Colab frontend; the reply wraps the notebook
    # JSON under the "ipynb" key.
    response = google_message.blocking_request(
        "get_ipynb", request="", timeout_sec=120)
    return response["ipynb"]
def RunInlineTests(submission_source, inlinetests):
    """Runs an inline test.

    Executes the student's submission followed by each inline test in a
    shared environment, collecting AssertionError messages and anything
    written to stderr.  Returns an HTML report string.

    Args:
      submission_source: str, the student's code cell source.
      inlinetests: dict mapping test name -> test source code.
    """
    errors = []
    for test_name, test_source in inlinetests.items():
        #print(f'Running inline test {test_name}:\n{test_source}', file=sys.stderr)
        with CaptureOutput() as (stdout, stderr):
            try:
                # Run the submission first so the test can see its names.
                env = {}
                exec(submission_source, globals(), env)
                exec(test_source, globals(), env)
            except AssertionError as e:
                # Only assertion failures are reported; other exceptions
                # propagate to the caller.
                errors.append(str(e))
        if len(stderr.getvalue()) > 0:
            errors.append('STDERR:' + stderr.getvalue())
    if len(errors) > 0:
        results = {'passed': False, 'error': '\n'.join(errors)}
    else:
        results = {'passed': True}
    # Jinja2 template rendering the submission and a pass/fail verdict.
    template_source = """
<h4 style='color: #387;'>Your submission</h4>
<pre style='background: #F0F0F0; padding: 3pt; margin: 4pt; border: 1pt solid #DDD; border-radius: 3pt;'>{{ formatted_source }}</pre>
<h4 style='color: #387;'>Results</h4>
{% if 'passed' in results and results['passed'] %}
&#x2705;
Looks OK.
{% elif 'error' in results %}
&#x274C;
{{results['error'] | e}}
{% else %}
&#x274C; Something is wrong.
{% endif %}"""
    template = jinja2.Template(template_source)
    html = template.render(formatted_source=submission_source, results=results)
    return html
def Check(exercise_id):
    """Checks one exercise against embedded inline tests.

    Locates the submission cell for exercise_id in the current notebook,
    extracts its inline tests (from cell metadata, or from subsequent
    %%inlinetest cells), runs them and returns an HTML report.
    """

    def _get_exercise_id(cell):
        # Exercise id can come from cell metadata or from an
        # "# EXERCISE_ID: ..." comment inside a code cell.
        if 'metadata' in cell and 'exercise_id' in cell['metadata']:
            return cell['metadata']['exercise_id']
        if 'source' not in cell or 'cell_type' not in cell or cell['cell_type'] != 'code':
            return None
        source = ''.join(cell['source'])
        m = re.search('(?m)^# *EXERCISE_ID: [\'"]?([a-zA-Z0-9_.-]*)[\'"]? *\n', source)
        if m:
            return m.group(1)
        return None

    notebook = GetNotebook()
    # 1. Find the first cell with specified exercise ID.
    found = False
    for (i, cell) in enumerate(notebook['cells']):
        if _get_exercise_id(cell) == exercise_id:
            found = True
            break
    if not found:
        raise Exception(f'exercise {exercise_id} not found')
    submission_source = ''.join(cell['source'])  # extract the submission cell
    submission_source = re.sub(r'^%%(solution|submission)[ \t]*\n', '', submission_source)  # cut %%solution magic
    inlinetests = {}
    if 'metadata' in cell and 'inlinetests' in cell['metadata']:
        inlinetests = cell['metadata']['inlinetests']
    if len(inlinetests) == 0:
        j = i+1
        # 2. If inline tests were not present in metadata, find the inline tests
        # that follow this exercise ID.
        while j < len(notebook['cells']):
            cell = notebook['cells'][j]
            if 'source' not in cell or 'cell_type' not in cell or cell['cell_type'] != 'code':
                j += 1
                continue
            id = _get_exercise_id(cell)
            source = ''.join(cell['source'])
            if id == exercise_id:
                # 3. Pick the last marked cell as submission cell.
                submission_source = source  # extract the submission cell
                submission_source = re.sub(r'^%%(solution|submission)[ \t]*\n', '', submission_source)  # cut %%solution magic
                j += 1
                continue
            m = re.match(r'^%%inlinetest[ \t]*([a-zA-Z0-9_]*)[ \t]*\n', source)
            if m:
                test_name = m.group(1)
                test_source = source[m.end(0):]  # cut %%inlinetest magic
                # 2a. Store the inline test.
                inlinetests[test_name] = test_source
            if id is not None and id != exercise_id:
                # 4. Stop at the next exercise_id.
                break
            j += 1
    html = RunInlineTests(submission_source, inlinetests)
    return display.HTML(html)
|
sdrdis/iarpa_contest_submission | chain.py | Python | mit | 1,327 | 0.012811 | '''
This the main code for running the chain.
It is quite straightforward: it runs chain_pairwise_pc and chain_merge_pcs (with a few additional logs...)
'''
import chain_pairwise_pc
import chain_merge_pcs
import time
from params import *
import sys
import os
import os.path
import shutil
import numpy as np
import resource
from functions import *
# Command-line driver (Python 2): runs the pairwise point-cloud stage and
# then the merge stage, logging per-stage durations.
nb_args = len(sys.argv)
if (nb_args < 4):
    print 'Correct format: python chain.py [Input KML file] [Path to NITF image folder] [Output file]'
else:
    kml_path = sys.argv[1]
    images_paths = sys.argv[2]
    # Normalize the image folder path to end with a slash.
    if (images_paths[-1] != '/'):
        images_paths += '/'
    out_path = sys.argv[3]
    skip_align = False
    # Optional flag anywhere on the command line.
    # NOTE(review): skip_align is set here but never read below — presumably
    # consumed via the star-imported params module; confirm before removing.
    for arg in sys.argv:
        if (arg == 'skip_align'):
            skip_align = True
    start_time = time.time()
    times = [start_time]
    # Wipe the temp dir between runs unless debugging (ignore_errors=True).
    if not is_debug_mode:
        shutil.rmtree(tmp_path, True)
    if not os.path.exists(tmp_path):
        os.makedirs(tmp_path)
    chain_pairwise_pc.run(kml_path, images_paths)
    register_time(times)
    chain_merge_pcs.run(kml_path, out_path)
    register_time(times)
    display_times(times)
    total_time = time.time() - start_time
    save_times(times, total_time, tmp_path + 'durations.txt')
    print 'Total time:', total_time
|
Alignak-monitoring-contrib/alignak-app | alignak_app/locales/__init__.py | Python | agpl-3.0 | 885 | 0 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2018:
# Matthieu Estrada, ttamalfor@gmail.com
#
# This file is part of (AlignakApp).
#
# (AlignakApp) is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# (AlignakApp) | is distributed in the | hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with (AlignakApp). If not, see <http://www.gnu.org/licenses/>.
"""
The Locales package contains classes to manage translation.
"""
|
mefly2012/platform | src/clean_validate/qyxg_zzjgdm.py | Python | apache-2.0 | 5,593 | 0.002275 | # -*- coding: utf-8 -*-
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
from common import public
import re
class qyxg_zzjgdm():
    """组织机构代码 (organization-code) record validator.

    Each ``check_<field>`` method validates one field of a record and
    returns ``None`` when the value is acceptable, otherwise a short
    Chinese error message describing the problem.  ``indexstr`` is an
    opaque row identifier passed by the caller and is never inspected.

    Changes vs. the previous revision: the six identical required-field
    checks and the three optional-date checks are deduplicated into
    private helpers, and the accidentally duplicated emptiness test in
    ``check_validdate`` is removed (behavior unchanged).
    """

    # Fields that have a corresponding check_* method.
    need_check_ziduan = [
        '_id',
        'bbd_table',
        'bbd_type',
        'bbd_uptime',
        'bbd_dotime',
        'bbd_version',
        'jgdm',
        'jgmc',
        'jgdjzh',
        'credit_code',
        'organization_type',
        'certificate_date',
        'certificate_orgname',
        'esdate',
        'validdate'
    ]

    # ---- shared helpers -------------------------------------------------

    def _require_nonempty(self, ustr):
        """Return u'为空' when ustr is None/empty/blank, else None."""
        if ustr and len(ustr.strip()):
            return None
        return u'为空'

    def _check_optional_date(self, ustr, errmsg):
        """Optional date field: empty is OK; otherwise must satisfy
        public.date_format (yyyy年mm月dd日 / yyyy-mm-dd / yyyy/mm/dd /
        yyyymmdd / yyyy.mm.dd).  Returns errmsg on failure."""
        ret = None
        if ustr and len(ustr.strip()):
            if not public.date_format(ustr):
                ret = errmsg
        return ret

    # ---- required bookkeeping fields ------------------------------------

    def check__id(self, indexstr, ustr):
        """唯一id: 不可为空"""
        return self._require_nonempty(ustr)

    def check_bbd_table(self, indexstr, ustr):
        """最终入库表名: 不可为空"""
        return self._require_nonempty(ustr)

    def check_bbd_type(self, indexstr, ustr):
        """表类型: 不可为空"""
        return self._require_nonempty(ustr)

    def check_bbd_uptime(self, indexstr, ustr):
        """时间戳: 不可为空"""
        return self._require_nonempty(ustr)

    def check_bbd_dotime(self, indexstr, ustr):
        """日期: 不可为空"""
        return self._require_nonempty(ustr)

    def check_bbd_version(self, indexstr, ustr):
        """版本号: 不可为空"""
        return self._require_nonempty(ustr)

    # ---- business fields -------------------------------------------------

    def check_jgdm(self, indexstr, ustr):
        """机构代码: 不为空; 9位数字/字母组合, 且必须含数字"""
        ret = None
        if ustr and len(ustr.strip()):
            if re.compile(u'^[\da-zA-Z]{9}$').match(ustr):
                if not re.compile(u'[\d]').search(ustr):
                    ret = u'没有数字'
            else:
                ret = u'不满足9位“数字、字母”的组合'
        else:
            ret = u'为空'
        return ret

    def check_jgmc(self, indexstr, ustr):
        """机构名称: 不为空; 须含4个以上汉字 (允许括号/数字/英文)"""
        ret = None
        if ustr and len(ustr.strip()):
            if not public.has_count_hz(ustr, 4):
                ret = u'没有4个以上汉字'
        else:
            ret = u'为空'
        return ret

    def check_jgdjzh(self, indexstr, ustr):
        """机构登记证号: 可为空; 若非空必须含数字"""
        ret = None
        if ustr and len(ustr.strip()):
            if not re.compile(u'[\d]').search(ustr):
                ret = u'没有数字'
        return ret

    def check_credit_code(self, indexstr, ustr):
        """统一社会信用代码: 可为空; 若非空须为18位数字/字母/*/-, 且含数字"""
        ret = None
        if ustr and len(ustr.strip()):
            if re.compile(u'^[\da-zA-Z\*-]{18}$').match(ustr):
                if not re.compile(u'[\d]').search(ustr):
                    ret = u'没有数字'
            else:
                ret = u'不满足18位数字字母*-'
        return ret

    def check_organization_type(self, indexstr, ustr):
        """机构类型: 可为空; 若非空则必须全为汉字"""
        ret = None
        if ustr and len(ustr.strip()):
            if not public.is_allchinese(ustr):
                ret = u'不全为汉字'
        return ret

    def check_certificate_date(self, indexstr, ustr):
        """办证日期: 可为空; 非空须为合法日期格式"""
        return self._check_optional_date(ustr, u'不合法日期')

    def check_certificate_orgname(self, indexstr, ustr):
        """办证机构名称: 可为空; 若非空必须含汉字"""
        ret = None
        if ustr and len(ustr.strip()):
            if not public.has_count_hz(ustr, 1):
                ret = u'没有汉字'
        return ret

    def check_esdate(self, indexstr, ustr):
        """注册日期: 可为空; 非空须为合法日期格式"""
        return self._check_optional_date(ustr, u'不满足日期格式')

    def check_validdate(self, indexstr, ustr):
        """截止日期: 可为空; 非空须为合法日期格式.

        The previous revision duplicated the emptiness test here; the
        duplicate was redundant and has been removed."""
        return self._check_optional_date(ustr, u'不满足日期格式')
if __name__ == '__main__':
    # Ad-hoc smoke test: validate a sample organization name.
    a = qyxg_zzjgdm()
    c = a.check_jgmc(1, 'a11a(b1*1b)cc(a(d1*1d)e)eee')
    # Parenthesized print works identically under Python 2 and Python 3
    # for a single argument (the bare `print c` form was Python-2-only).
    print(c)
|
libscie/liberator | liberator/lib/python3.6/site-packages/requests/utils.py | Python | cc0-1.0 | 27,607 | 0.000616 | # -*- coding: utf-8 -*-
"""
requests.utils
~~~~~~~~~~~~~~
This module provides utility functions that are used within Requests
that are also useful for external consumption.
"""
import cgi
import codecs
import collections
import contextlib
import io
import os
import platform
import re
import socket
import struct
import warnings
from .__version__ import __version__
from . import certs
# to_native_string is unused here, but imported here for backwards compatibility
from ._internal_utils import to_native_string
from .compat import parse_http_list as _parse_list_header
from .compat import (
quote, urlparse, bytes, str, OrderedDict, unquote, getproxies,
proxy_bypass, urlunparse, basestring, integer_types, is_py3,
proxy_bypass_environment, getproxies_environment)
from .cookies import cookiejar_from_dict
from .structures import CaseInsensitiveDict
from .exceptions import (
InvalidURL, InvalidHeader, FileModeWarning, UnrewindableBodyError)
NETRC_FILES = ('.netrc', '_netrc')
DEFAULT_CA_BUNDLE_PATH = certs.where()
if platform.system() == 'Windows':
    # provide a proxy_bypass version on Windows without DNS lookups
    def proxy_bypass_registry(host):
        """Check the Windows registry's ProxyOverride list for host."""
        if is_py3:
            import winreg
        else:
            import _winreg as winreg
        try:
            internetSettings = winreg.OpenKey(winreg.HKEY_CURRENT_USER,
                r'Software\Microsoft\Windows\CurrentVersion\Internet Settings')
            proxyEnable = winreg.QueryValueEx(internetSettings,
                                              'ProxyEnable')[0]
            proxyOverride = winreg.QueryValueEx(internetSettings,
                                                'ProxyOverride')[0]
        except OSError:
            # Missing keys mean no proxy configuration -> no bypass.
            return False
        if not proxyEnable or not proxyOverride:
            return False

        # make a check value list from the registry entry: replace the
        # '<local>' string by the localhost entry and the corresponding
        # canonical entry.
        proxyOverride = proxyOverride.split(';')
        # now check if we match one of the registry values.
        for test in proxyOverride:
            if test == '<local>':
                # '<local>' matches any bare hostname (no dots).
                if '.' not in host:
                    return True
            # Translate the glob pattern to a regex, then match.
            test = test.replace(".", r"\.")  # mask dots
            test = test.replace("*", r".*")  # change glob sequence
            test = test.replace("?", r".")   # change glob char
            if re.match(test, host, re.I):
                return True
        return False

    def proxy_bypass(host):  # noqa
        """Return True, if the host should be bypassed.

        Checks proxy settings gathered from the environment, if specified,
        or the registry.
        """
        if getproxies_environment():
            return proxy_bypass_environment(host)
        else:
            return proxy_bypass_registry(host)
def dict_to_sequence(d):
    """Returns an internal sequence dictionary update."""
    # Mappings are replaced by their (key, value) item view; any other
    # object is passed through untouched.
    return d.items() if hasattr(d, 'items') else d
def super_len(o):
    """Best-effort remaining-length of a string/bytes/file-like object.

    Tries, in order: len(o), an explicit .len attribute, and fstat on the
    object's file descriptor; then subtracts the current read position
    (via tell/seek) so the result is the number of bytes left to read.
    Never returns a negative number.
    """
    total_length = None
    current_position = 0

    if hasattr(o, '__len__'):
        total_length = len(o)

    elif hasattr(o, 'len'):
        total_length = o.len

    elif hasattr(o, 'fileno'):
        try:
            fileno = o.fileno()
        except io.UnsupportedOperation:
            # Object looks file-like but has no real descriptor (e.g. BytesIO).
            pass
        else:
            total_length = os.fstat(fileno).st_size

            # Having used fstat to determine the file length, we need to
            # confirm that this file was opened up in binary mode.
            if 'b' not in o.mode:
                warnings.warn((
                    "Requests has determined the content-length for this "
                    "request using the binary size of the file: however, the "
                    "file has been opened in text mode (i.e. without the 'b' "
                    "flag in the mode). This may lead to an incorrect "
                    "content-length. In Requests 3.0, support will be removed "
                    "for files in text mode."),
                    FileModeWarning
                )

    if hasattr(o, 'tell'):
        try:
            current_position = o.tell()
        except (OSError, IOError):
            # This can happen in some weird situations, such as when the file
            # is actually a special file descriptor like stdin. In this
            # instance, we don't know what the length is, so set it to zero and
            # let requests chunk it instead.
            if total_length is not None:
                current_position = total_length
        else:
            if hasattr(o, 'seek') and total_length is None:
                # StringIO and BytesIO have seek but no useable fileno
                try:
                    # seek to end of file
                    o.seek(0, 2)
                    total_length = o.tell()

                    # seek back to current position to support
                    # partially read file-like objects
                    o.seek(current_position or 0)
                except (OSError, IOError):
                    total_length = 0

    if total_length is None:
        total_length = 0

    return max(0, total_length - current_position)
def get_netrc_auth(url, raise_errors=False):
    """Returns the Requests tuple auth for a given url from netrc.

    Looks up the url's host in ~/.netrc or ~/_netrc and returns a
    (login, password) tuple, or None when no file/entry exists.  Parse
    and permission errors are swallowed unless raise_errors is True.
    """
    try:
        from netrc import netrc, NetrcParseError

        netrc_path = None

        for f in NETRC_FILES:
            try:
                loc = os.path.expanduser('~/{0}'.format(f))
            except KeyError:
                # os.path.expanduser can fail when $HOME is undefined and
                # getpwuid fails. See http://bugs.python.org/issue20164 &
                # https://github.com/requests/requests/issues/1846
                return

            if os.path.exists(loc):
                netrc_path = loc
                break

        # Abort early if there isn't one.
        if netrc_path is None:
            return

        ri = urlparse(url)

        # Strip port numbers from netloc. This weird `if...encode`` dance is
        # used for Python 3.2, which doesn't support unicode literals.
        splitstr = b':'
        if isinstance(url, str):
            splitstr = splitstr.decode('ascii')
        host = ri.netloc.split(splitstr)[0]

        try:
            _netrc = netrc(netrc_path).authenticators(host)
            if _netrc:
                # Return with login / password
                login_i = (0 if _netrc[0] else 1)
                return (_netrc[login_i], _netrc[2])
        except (NetrcParseError, IOError):
            # If there was a parsing error or a permissions issue reading the file,
            # we'll just skip netrc auth unless explicitly asked to raise errors.
            if raise_errors:
                raise

    # AppEngine hackiness.
    except (ImportError, AttributeError):
        pass
def guess_filename(obj):
    """Tries to guess the filename of the given object."""
    name = getattr(obj, 'name', None)
    # Reject missing/empty names, non-string names, and pseudo-names
    # such as '<stdin>' that wrap the descriptor in angle brackets.
    if not (name and isinstance(name, basestring)):
        return None
    if name.startswith('<') or name.endswith('>'):
        return None
    return os.path.basename(name)
def from_key_val_list(value):
    """Take an object and test to see if it can be represented as a
    dictionary. Unless it can not be represented as such, return an
    OrderedDict, e.g.,

    ::

        >>> from_key_val_list([('key', 'val')])
        OrderedDict([('key', 'val')])
        >>> from_key_val_list('string')
        ValueError: need more than 1 value to unpack
        >>> from_key_val_list({'key': 'val'})
        OrderedDict([('key', 'val')])

    :rtype: OrderedDict
    """
    if value is None:
        return None

    # Scalars cannot be interpreted as (key, value) pairs.
    scalar_types = (str, bytes, bool, int)
    if isinstance(value, scalar_types):
        raise ValueError('cannot encode objects that are not 2-tuples')

    return OrderedDict(value)
def to_key_val_list(value):
"""Take an object and test to see if it can be represented as a
dictionary. If it can be, return a list of tuples, e.g.,
::
>>> to_key_val_list([('key', 'val')])
[('key', 'val')]
>>> to_key_val_list({'key': 'val'})
[('key', 'va |
vollov/build-tracker | src/api/__init__.py | Python | mit | 34 | 0.029412 | # | -*- coding: utf-8 | -*-
import api |
Huluzai/DoonSketch | inkscape-0.48.5/share/extensions/voronoi2svg.py | Python | gpl-2.0 | 10,842 | 0.027301 | #!/usr/bin/env python
"""
voronoi2svg.py
Create Voronoi diagram from seeds (midpoints of selected objects)
- Voronoi Diagram algorithm and C code by Steven Fortune, 1987, http://ect.bell-labs.com/who/sjf/
- Python translation to file voronoi.py by Bill Simons, 2005, http://www.oxfish.com/
Copyright (C) 2011 Vincent Nivoliers and contributors
Contributors
~suv, <suv-sf@users.sf.net>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import sys, os
import inkex, simplestyle, simplepath, simpletransform
import voronoi
import gettext
| _ = gettext.gettext
class Point:
    """A simple 2D point holding x and y coordinates."""
    def __init__(self,x,y):
        self.x = x  # horizontal coordinate
        self.y = y  # vertical coordinate
class Voronoi2svg(inkex.Effect):
def __init__(self):
    """Initialise the Inkscape effect and declare its CLI options."""
    inkex.Effect.__init__(self)
    #{{{ Additional options
    # "--tab" is sent by the Inkscape dialog but carries no semantics here.
    self.OptionParser.add_option(
        "--tab",
        action="store",
        type="string",
        dest="tab")
    self.OptionParser.add_option(
        '--diagram-type',
        action = 'store',
        type = 'choice', choices=['Voronoi','Delaunay','Both'],
        default = 'Voronoi',
        dest='diagramType',
        help = 'Defines the type of the diagram')
    self.OptionParser.add_option(
        '--clip-box',
        action = 'store',
        type = 'choice', choices=['Page','Automatic from seeds'],
        default = 'Page',
        dest='clipBox',
        help = 'Defines the bounding box of the Voronoi diagram')
    self.OptionParser.add_option(
        '--show-clip-box',
        action = 'store',
        type = 'inkbool',
        default = False,
        dest='showClipBox',
        help = 'Set this to true to write the bounding box')
    #}}}
#{{{ Clipping a line by a bounding box
def dot(self, u, v):
    """Return the 2D dot product of the vectors u and v."""
    return u[0] * v[0] + u[1] * v[1]
def intersectLineSegment(self,line,v1,v2):
    """Intersect the line a*x + b*y = c (line == (a, b, c)) with the
    segment [v1, v2].  Returns (x, y, True) on a hit, (0, 0, False)
    otherwise."""
    # Signed side of each endpoint relative to the line.
    s1 = self.dot(line,v1) - line[2]
    s2 = self.dot(line,v2) - line[2]
    if s1*s2 > 0:
        # Both endpoints strictly on the same side: no intersection.
        return (0,0,False)
    else:
        tmp = self.dot(line,v1)-self.dot(line,v2)
        if tmp == 0:
            # Segment is parallel to (or lies on) the line; treat as a miss.
            return(0,0,False)
        # Barycentric weights of the intersection point along [v1, v2].
        u = (line[2]-self.dot(line,v2))/tmp
        v = 1-u
        return (u*v1[0]+v*v2[0],u*v1[1]+v*v2[1],True)
def clipEdge(self,vertices, lines, edge, bbox):
    """Clip one Voronoi edge against the bounding box.

    edge is (line-index, v1-index, v2-index); a vertex index of -1 denotes
    an infinite end.  bbox is (xmin, xmax, ymin, ymax).  Returns the
    clipped segment as [[x1, y1], [x2, y2]], or [] when the edge lies
    entirely outside the box.
    """
    #bounding box corners
    bbc = []
    bbc.append((bbox[0],bbox[2]))
    bbc.append((bbox[1],bbox[2]))
    bbc.append((bbox[1],bbox[3]))
    bbc.append((bbox[0],bbox[3]))
    #record intersections of the line with bounding box edges
    line = (lines[edge[0]])
    interpoints = []
    for i in range(4):
        p = self.intersectLineSegment(line,bbc[i],bbc[(i+1)%4])
        if (p[2]):
            interpoints.append(p)
    #if the edge has no intersection, return empty intersection
    if (len(interpoints)<2):
        return []
    if (len(interpoints)>2): #happens when the edge crosses the corner of the box
        interpoints = list(set(interpoints)) #remove doubles
    #points of the edge (flag False marks segment endpoints, True marks
    #box-boundary crossings)
    v1 = vertices[edge[1]]
    interpoints.append((v1[0],v1[1],False))
    v2 = vertices[edge[2]]
    interpoints.append((v2[0],v2[1],False))
    #sorting the points in the widest range to get them in order on the line
    minx = interpoints[0][0]
    maxx = interpoints[0][0]
    miny = interpoints[0][1]
    maxy = interpoints[0][1]
    for point in interpoints:
        minx = min(point[0],minx)
        maxx = max(point[0],maxx)
        miny = min(point[1],miny)
        maxy = max(point[1],maxy)
    if (maxx-minx) > (maxy-miny):
        interpoints.sort()
    else:
        interpoints.sort(key=lambda pt: pt[1])
    #sweep along the sorted points, tracking whether we are inside the box
    #(inside) and inside the edge's own extent (startWrite)
    start = []
    inside = False #true when the part of the line studied is in the clip box
    startWrite = False #true when the part of the line is in the edge segment
    for point in interpoints:
        if point[2]: #The point is a bounding box intersection
            if inside:
                if startWrite:
                    return [[start[0],start[1]],[point[0],point[1]]]
                else:
                    return []
            else:
                if startWrite:
                    start = point
            inside = not inside
        else: #The point is a segment endpoint
            if startWrite:
                if inside:
                    #a vertex ends the line inside the bounding box
                    return [[start[0],start[1]],[point[0],point[1]]]
                else:
                    return []
            else:
                if inside:
                    start = point
            startWrite = not startWrite
#{{{ Transformation helpers
def invertTransform(self,mat):
    """Invert a 2x3 affine transform [[a, b, tx], [c, d, ty]].

    When the linear part is singular (det == 0) the linear part of the
    result is zeroed and only the translation is negated."""
    det = mat[0][0]*mat[1][1] - mat[0][1]*mat[1][0]
    if det !=0: #det is 0 only in case of 0 scaling
        #invert the rotation/scaling part
        a11 = mat[1][1]/det
        a12 = -mat[0][1]/det
        a21 = -mat[1][0]/det
        a22 = mat[0][0]/det
        #invert the translational part
        a13 = -(a11*mat[0][2] + a12*mat[1][2])
        a23 = -(a21*mat[0][2] + a22*mat[1][2])
        return [[a11,a12,a13],[a21,a22,a23]]
    else:
        return[[0,0,-mat[0][2]],[0,0,-mat[1][2]]]
def getGlobalTransform(self,node):
    """Compose the node's transform with all ancestor transforms.

    Returns the accumulated 2x3 matrix, or None when no transform is set
    anywhere on the path to the root.
    """
    parent = node.getparent()
    myTrans = simpletransform.parseTransform(node.get('transform'))
    if myTrans:
        if parent is not None:
            parentTrans = self.getGlobalTransform(parent)
            if parentTrans:
                return simpletransform.composeTransform(parentTrans,myTrans)
            else:
                return myTrans
        # NOTE(review): when the node has a transform but no parent, control
        # falls through and returns None, discarding myTrans — looks like a
        # latent bug for a transformed root node; confirm before changing.
    else:
        if parent is not None:
            return self.getGlobalTransform(parent)
        else:
            return None
#}}}
def effect(self):
#{{{ Check that elements have been selected
if len(self.options.ids) == 0:
inkex.errormsg(_("Please select objects!"))
return
#}}}
#{{{ Drawing styles
linestyle = {
'stroke' : '#000000',
'linewidth' : '1',
'fill' : 'none'
}
facestyle = {
'stroke' : '#ff0000',
'linewidth' : '1',
'fill' : 'none'
}
#}}}
#{{{ Handle the transformation of the current group
parentGroup = self.getParentNode(self.selected[self.options.ids[0]])
trans = self.getGlobalTransform(parentGroup)
invtrans = None
if trans:
invtrans = self.invertTransform(trans)
#}}}
#{{{ Recovery of the selected objects
pts = []
nodes = []
seeds = []
for id in self.options.ids:
node = self.selected[id]
nodes.append(node)
bbox = simpletransform.computeBBox([node])
if bbox:
cx = 0.5*(bbox[0]+bbox[1])
cy = 0.5*(bbox[2]+bbox[3])
pt = [cx,cy]
if trans:
simpletransform.applyTransformToPoint(trans,pt)
pts.append(Point(pt[0],pt[1]))
seeds.append(Point(cx,cy))
#}}}
#{{{ Creation of groups to store the result
if self.options.diagramType != 'Delaunay':
# Voronoi
groupVoronoi = inkex.etree.SubElement(parentGroup,inkex.addNS('g','svg'))
groupVoronoi.set(inkex.addNS('label', 'inkscape'), 'Voronoi')
if invtrans:
simpletransform.applyTransformToNode(invtrans,groupVoronoi)
if self.options.diagramType != 'Voronoi':
# Delaunay
groupDelaunay = inkex.etree.SubElement(parentGroup,inkex.addNS('g','svg'))
groupDelaunay.set(inkex.addNS('label', 'inkscape'), 'Delaunay')
#}}}
#{{{ Clipping box handling
if self.options.diagramType != 'Delaunay':
#Clipping bounding box creation
gBbox = simpletransform.computeBBox(nodes)
#Clipbox is the box to which the Voronoi diagram is restricted
clipBox = ()
if self.options.clipBox == 'Page':
svg = self.document.getroot()
w = inkex.unittouu(svg.get('width'))
h = inkex.unittouu(svg.get('height'))
clipBox = (0 |
magicalbob/ctime | ctime_blank.py | Python | gpl-2.0 | 698 | 0.001433 | """ blank screen to stop game being played out-of-hours """
import random
import os
import pygame
import pygame.locals
from ctime_common import go_fullscreen
class BlankScreen():
    """ a blank screen with no controls """
    def __init__(self, ctime, screen_width, screen_height, log):
        # `ctime` is the owning application object; `log` a standard logger.
        log.info('Time for bed said Zeberdee')
        self.screen_size = {'width': screen_width, 'height': screen_height}
        self.screen = pygame.display.get_surface()
        # Paint the whole surface black to hide the game.
        self.screen.fill(pygame.Color(0, 0, 0, 0),
                         (0, 0, screen_width, screen_height),
                         0)
        log.info('Lights out')
        # presumably switches the display power off via GPIO — TODO confirm.
        ctime.button_power.rpi_power()
        go_fullscreen()
|
PaulWGraham/ExampleSite | brogue/brogue/wsgi.py | Python | mit | 390 | 0 | """
WSGI config for brogue | project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "brogue.settings")
application = get_wsgi_app | lication()
|
vitojph/flws | flws/flwsnl.py | Python | apache-2.0 | 7,573 | 0.005547 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Simple flask-based API to access pattern.nl functionalities.
"""
__author__ = "Víctor Peinado"
__email__ = "vitojph@gmail.com"
__date__ = "19/07/2013"
from pattern.nl import parsetree, Word
from flask import Flask, Response, request
from flask.ext.restful import Api, Resource
import json
# #################################################################
# FreeLing settings (borrowed from freeling-3.0/APIs/python/sample.py)
PUNCTUATION = u""".,;:!? """
# #################################################################
# flask API
app = Flask(__name__)
api = Api(app)
# ##############################################################################
def handleParsedTreeAsString(parsedTree):
    """Handles a pattern parsed tree and transforms it into a S(entence)

    Flattens the tree into a list of tokens such as "S(", "NP(",
    "word/lemma/tag" and ")" that the caller joins into one string.
    """
    output = []
    for sentence in parsedTree:
        output.append("S(")
        for chunk in sentence.constituents(pnp=True):
            output.append("%s(" % chunk.type)
            # handle PNP chunks
            if isinstance(chunk, Word):
                # A bare word constituent: emit it inside its own bracket.
                # NOTE(review): this branch appears to emit no matching ")"
                # for the "%s(" opened above — confirm against the renderer.
                output.append("%s/%s/%s" % (chunk.string, chunk.lemma, chunk.tag))
            else:
                if chunk.type == "PNP":
                    # Prepositional chunks nest one extra level.
                    for ch in chunk.chunks:
                        output.append("%s(" % ch.type)
                        for word in ch.words:
                            output.append("%s/%s/%s" % (word.string, word.lemma, word.tag))
                        output.append(")")
                    output.append(")")
                else:
                    for word in chunk.words:
                        output.append("%s/%s/%s" % (word.string, word.lemma, word.tag))
                    output.append(")")
        output.append(")")
    return output
# ##############################################################################
def handleParsedTreeAsJSON(parsedTree):
    """Handles a pattern parsed tree and transforms it into a structured JSON format

    Emits one flat dict per node with `tag`/`text`, its `parent` tag and
    its `level` (depth) so a client can rebuild the hierarchy.
    """
    output = []
    parent = "ROOT"
    depth = 0
    for sentence in parsedTree:
        output.append(dict(tag="S", parent=parent, level=depth))
        for chunk in sentence.constituents(pnp=True):
            depth = 1
            parent = "S"
            output.append(dict(tag=chunk.type, parent=parent, level=depth))
            # handle PNP chunks
            if isinstance(chunk, Word):
                # Bare word constituent hangs directly off its chunk tag.
                output.append(dict(text=chunk.string, lemma=chunk.lemma, tag=chunk.tag, parent=chunk.type, level=depth+1))
            else:
                parent = chunk.type
                depth = 2
                if chunk.type == "PNP":
                    # Prepositional chunks nest one extra level.
                    for ch in chunk.chunks:
                        output.append(dict(tag=ch.type, parent=parent, level=depth))
                        parent = ch.type
                        depth = 3
                        for word in ch.words:
                            output.append(dict(text=word.string, lemma=word.lemma, tag=word.tag, parent=parent, level=depth))
                else:
                    for word in chunk.words:
                        output.append(dict(text=word.string, lemma=word.lemma, tag=word.tag, parent=parent, level=depth))
    return output
# ##############################################################################
class Splitter(Resource):
    """Splits an input text into sentences."""

    def post(self):
        """Return a JSON list with one {"oracion": <sentence>} per sentence."""
        text = request.json["texto"]
        # pattern's parser needs a sentence terminator to split on.
        if text[-1] not in PUNCTUATION:
            text = text + "."
        sentences = [
            {"oracion": " ".join(word.string for word in sentence.words)}
            for sentence in parsetree(text)
        ]
        return Response(json.dumps(sentences), mimetype="application/json")
class TokenizerSplitter(Resource):
    """Splits an input text into tokenized sentences."""

    def post(self):
        """Return a JSON list with one {"oracion": [tokens]} per sentence."""
        text = request.json["texto"]
        # pattern's parser needs a sentence terminator to split on.
        if text[-1] not in PUNCTUATION:
            text = text + "."
        sentences = [
            {"oracion": [word.string for word in sentence.words]}
            for sentence in parsetree(text)
        ]
        return Response(json.dumps(sentences), mimetype="application/json")
# ##############################################################################
class Tagger(Resource):
    """Performs POS tagging from an input text."""

    def post(self):
        """Tag the posted "texto" field.

        Optional "tagset" field selects the tagset ("wotan" by default;
        "penn" uses pattern.nl's built-in default).  Returns a JSON list of
        {palabra, lemas: [{lema, categoria}]} dicts, one per token.

        Note: the previous revision also read an unused "format" field from
        the request; that dead code has been removed (the field is still
        silently ignored, so clients are unaffected).
        """
        text = request.json["texto"]
        # set tagset: default is WOTAN
        try:
            tagset = request.json["tagset"]
        except KeyError:
            tagset = "wotan"
        # "penn" means: let parsetree use its default tagset.
        if tagset == "penn":
            parsedTree = parsetree(text, relations=True, lemmata=True)
        else:
            parsedTree = parsetree(text, relations=True, lemmata=True, tagset=tagset)
        output = []
        for sentence in parsedTree:
            for word in sentence:
                lemmas = [dict(lema=word.lemma, categoria=word.pos)]
                output.append(dict(palabra=word.string, lemas=lemmas))
        return Response(json.dumps(output), mimetype="application/json")
# ##############################################################################
class Parser(Resource):
    """FreeLing parser with three output formats: freeling-like, stanford-like and jsonified"""

    def post(self):
        """Parse the posted "texto" and return the tree.

        Optional fields: "format" ("string" by default, or "json") and
        "tagset" ("wotan" by default; "penn" uses the parser's default).
        """
        payload = request.json
        text = payload["texto"]
        output_format = payload.get("format", "string")
        tagset = payload.get("tagset", "wotan")
        # "penn" means: let parsetree use its default tagset.
        parse_kwargs = dict(relations=True, lemmata=True)
        if tagset != "penn":
            parse_kwargs["tagset"] = tagset
        parsing = parsetree(text, **parse_kwargs)
        if output_format == "string":
            tokens = handleParsedTreeAsString(parsing)
            return Response(json.dumps(dict(tree=" ".join(tokens))),
                            mimetype="application/json")
        elif output_format == "json":
            nodes = handleParsedTreeAsJSON(parsing)
            return Response(json.dumps(nodes), mimetype="application/json")
# #############################################################################
# Api resource routing
# split a text into sentences
api.add_resource(Splitter, "/splitter")
# split a text into tokenized sentences
api.add_resource(TokenizerSplitter, "/tokenizersplitter")
# perform PoS tagging from an input text
api.add_resource(Tagger, "/tagger")
# returns a parsed tree
api.add_resource(Parser, "/parser")
if __name__ == '__main__':
app.run(debug=True, host="0.0.0.0", port=9999)
|
ajaniv/django-core-models | configs/common/django.py | Python | mit | 3,872 | 0 | """
.. module:: configs.common.django
:synopsis: Django settings file.
Django settings for django_core_models project.
Generated by 'django-admin startproject' using Django 1.9.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os

# Restored: a stray '|' extraction artifact had been spliced into this import.
from .root import PROJECT_ROOT, DEBUG

# BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
BASE_DIR = PROJECT_ROOT
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
# BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# SECRET_KEY = 'ubd(v=i$@^plvy!-yyo*&@xi!y0514r4wkjp49k+e@r&*)7u-_'
ENV_SECRET_KEY = 'DJANGO_SECRET_KEY'
# Fail fast with a KeyError when the variable is missing -- the secret key
# must never silently fall back to a hard-coded default.
SECRET_KEY = os.environ[ENV_SECRET_KEY]
# SECURITY WARNING: don't run with debug turned on in production!
# DEBUG = True
ALLOWED_HOSTS = []
# Application definition
BASIC_DJANGO_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
]
EXTENDED_DJANGO_APPS = [
'rest_framework',
]
CUSTOM_APPS = [
'django_core_models.core.apps.CoreModelsConfig',
'django_core_models.demographics.apps.DemographicsConfig',
'django_core_models.images.apps.ImageConfig',
'django_core_models.locations.apps.LocationConfig',
'django_core_models.organizations.apps.OrganizationConfig',
'django_core_models.social_media.apps.SocialMediaConfig',
]
INSTALLED_APPS = BASIC_DJANGO_APPS + EXTENDED_DJANGO_APPS + CUSTOM_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'django_core_models_settings.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
'debug': DEBUG,
},
},
]
WSGI_APPLICATION = 'django_core_models_settings.wsgi.application'
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
VALIDATOR_PATH = 'django.contrib.auth.password_validation.'
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': VALIDATOR_PATH + 'UserAttributeSimilarityValidator',
},
{
'NAME': VALIDATOR_PATH + 'MinimumLengthValidator',
},
{
'NAME': VALIDATOR_PATH + 'CommonPasswordValidator',
},
{
'NAME': VALIDATOR_PATH + 'NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media/')
STATIC_ROOT = os.path.join(BASE_DIR, 'static/')
SITE_ID = 1
|
JeffAMcGee/friendloc | friendloc/explore/fixgis.py | Python | bsd-2-clause | 1,516 | 0.009894 | #!/usr/bin/env python
from collections import defaultdict
import itertools
import numpy
from friendloc.base import utils,gob
from friendloc.base.gisgraphy import GisgraphyResource
@gob.mapper(all_items=True)
def gnp_gps(users):
    "find home location and geocoded location for geocoded users"
    gis = GisgraphyResource()
    # NOTE(review): skips the first 2600 users -- presumably resuming a
    # partially completed run; confirm before reusing this offset.
    for user in itertools.islice(users,2600,None):
        gnp = gis.twitter_loc(user['location'])
        if gnp:
            # yield (geocoded-place dict, known home location) pairs
            yield (gnp.to_d(), user['mloc'])
@gob.mapper(all_items=True)
def mdists(gnp_gps):
    """Find the median geocoding error and save it to a dict.

    Input: (geocoded place dict, actual home location) pairs.
    Yields a single dict mapping feature id (or feature code, or
    'other') to the median error in miles.

    Restored: stray '|' extraction artifacts had been spliced into the
    function name and the loop target.
    """
    # minimum samples per feature before its own median is trusted
    item_cutoff = 2
    # minimum samples per feature *code* before that median is used
    kind_cutoff = 5
    mdist = {}
    dists = defaultdict(list)
    gnps = {}
    # Group geocoding errors by feature id ('COORD' when none present).
    for gnp, mloc in gnp_gps:
        d = utils.coord_in_miles(gnp, mloc)
        fid = gnp.get('fid', 'COORD')
        dists[fid].append(d)
        gnps[fid] = gnp
    codes = defaultdict(list)
    for k, gnp in gnps.iteritems():
        if len(dists[k]) > item_cutoff:
            # add an entry for each feature that has a meaningful median
            mdist[str(k)] = numpy.median(dists[k])
        else:
            codes[gnp.get('code')].append(dists[k][0])
    other = []
    for k, code in codes.iteritems():
        if len(code) > kind_cutoff:
            # add an entry for each feature code with a meaningful median
            mdist[k] = numpy.median(codes[k])
        else:
            other.extend(code)
    # add a catch-all for everything else
    mdist['other'] = numpy.median(other)
    yield mdist
|
dladd/pyFormex | pyformex/plugins/f2flu.py | Python | gpl-3.0 | 7,649 | 0.016473 | # $Id$ *** pyformex ***
##
## This file is part of pyFormex 0.8.9 (Fri Nov 9 10:49:51 CET 2012)
## pyFormex is a tool for generating, manipulating and transforming 3D
## geometrical models by sequences of mathematical operations.
## Home page: http://pyformex.org
## Project page: http://savannah.nongnu.org/projects/pyformex/
## Copyright 2004-2012 (C) Benedict Verhegghe (benedict.verhegghe@ugent.be)
## Distributed under the GNU General Public License version 3 or later.
##
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see http://www.gnu.org/licenses/.
##
"""Formex to Fluent translator.
This module contains some functions that can aid in exporting
pyFormex models to Fluent.
This script should be executed with the command
pyformex --nogui f2flu.py <stl_model>
"""
from __future__ import print_function
import sys
from plugins import tetgen
from elements import Tet4
from time import strftime, gmtime
from numpy import *
def writeHeading(fil, nodes, elems, text=''):
    """Write the Gambit neutral file control-info header.

    Currently tailored to hexahedral meshes (3 coordinates, 3 dof).
    """
    npoints = shape(nodes)[0]
    ncells = shape(elems)[0]
    fil.write(" CONTROL INFO 2.2.30\n")
    fil.write("** GAMBIT NEUTRAL FILE\n")
    fil.write('%s\n' % text)
    fil.write('PROGRAM: Gambit VERSION: 2.2.30\n')
    fil.write(strftime('%d %b %Y %H:%M:%S\n', gmtime()))
    fil.write(' NUMNP NELEM NGRPS NBSETS NDFCD NDFVL\n')
    fil.write('%10i%10i%10i%10i%10i%10i\n' % (npoints, ncells, 1, 0, 3, 3))
    fil.write('ENDOFSECTION\n')
def writeNodes(fil, nodes, nofs=1):
    """Write the nodal-coordinates section.

    nofs is an offset added to the node numbers; the default is 1
    because Gambit numbering starts at 1.
    """
    fil.write(' NODAL COORDINATES 2.2.30\n')
    num = nofs
    for point in nodes:
        fil.write("%10d%20.11e%20.11e%20.11e\n" % ((num,) + tuple(point)))
        num += 1
    fil.write('ENDOFSECTION\n')
def writeElems(fil, elems1, eofs=1, nofs=1):
    """Write the element-connectivity section (hexahedra only).

    eofs and nofs are offsets for element and node numbers; the
    defaults are 1 because Gambit numbering starts at 1.

    pyFormex orders hexahedron nodes like ABAQUS, while Gambit swaps
    nodes 2<->3 and 6<->7, so the connectivity is reordered on output.
    """
    # Column permutation from the ABAQUS to the Gambit hex convention
    # (fancy indexing yields a reordered copy, leaving elems1 intact).
    gambit_order = [0, 1, 3, 2, 4, 5, 7, 6]
    elems = elems1[:, gambit_order]
    fil.write(' ELEMENTS/CELLS 2.2.30\n')
    for num, conn in enumerate(elems + nofs):
        fil.write('%8d %2d %2d %8d%8d%8d%8d%8d%8d%8d\n %8d\n'
                  % ((num + eofs, 4, 8) + tuple(conn)))
    fil.write('ENDOFSECTION\n')
def writeGroup(fil, elems):
    """Write a single element-group section containing all elements.

    All cells are placed in one group named 'fluid', ten element
    numbers per line, followed by a final partial line when needed.
    """
    fil.write(' ELEMENT GROUP 2.2.30\n')
    fil.write('GROUP:%11d ELEMENTS:%11d MATERIAL:%11d NFLAGS:%11d\n' % (1,shape(elems)[0],2,1))
    fil.write('%32s\n' %'fluid')
    fil.write('%8d\n' %0)
    # Floor division: the original '/' yields a float under Python 3,
    # which breaks range(); '//' is identical under Python 2.
    n = shape(elems)[0] // 10
    for i in range(n):
        # one full line of ten consecutive element numbers
        fil.write('%8d%8d%8d%8d%8d%8d%8d%8d%8d%8d\n'
                  % tuple(10*i + j for j in range(1, 11)))
    for j in range(shape(elems)[0] - 10*n):
        fil.write('%8d' % (10*n + j + 1))
    fil.write('\n')
    fil.write('ENDOFSECTION\n')
def read_tetgen(filename):
    """Read a tetgen tetraeder model.

    filename is the base of the path of the input files.
    For a filename 'proj', nodes are expected in 'proj.1.node' and
    elems are in file 'proj.1.ele'.
    """
    # '.1' is tetgen's iteration suffix on its output files.
    nodes = tetgen.readNodes(filename+'.1.node')
    print("Read %d nodes" % nodes.shape[0])
    elems = tetgen.readElems(filename+'.1.ele')
    print("Read %d tetraeders" % elems.shape[0])
    return nodes,elems
def encode(i,j,k,n):
    """Pack three indices 0 <= i,j,k < n into one base-n integer."""
    return (i * n + j) * n + k
def decode(code,n):
    """Inverse of encode(): unpack a base-n integer into (i, j, k).

    Uses divmod (floor division) so the components stay integral on
    both Python 2 and Python 3; the original '/' produced floats under
    Python 3.
    """
    q, k = divmod(code, n)
    i, j = divmod(q, n)
    return i, j, k
def write_neu_hex(fil, mesh, eofs=1, nofs=1):
    """Write a hexahedral mesh to a .neu file (for use in Gambit).

    fil: output file name ('.neu' is appended when missing)
    mesh: pyFormex Mesh with 'coords' and 'elems' attributes
    eofs, nofs: offsets for element and node numbers
    """
    if not fil.endswith('.neu'):
        fil += '.neu'
    # Context manager guarantees the file is closed even on error.
    with open(fil, 'w') as f:
        writeHeading(f, mesh.coords, mesh.elems)
        print('Writing %s nodes to .neu file' % len(mesh.coords))
        writeNodes(f, mesh.coords, nofs=nofs)
        print('Writing %s elements to .neu file' % len(mesh.elems))
        writeElems(f, mesh.elems, eofs=eofs, nofs=nofs)
    # Restored: this message was garbled by a stray '|' artifact.
    print('Hexahedral mesh exported to "%s"' % fil)
|
def output_fluent(fil,nodes,elems):
    """Write a tetraeder mesh in Fluent format to fil.

    The tetraeder mesh consists of an array of nodal coordinates
    and an array of element connectivity.
    """
    print("Nodal coordinates")
    print(nodes)
    print("Element connectivity")
    print(elems)
    faces = array(Tet4.faces[1]) # Turning faces into an array is important !
    print("Tetraeder faces")
    print(faces)
    # For each element, take the node numbers of its four faces.
    elf = elems.take(faces,axis=1)
    # Remark: the shorter syntax elems[faces] takes its elements along the
    # axis 0. Then we would need to transpose() first (and probably
    # swap axes again later)
    print("The faces of the elements:")
    print(elf)
    # We need a copy to sort the nodes (sorting is done in-place)
    elfs = elf.copy()
    elfs.sort(axis=2)
    print("The faces with sorted nodes:")
    print(elfs)
    # magic is one past the largest node number, so each sorted node
    # triple gets a unique base-magic code via encode().
    magic = elems.max()+1
    print("Magic number = %d" % magic)
    code = encode(elfs[:,:,0],elfs[:,:,1],elfs[:,:,2],magic)
    # Remark how nice the encode function works on the whole array
    print("Encoded faces:")
    print(code)
    code = code.ravel()
    print(code)
    print("Just A Check:")
    print("Element 5 face 2 is %s " % elf[5,2])
    print("Element 5 face 2 is %s " % list(decode(code[4*5+2],magic)))
    # Sorting the codes brings identical (shared) faces next to each other.
    srt = code.argsort()
    print(srt)
    print(code[srt])
    # Now shipout the faces in this order, removing the doubles
    # NOTE(review): the '/' divisions below are Python-2 integer division;
    # under Python 3 they would produce floats and break the indexing.
    j = -1
    for i in srt:
        if j < 0: # no predecessor (or predecessor already shipped)
            j = i
        else:
            e1,f1 = j/4, j%4
            if code[i] == code[j]:
                e2,f2 = i/4, i%4
                j = -1
            else:
                e2 = -1
                j = i
            print("Face %s belongs to el %s and el %s" % ( elf[e1,f1], e2, e1 ))
def tetgen2fluent(filename):
    """Convert a tetgen tetraeder model to fluent.

    filename is the base path of the tetgen input files.
    This will create a Fluent model in filename+'.flu'
    """
    nodes, elems = read_tetgen(filename)
    if nodes is None or elems is None:
        print("Error while reading model %s" % filename)
        return
    # A context manager guarantees the file is closed even when
    # output_fluent raises; open() never returns a falsy object, so the
    # old 'if fil:' test was dead code.
    with open(filename + '.flu', 'w') as fil:
        output_fluent(fil, nodes, elems)
# This is special for pyFormex scripts !
if __name__ == "script":
    for arg in argv:
        print("Converting model %s" % arg)
        tetgen2fluent(arg)
# NOTE(review): this reassignment happens after the conversion loop and
# therefore has no effect on it -- presumably a leftover test value.
argv = [ 'hallo' ]
# End
|
pe2mbs/ajenti-mdadm | make_messages.py | Python | agpl-3.0 | 4,684 | 0.00491 | #!/usr/bin/env python
# coding=utf-8
# ---------------------------------------------------------------------------
# Messages compiler for Ajenti plugins.
#
# Copyright (C) 2015 Marc Bertens <m.bertens@pe2mbs.nl>
# Adapted from make_messages.py by Eugene Pankov
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses/agpl-3.0.html.
# ---------------------------------------------------------------------------
#
import os
import sys
import subprocess
from lxml import etree
def check_call(*args):
    """Run subprocess.call(*args), reporting failures without raising.

    Any exception is caught and printed together with the attempted
    command line, so one failed tool invocation does not abort the
    whole message build.
    """
    try:
        subprocess.call(*args)
    except Exception as exc:
        print('Call failed')
        print(' '.join(args[0]))
        print(str(exc))
LOCALEDIR = 'locales'
LANGUAGES = [x for x in os.listdir(LOCALEDIR) if not '.' in x]
pot_path = os.path.join(LOCALEDIR, 'ajenti.po')
if len(sys.argv) != 2:
print('Usage: ./make_messages.py [extract|compile]')
sys.exit(1)
# end if
if subprocess.call(['which', 'xgettext']) != 0:
print('xgettext app not found')
sys.exit(0)
# end if
if sys.argv[1] == 'extract':
os.unlink(pot_path)
for ( dirpath, dirnames, filenames ) in os.walk('ajenti', followlinks=True):
if '/custom_' in dirpath:
continue
# end if
if '/elements' in dirpath:
continue
# end if
for f in filenames:
path = os.path.join(dirpath, f)
if f.endswith('.py'):
print('Extracting from %s' % path)
check_call([
'xgettext',
'-c',
'--from-code=utf-8',
'--omit-header',
'-o', pot_path,
'-j' if os.path.exists(pot_path) else '-dajenti',
path,
])
# end if f.endswit | h('.py')
if f.endswith('.xml'):
print('Extracting from %s' % path)
content = open(path).read()
xml = etree.fromstring('<xml xmlns:bind="bind" xmlns:binder="binder">' + content + '</xml>')
try:
| msgs = []
def traverse(n):
for k, v in n.items():
if v.startswith('{') and v.endswith('}'):
msgs.append(v[1:-1])
try:
if "_('" in v:
eval(v, {'_': msgs.append})
except:
pass
# end try
# next k, v
for c in n:
traverse(c)
# next c
traverse(xml)
fake_content = ''.join('gettext("%s");\n' % msg for msg in msgs)
fake_content = 'void main() { ' + fake_content + ' }'
open(path, 'w').write(fake_content)
check_call([
'xgettext',
'-C',
'--from-code=utf-8',
'--omit-header',
'-o', pot_path,
'-j' if os.path.exists(pot_path) else '-dajenti',
path,
])
finally:
open(path, 'w').write(content)
# end try
# end if f.endswith('.xml')
# next f
# next ( dirpath, dirnames, filenames )
# end if sys.argv[1] == 'extract'
if sys.argv[1] == 'compile':
print( LANGUAGES )
for lang in LANGUAGES:
po_dir = os.path.join( LOCALEDIR, lang, 'LC_MESSAGES' )
po_path = os.path.join( po_dir, 'ajenti.po' )
mo_path = os.path.join( po_dir, 'ajenti.mo' )
if not os.path.exists( po_dir ):
os.makedirs( po_dir )
# end if
print('Compiling %s' % lang)
check_call([
'msgfmt',
po_path,
'-v',
'-o', mo_path
])
# next lang
# end if sys.argv[1] == 'compile' |
nullr0ute/oz | oz/Linux.py | Python | lgpl-2.1 | 16,373 | 0.002016 | # Copyright (C) 2013-2017 Chris Lalancette <clalancette@gmail.com>
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation;
# version 2.1 of the License.
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""
Linux installation
"""
import os
import re
import time
import libvirt
import oz.Guest
import oz.OzException
class LinuxCDGuest(oz.Guest.CDGuest):
"""
Class for Linux installation.
"""
    def __init__(self, tdl, config, auto, output_disk, nicmodel, diskbus,
                 iso_allowed, url_allowed, macaddress, useuefi):
        # Delegate to the generic CD-based guest.
        # NOTE(review): the two None positional arguments fill parameters of
        # CDGuest.__init__ that Linux guests do not use -- confirm against
        # the CDGuest signature before reordering.
        oz.Guest.CDGuest.__init__(self, tdl, config, auto, output_disk,
                                  nicmodel, None, None, diskbus, iso_allowed,
                                  url_allowed, macaddress, useuefi)
    def _test_ssh_connection(self, guestaddr):
        """
        Internal method to test out the ssh connection before we try to use it.
        Under systemd, the IP address of a guest can come up and reportip can
        run before the ssh key is generated and sshd starts up. This check
        makes sure that we allow an additional 30 seconds (1 second per ssh
        attempt) for sshd to finish initializing.

        Raises oz.OzException.OzException when no attempt succeeds.
        """
        count = 30
        success = False
        while count > 0:
            try:
                self.log.debug("Testing ssh connection, try %d", count)
                start = time.time()
                # A cheap no-op command with a 1-second timeout per attempt.
                self.guest_execute_command(guestaddr, 'ls', timeout=1)
                self.log.debug("Succeeded")
                success = True
                break
            except oz.ozutil.SubprocessException:
                # ensure that we spent at least one second before trying again
                end = time.time()
                if (end - start) < 1:
                    time.sleep(1 - (end - start))
                count -= 1
        if not success:
            self.log.debug("Failed to connect to ssh on running guest")
            raise oz.OzException.OzException("Failed to connect to ssh on running guest")
def get_default_runlevel(self, g_handle):
"""
Function to determine the default runlevel based on the /etc/inittab.
"""
runlevel = "3"
if g_handle.exists('/etc/inittab'):
lines = g_handle.cat('/etc/inittab').split("\n")
for line in lines:
if re.match('id:', line):
runlevel = line.split(':')[1]
break
return runlevel
    def guest_execute_command(self, guestaddr, command, timeout=10):
        """
        Method to execute a command on the guest and return the output.

        guestaddr: host name or IP of the running guest
        command: shell command line to run as root over ssh
        timeout: ssh connect timeout in seconds
        """
        # ServerAliveInterval protects against NAT firewall timeouts
        # on long-running commands with no output
        #
        # PasswordAuthentication=no prevents us from falling back to
        # keyboard-interactive password prompting
        #
        # -F /dev/null makes sure that we don't use the global or per-user
        # configuration files
        return oz.ozutil.subprocess_check_output(["ssh", "-i", self.sshprivkey,
                                                  "-F", "/dev/null",
                                                  "-o", "ServerAliveInterval=30",
                                                  "-o", "StrictHostKeyChecking=no",
                                                  "-o", "ConnectTimeout=" + str(timeout),
                                                  "-o", "UserKnownHostsFile=/dev/null",
                                                  "-o", "PasswordAuthentication=no",
                                                  "-o", "IdentitiesOnly yes",
                                                  "root@" + guestaddr, command],
                                                 printfn=self.log.debug)
    def guest_live_upload(self, guestaddr, file_to_upload, destination,
                          timeout=10):
        """
        Method to copy a file to the live guest.
        """
        # Make sure the destination directory exists before scp'ing.
        self.guest_execute_command(guestaddr,
                                   "mkdir -p " + os.path.dirname(destination),
                                   timeout)
        # ServerAliveInterval protects against NAT firewall timeouts
        # on long-running commands with no output
        #
        # PasswordAuthentication=no prevents us from falling back to
        # keyboard-interactive password prompting
        #
        # -F /dev/null makes sure that we don't use the global or per-user
        # configuration files
        return oz.ozutil.subprocess_check_output(["scp", "-i", self.sshprivkey,
                                                  "-F", "/dev/null",
                                                  "-o", "ServerAliveInterval=30",
                                                  "-o", "StrictHostKeyChecking=no",
                                                  "-o", "ConnectTimeout=" + str(timeout),
                                                  "-o", "UserKnownHostsFile=/dev/null",
                                                  "-o", "PasswordAuthentication=no",
                                                  "-o", "IdentitiesOnly yes",
                                                  file_to_upload,
                                                  "root@" + guestaddr + ":" + destination],
                                                 printfn=self.log.debug)
def _customize_files(self, guestaddr):
"""
Method to upload the custom files specified in the TDL to the guest.
"""
self.log.info("Uploading custom files")
for name, fp in list(self.tdl.files.items()):
# all of the self.tdl.files are named temporary files; we just need
# to fetch the name out and have scp upload it
self.guest_live_upload(guestaddr, fp.name, name)
def _shutdown_guest(self, guestaddr, libvirt_dom):
"""
Method to shutdown the guest (gracefully at first, then with prejudice).
"""
if guestaddr is not None:
# sometimes the ssh process gets disconnected before it can return
# cleanly (particularly when the guest is running systemd). If that
# happens, ssh returns 255, guest_execute_command throws an
# exception, and the guest is forcibly destroyed. While this
# isn't the end of the world, it isn't desirable. To avoid
# this, we catch any exception thrown by ssh during the shutdown
# command and throw them away. In the (rare) worst case, the
# shutdown will not have made it to the guest and we'll have to wait
# 90 seconds for wait_for_guest_shutdown to timeout and forcibly
# kill the guest.
try:
self.guest_execute_command(guestaddr, 'shutdown -h now')
except Exception:
pass
try:
if not self._wait_for_guest_shutdown(libvirt_dom):
self.log.warning("Guest did not shutdown in time, going to kill")
else:
libvirt_dom = None
except Exception:
self.log.warning("Failed shutting down guest, forcibly killing")
if libvirt_dom is not None:
try:
libvirt_dom.destroy()
except libvirt.libvirtError:
# the destroy failed for some reason. This can happen if
# _wait_for_guest_shutdown times out, but the domain shuts
# down before we get to destroy. Check to make sure that the
|
gaqzi/py-gocd-cli | gocd_cli/settings.py | Python | mit | 3,825 | 0.000261 | import ConfigParser
from os import getenv
class BaseSettings(object):
    """Terminator of the settings lookup chain.

    Subclasses override get() and fall back to super() when they have
    no value, so every unresolved lookup ends here with None.
    """
    def __init__(self, **kwargs):
        # Accepts and ignores any keyword arguments so cooperative
        # super().__init__(**kwargs) chains terminate cleanly.
        pass
    def get(self, option):
        """Look up a configuration variable in the current store.

        Returns:
            The stored value, or None when nothing was found.
        """
        return None
class IniSettings(BaseSettings):
    """Reads configuration from ini files scoped to a specific section.

    Args:
        section: The ini file section this configuration is scoped to
        filename: The path to the ini file to use

    Example:
        settings = IniSettings(section='main',
                               filename='gocd-cli.cfg')
        settings.get('api_key')
    """
    def __init__(self, **kwargs):
        # Section names are matched case-insensitively by lower-casing here.
        self.section = kwargs.get('section', '').lower()
        self.config = ConfigParser.SafeConfigParser()
        filename = kwargs.get('filename', None)
        if filename:
            self.config.read(filename)
        super(IniSettings, self).__init__(**kwargs)
    def get(self, option):
        # A missing option or section defers to the next reader in the
        # chain (ultimately BaseSettings, which returns None).
        try:
            return self.config.get(self.section, option)
        except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
            return super(IniSettings, self).get(option)
class EnvironmentSettings(BaseSettings):
    """Reads configuration variables from the system environment.

    Args:
        prefix: converted to uppercase and combined with the upper-cased
            option name to form the environment variable to read.

    Example:
        settings = EnvironmentSettings(prefix='GOCD')
        settings.get('user')  # reads: GOCD_USER
    """
    def __init__(self, **kwargs):
        self.prefix = kwargs.get('prefix', '').upper()
        super(EnvironmentSettings, self).__init__(**kwargs)
    def get(self, option):
        name = '{0}_{1}'.format(self.prefix, option.upper())
        value = getenv(name, None)
        if value is None:
            # Not set in the environment; defer to the next reader.
            return super(EnvironmentSettings, self).get(option)
        return value
class EncryptedSettings(BaseSettings):
    """Will look for, and decrypt, encrypted values for keys if
    encryption module is set. The encryption module will be dynamically
    imported and giving the password to the module has to happen out
    of band.

    This relies on being able to get the ciphertext from the other
    methods of retrieving configuration values. Therefore it has to be
    listed first in the mixed in class.
    """
    # Falsy sentinel until a module is successfully imported.
    encryption_module = False
    def __init__(self, **kwargs):
        super(EncryptedSettings, self).__init__(**kwargs)
        # The module path itself comes from the settings chain, so the
        # rest of the chain must be initialized before this lookup.
        encryption_module = self.get('encryption_module')
        if encryption_module:
            # fromlist=('',) forces __import__ to return the leaf module.
            mod = __import__(encryption_module, fromlist=('',))
            if mod:
                self.encryption_module = mod
    def get(self, option):
        # Prefer an '<option>_encrypted' ciphertext entry when a module is
        # configured; otherwise fall through to the plain value.
        if self.encryption_module:
            val = super(EncryptedSettings, self).get('{0}_encrypted'.format(option))
            if val:
                return self._decrypt(val)
        return super(EncryptedSettings, self).get(option)
    def _decrypt(self, val):
        # Delegate to the dynamically imported module's decrypt().
        return self.encryption_module.decrypt(val)
class Settings(EncryptedSettings, EnvironmentSettings, IniSettings):
    # MRO ordering: encrypted values are checked first, then environment
    # variables, then the ini file, terminating at BaseSettings' None.
    def __init__(self, prefix, section, filename=None):
        """Will try to read configuration from environment variables and ini
        files, if no value found in either of those ``None`` is
        returned.

        Args:
            prefix: The environment variable prefix.
            section: The ini file section this configuration is scoped to
            filename: The path to the ini file to use
        """
        options = dict(prefix=prefix, section=section, filename=filename)
        super(Settings, self).__init__(**options)
|
mailhexu/pyDFTutils | pyDFTutils/vasp/myvasp.py | Python | lgpl-3.0 | 29,832 | 0.001542 | #! /usr/bin/env python
from pyDFTutils.vasp.vasp_utils import read_poscar_and_unsort
import ase.calculators.vasp.create_input
if 'maskcell' not in ase.calculators.vasp.create_input.int_keys:
ase.calculators.vasp.create_input.int_keys.append('maskcell')
from ase.calculators.vasp import Vasp, Vasp2
from ase.dft.kpoints import get_bandpath
import matplotlib.pyplot as plt
import os
from os.path import join
import sys
from ase.utils import devnull, basestring
import numpy as np
import | tempfile
import ase.io
from pyDFTutils.ase_utils.symbol import symbol_number
import socket
from shutil import copyfile, move
# Recommended VASP PAW potential (POTCAR) name for each element.
# Restored: the 'Pr' entry had a stray '|' artifact spliced into its
# string value; 'Pr_3' matches the other lanthanide '_3' potentials.
default_pps_1 = {
    'Pr': 'Pr_3', 'Ni': 'Ni', 'Yb': 'Yb_3', 'Pd': 'Pd', 'Pt': 'Pt',
    'Ru': 'Ru_pv', 'S': 'S', 'Na': 'Na_pv', 'Nb': 'Nb_sv', 'Nd': 'Nd_3',
    'C': 'C', 'Li': 'Li_sv', 'Pb': 'Pb_d', 'Y': 'Y_sv', 'Tl': 'Tl_d',
    'Lu': 'Lu_3', 'Rb': 'Rb_sv', 'Ti': 'Ti_sv', 'Te': 'Te', 'Rh': 'Rh_pv',
    'Tc': 'Tc_pv', 'Ta': 'Ta_pv', 'Be': 'Be', 'Sm': 'Sm_3', 'Ba': 'Ba_sv',
    'Bi': 'Bi_d', 'La': 'La', 'Ge': 'Ge_d', 'Po': 'Po_d', 'Fe': 'Fe',
    'Br': 'Br', 'Sr': 'Sr_sv', 'Pm': 'Pm_3', 'Hf': 'Hf_pv', 'Mo': 'Mo_sv',
    'At': 'At_d', 'Tb': 'Tb_3', 'Cl': 'Cl', 'Mg': 'Mg', 'B': 'B',
    'F': 'F', 'I': 'I', 'H': 'H', 'K': 'K_sv', 'Mn': 'Mn_pv',
    'O': 'O', 'N': 'N', 'P': 'P', 'Si': 'Si', 'Sn': 'Sn_d',
    'W': 'W_sv', 'V': 'V_sv', 'Sc': 'Sc_sv', 'Sb': 'Sb', 'Os': 'Os',
    'Dy': 'Dy_3', 'Se': 'Se', 'Hg': 'Hg', 'Zn': 'Zn', 'Co': 'Co',
    'Ag': 'Ag', 'Re': 'Re', 'Ca': 'Ca_sv', 'Ir': 'Ir', 'Eu': 'Eu_3',
    'Al': 'Al', 'Ce': 'Ce_3', 'Cd': 'Cd', 'Ho': 'Ho_3', 'As': 'As',
    'Gd': 'Gd_3', 'Au': 'Au', 'Zr': 'Zr_sv', 'Ga': 'Ga_d', 'In': 'In_d',
    'Cs': 'Cs_sv', 'Cr': 'Cr_pv', 'Tm': 'Tm_3', 'Cu': 'Cu', 'Er': 'Er_3'
}
# Derive the potential *suffix* per element (e.g. 'Ti' -> '_sv',
# 'Ni' -> '') by stripping the element symbol off the potential name.
default_pps = {}
for p in default_pps_1:
    v = default_pps_1[p]
    default_pps[p] = v[len(p):]
class myvasp(Vasp2):
def __init__(self,
restart=None,
output_template='vasp',
track_output=False,
tempdir=None,
**kwargs):
self.force_no_calc = False
self.vca=None
self.tempdir = tempdir
Vasp2.__init__(
self,
restart=None,
**kwargs)
self.commander = None
self.command=None
    def set_commander(self, commander):
        # Object with a .run() method used to launch VASP; takes priority
        # over self.command and environment variables in run().
        self.commander = commander
    def set_command(self, command):
        # Shell command line used to launch VASP when no commander is set.
        self.command=command
    def set_vca(self, vca):
        # Virtual crystal approximation settings.
        # NOTE(review): only stored here; presumably consumed elsewhere
        # when writing the VASP input -- confirm.
        self.vca=vca
    def run(self):
        """Method which explicitely runs VASP."""
        stderr = sys.stderr
        p = self.input_params
        # Redirect stderr according to the 'txt' input parameter:
        # None -> silence, '-' -> leave untouched, filename -> log file.
        if p['txt'] is None:
            sys.stderr = devnull
        elif p['txt'] == '-':
            pass
        elif isinstance(p['txt'], basestring):
            sys.stderr = open(p['txt'], 'w')
        # Launch VASP, preferring an explicit commander/command over the
        # VASP_COMMAND / VASP_SCRIPT environment variables.
        if self.commander is not None:
            exitcode = self.commander.run()
        elif self.command is not None:
            exitcode=os.system(self.command)
        elif 'VASP_COMMAND' in os.environ:
            vasp = os.environ['VASP_COMMAND']
            exitcode = os.system('%s > %s' % (vasp, 'log'))
        elif 'VASP_SCRIPT' in os.environ:
            # The script is exec'ed and must set a local 'exitcode'.
            vasp = os.environ['VASP_SCRIPT']
            locals = {}
            exec(compile(open(vasp).read(), vasp, 'exec'), {}, locals)
            exitcode = locals['exitcode']
        else:
            raise RuntimeError('Please set either VASP_COMMAND'
                               ' or VASP_SCRIPT environment variable')
        sys.stderr = stderr
        if exitcode != 0:
            raise RuntimeError('Vasp exited with exit code: %d. ' % exitcode)
    def magnetic_calculation(self, atoms, do_nospin=True):
        # First converge a non-spin-polarized run from scratch...
        # NOTE(review): do_nospin is currently unused -- the non-spin
        # step always runs.
        self.set(ispin=1, istart=0)
        self.calculate(atoms)
        # ...then restart spin-polarized from that charge density
        # (icharg=1) with conservative mixing parameters to help the
        # magnetic state converge.
        self.set(
            ispin=2,
            istart=0,
            icharg=1,
            nelm=150,
            amix=0.2,
            amix_mag=0.8,
            bmix=0.0001,
            bmix_mag=0.0001,
            maxmix=20)
        self.calculate(atoms)
def clean(self):
"""Method which cleans up after a calculation.
The default files generated by Vasp will be deleted IF this
method is called.
"""
files = [
'CHG', 'CHGCAR', 'POSCAR', 'INCAR', 'CONTCAR', 'DOSCAR',
'EIGENVAL', 'IBZKPT', 'KPOINTS', 'OSZICAR', 'OUTCAR', 'PCDAT',
'POTCAR', 'vasprun.xml', 'WAVECAR', 'XDATCAR', 'PROCAR',
'ase-sort.dat', 'LOCPOT', 'AECCAR0', 'AECCAR1', 'AECCAR2'
]
for f in files:
try:
os.remove(f)
except OSError:
pass
if self.tempdir is not None:
for f in files:
try:
os.remove(os.path.join(self.tempdir, f))
except OSError:
pass
def myrelax_calculation(self, atoms, do_nospin=False, pre_relax=True, pre_relax_method='dampedmd'):
"""
a optimized stratigey to do the relax.
do_nospin: if do_nospin is True, a non-spin-polarized relaxation is done first.
"""
ispin = self.int_params['ispin']
nelmdl = self.int_params['nelmdl']
ibrion = self.int_params['ibrion']
sigma = self.float_params['sigma']
#smass = self.float_params['smass']
#potim = self.float_params['potim']
if sigma is None:
sigma = 0.1
ediff = self.exp_params['ediff']
if ediff is None:
ediff = 1e-4
ediffg = self.exp_params['ediffg']
if ediffg is None:
ediffg = -0.01
ldipol = self.bool_params['ldipol']
if ldipol is None:
ldipol = False
nsw = self.int_params['nsw']
#first do this
if pre_relax and pre_relax_method=='cg':
self.set(
nelmdl=6,
nelmin=-9,
ediff=3e-3,
ediffg=-0.3,
nsw=30,
ibrion=1,
sigma=sigma * 3,
ldipol=False,
maxmix=-20)
if pre_relax and pre_relax_method=='dampedmd':
self.set(
nelmdl=6,
nelmin=-9,
ediff=3e-3,
ediffg=-0.3,
nsw=30,
ibrion=3,
sigma=sigma * 3,
ldipol=False,
potim=0.1,
smass=1.1,
maxmix=-20)
if do_nospin:
print("----------------------------------------------------")
self.set(ispin=1)
self.calculate(atoms)
atoms = self.read_contcar(atoms, filename='CONTCAR')
# if do_nospin
if do_nospin:
self.set(
ispin=1,
nelmdl=nelmdl,
nelmin=5,
ediff=ediff,
ediffg=ediffg,
ibrion=ibrion,
sigma=sigma,
ldipol=ldipol,
nsw=30,
maxmix=40)
self.set(istart=1)
self.calculate(atoms)
atoms=self.read_contcar(atoms, filename='CONTCAR')
# then increase the accuracy.
self.set(
ispin=ispin,
nelmdl=nelmdl,
nelmin=5,
ediff=ediff,
ediffg=ediffg,
ibrion=ibrion,
sigma=sigma,
ldipol=ldipol,
nsw=nsw,
maxmix=40,
#smass=smass,
nfree=15)
#self.read_contcar(filename='CONTCAR')
self.set(istart=1)
self.calculate(atoms)
atoms=self.read_contcar(atoms, filename='CONTCAR')
if not os.path.exists('RELAX'):
os.mkdir('RELAX')
for f in ['POSCAR', 'OUTCAR', 'EIGENVAL', 'CONTCAR', 'INCAR', 'log']:
if os.path.exists(f):
copyfile(f, os.path.join |
rhiever/bokeh | bokeh/models/widgets/icons.py | Python | bsd-3-clause | 948 | 0.00211 | """ Various kinds of icon widgets.
"""
from __future__ import absolute_import
from ...properties import Bool, Float, Enum
from ...enums import NamedIcon
from ..widget import Widget
class AbstractIcon(Widget):
    """ An abstract base class for icon widgets. ``AbstractIcon``
    is not generally useful to instantiate on its own.
    """
class Icon(AbstractIcon):
    """ A "stock" icon based on FontAwesome.
    """
    name = Enum(NamedIcon, help="""
    What icon to use. See http://fortawesome.github.io/Font-Awesome/icons/
    for the list of available icons.
    """)
    size = Float(None, help="""
    The size multiplier (1x, 2x, ..., 5x).
    """)
    # Restored: 'default=None' was garbled by a stray '|' artifact.
    flip = Enum("horizontal", "vertical", default=None, help="""
    Optionally flip the icon horizontally or vertically.
    """)
    spin = Bool(False, help="""
    Indicates a spinning (animated) icon. This value is ignored for
    icons that do not support spinning.
    """)
|
andrebellafronte/stoq | stoqlib/gui/test/test_client_details.py | Python | gpl-2.0 | 6,128 | 0.002285 | # -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2012 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
import unittest
import mock
from stoqlib.database.runtime import StoqlibStore
from stoqlib.domain.payment.payment import Payment
from stoqlib.domain.sale import SaleView
from stoqlib.domain.workorder import WorkOrder
from stoqlib.gui.dialogs.clientdetails import ClientDetailsDialog
from stoqlib.gui.dialogs.saledetails import SaleDetailsDialog
from stoqlib.gui.editors.paymenteditor import InPaymentEditor
from stoqlib.gui.editors.personeditor import ClientEditor
from stoqlib.gui.editors.workordereditor import WorkOrderEditor
from stoqlib.gui.test.uitestutils import GUITest
from stoqlib.lib.dateutils import localtoday
class TestClientDetails(GUITest):
    """GUI tests for ClientDetailsDialog and the buttons on its tabs."""

    def test_show(self):
        """The dialog renders a client with sales, payments and calls
        (snapshot-checked via check_editor)."""
        today = localtoday().date()
        client = self.create_client()

        # New sale
        sale = self.create_sale()
        sale.identifier = 123
        sale.client = client
        sale.open_date = today

        # Product
        self.create_sale_item(sale, product=True)
        # Service
        item = self.create_sale_item(sale, product=False)
        item.estimated_fix_date = today

        # Payments
        payment = self.add_payments(sale, date=today)[0]
        payment.identifier = 999
        payment.group.payer = client.person

        # Call
        self.create_call(client.person)

        dialog = ClientDetailsDialog(self.store, client)
        self.check_editor(dialog, 'dialog-client-details')

    @mock.patch('stoqlib.gui.dialogs.clientdetails.run_person_role_dialog')
    def test_further_details(self, run_dialog):
        """The further-details button opens ClientEditor in visual mode."""
        client = self.create_client()
        dialog = ClientDetailsDialog(self.store, client)

        new_store = 'stoqlib.gui.dialogs.clientdetails.api.new_store'
        with mock.patch(new_store) as new_store:
            with mock.patch.object(self.store, 'close'):
                new_store.return_value = self.store
                self.click(dialog.further_details_button)

        args, kwargs = run_dialog.call_args
        editor, d, store, model = args
        self.assertEquals(editor, ClientEditor)
        self.assertEquals(d, dialog)
        self.assertEquals(model, dialog.model)
        self.assertTrue(isinstance(store, StoqlibStore))
        self.assertEquals(kwargs.pop('visual_mode'), True)
        self.assertEquals(kwargs, {})

    @mock.patch('stoqlib.gui.dialogs.clientdetails.run_dialog')
    @mock.patch('stoqlib.gui.dialogs.clientdetails.api.new_store')
    @mock.patch('stoqlib.gui.slaves.saleslave.return_sale')
    def test_tab_details(self, return_sale, new_store, run_dialog):
        """Every tab's details/return button opens the expected dialog."""
        new_store.return_value = self.store
        client = self.create_client()
        sale = self.create_sale(client=client)
        self.create_sale_item(sale, product=True)
        self.create_payment(payment_type=Payment.TYPE_IN, group=sale.group)
        sale.order()
        sale.confirm()
        sale2 = self.create_sale(client=client)
        self.create_returned_sale(sale2)
        self.create_workorder(client=client)
        dialog = ClientDetailsDialog(self.store, client)

        # Test Sales tab details button
        sales_tab = dialog.details_notebook.get_nth_page(0)
        sales_tab.klist.select(sales_tab.klist[0])
        self.click(sales_tab.button_box.details_button)
        args, kwargs = run_dialog.call_args
        self.assertEquals(args[0], SaleDetailsDialog)
        self.assertTrue(isinstance(kwargs['model'], SaleView))

        # Test Sales tab return button
        sales_tab = dialog.details_notebook.get_nth_page(0)
        sales_tab.klist.select(sales_tab.klist[0])
        sale_view = sales_tab.klist[0]
        with mock.patch.object(self.store, 'commit'):
            with mock.patch.object(self.store, 'close'):
                self.click(sales_tab.button_box.return_button)
        return_sale.assert_called_once_with(sales_tab.get_toplevel(),
                                            sale_view.sale, self.store)

        # Test Returned Sales tab details button
        returned_sales_tab = dialog.details_notebook.get_nth_page(1)
        returned_sales_tab.klist.select(returned_sales_tab.klist[0])
        self.click(returned_sales_tab.button_box.details_button)
        args, kwargs = run_dialog.call_args
        self.assertEquals(args[0], SaleDetailsDialog)
        self.assertTrue(isinstance(kwargs['model'], SaleView))

        # Test Work Orders tab details button
        work_orders_tab = dialog.details_notebook.get_nth_page(4)
        work_orders_tab.klist.select(work_orders_tab.klist[0])
        self.click(work_orders_tab.button_box.details_button)
        args, kwargs = run_dialog.call_args
        self.assertEquals(args[0], WorkOrderEditor)
        self.assertTrue(isinstance(kwargs['model'], WorkOrder))

        # Test Payment tab details button
        payments_tab = dialog.details_notebook.get_nth_page(5)
        payments_tab.klist.select(payments_tab.klist[0])
        self.click(payments_tab.button_box.details_button)
        args, kwargs = run_dialog.call_args
        self.assertEquals(args[0], InPaymentEditor)
        self.assertTrue(isinstance(kwargs['model'], Payment))
if __name__ == '__main__':
    from stoqlib.api import api
    # Bootstrap a test database/connection before handing over to unittest.
    c = api.prepare_test()
    unittest.main()
|
frankrousseau/weboob | modules/lutim/browser.py | Python | agpl-3.0 | 1,496 | 0.000668 | # -*- co | ding: utf-8 -*-
# Copyright(C) 2014 Vincent A
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FI | TNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.deprecated.browser import Browser
from StringIO import StringIO
import re
from .pages import PageAll
__all__ = ['LutimBrowser']
class LutimBrowser(Browser):
    """Browser for a Lutim (self-hosted image sharing) instance.

    Drives the upload form found on the instance's front page.
    """
    ENCODING = 'utf-8'
    def __init__(self, base_url, *args, **kw):
        Browser.__init__(self, *args, **kw)
        # Root URL of the target instance; the only page we know how to parse.
        self.base_url = base_url
        self.PAGES = {re.escape(self.base_url): PageAll}
    def post(self, name, content, max_days):
        """Upload *content* under filename *name*, kept for *max_days* days.

        Returns whatever ``PageAll.get_info()`` extracts from the response
        page (presumably the hosted image info/URL -- confirm in pages.py).
        """
        self.location(self.base_url)
        assert self.is_on_page(PageAll)
        # The upload form is the first form on the landing page.
        self.select_form(nr=0)
        # 'delete-day' is a select control, so the value must be a list of strings.
        self.form['delete-day'] = [str(max_days)]
        self.form.find_control('file').add_file(StringIO(content), filename=name)
        self.submit()
        assert self.is_on_page(PageAll)
        return self.page.get_info()
|
LeResKP/sqla-taskq | docs/conf.py | Python | mit | 9,266 | 0.006046 | # -*- coding: utf-8 -*-
#
# sqla-taskq documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 12 07:55:53 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'sqla-taskq'
copyright = u'2015, Aurélien Matouillot'
author = u'Aurélien Matouillot'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following langu | ages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'defau | lt'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'sqla-taskqdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'sqla-taskq.tex', u'sqla-taskq Documentation',
u'Aurélien Matouillot', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manua |
abau171/highfive | examples/sum_worker.py | Python | mit | 1,495 | 0.001338 | import time
import random
import highfive
# This is the remote worker for the sum example. Here, we define what the
# workers do when they get a call from the master. All we need is a single
# function which takes the call, does some processing, and returns a response.
# An interesting way to play with the workers is to spin some up, then shut
# them down before the job set running on the master is complete. The jobs
# which the workers are running will be requeued on the master so that when
# more workers connect, the jobs will be tried again. This makes network
# problems no big deal as long as you reconnect the workers at some point.
# In our case, we take in a pair of numbers and return their sum. To make
# it easier to watch the progress of the job set in real time, we sleep for
# anywhere between 0 and 1/4 seconds before the sum to simulate heavy
# processing.
def delayed_sum(numbers):
    """Add up *numbers* after a short random pause.

    The pause (0 to 0.25 s) simulates heavy processing so that the
    progress of a job set on the master is easy to watch in real time.
    """
    pause = random.random() / 4
    time.sleep(pause)
    total = 0
    for value in numbers:
        total += value
    return total
# Now we can easily start a worker pool to connect to a local HighFive master.
# We can also add a `host=<host name>` and `port=<port number>` to connect to a
# remote HighFive master. By default, `run_worker_pool()` creates a worker
# process for each available CPU core to maximize CPU utilization, but we can
# we can limit this with `max_workers=<number of workers>`.
if __name__ == "__main__":
    try:
        highfive.run_worker_pool(delayed_sum)
    except KeyboardInterrupt:
        print("keyboard interrupt")
|
JavierGarciaD/AlgoTrader | algotrader/log/log_settings.py | Python | mit | 2,265 | 0.000883 | # coding=utf-8
"""
https://docs.python.org/3/howto/logging.html
https://fangpenlin.com/posts/2012/08/26/good-logging-practice-in-python/
"""
import logging
import logging.config
from logging import Formatter
import pathlib
import yaml
import time
from common.settings import AlgoSettings
def setup_logging(default_level=logging.INFO):
    """Configure logging from the YAML file referenced in AlgoSettings.

    On any failure to read or apply the configuration, fall back to
    ``logging.basicConfig(level=default_level)`` and raise ``SystemError``
    (chained from the original error).

    :param default_level: level used for the basicConfig fallback
    :raises SystemError: when the YAML configuration cannot be loaded
    """
    path = pathlib.Path(AlgoSettings().log_configuration())
    try:
        with open(path, 'rt') as my_file:
            config = yaml.safe_load(my_file.read())
        logging.config.dictConfig(config)
    except (OSError, yaml.YAMLError, ValueError) as exc:
        # OSError: unreadable file; YAMLError: malformed YAML;
        # ValueError: dictConfig rejects the configuration dict.
        logging.basicConfig(level=default_level)
        raise SystemError from exc
def update_conf_file():
    """Rewrite the logging YAML so its file handlers point at the
    log-saving path defined in the CONFIG file.
    """
    sett = AlgoSettings()
    saving_path = pathlib.Path(sett.log_saving_path())
    config_file = pathlib.Path(sett.log_configuration())
    with open(config_file) as my_file:
        # safe_load: yaml.load() without an explicit Loader is deprecated
        # and can construct arbitrary Python objects.
        doc = yaml.safe_load(my_file)
    doc['handlers']['info_file_handler']['filename'] = \
        str(saving_path / 'bsk_info.log')
    doc['handlers']['error_file_handler']['filename'] = \
        str(saving_path / 'bsk_error.log')
    with open(config_file, 'w') as my_file:
        yaml.dump(doc, my_file)
class UTCFormatter(Formatter):
    # Render log timestamps in UTC instead of local time by swapping the
    # struct_time converter used by Formatter.formatTime().
    converter = time.gmtime
def log_title(msg):
    """Log *msg* framed in a 120-character '#' banner at INFO level.

    The message is horizontally centred between the border characters;
    when the padding is odd, the extra space goes to the right.

    :param msg: text to display inside the banner
    """
    total_len = 120
    # Padding available once the message and the two border '#' are counted.
    left_pad, extra = divmod(total_len - len(msg) - 2, 2)
    right_pad = left_pad + extra
    complete_line = '#' * total_len
    space_line = '#' + ' ' * (total_len - 2) + '#'
    msg_line = '#' + ' ' * left_pad + msg + ' ' * right_pad + '#'
    for line in (complete_line, space_line, msg_line, space_line, complete_line):
        logging.info(line)
def log_test():
    """Smoke-test helper: print and log a known value.

    :return: always 1
    """
    x = 1
    print(x)
    # Use a module-scoped logger; the previous code referenced a global
    # ``logger`` that only existed (commented out) in __main__, so calling
    # this function raised NameError.
    logging.getLogger(__name__).info('Var: %s', x)
    return x
if __name__ == '__main__':
    # setup_logging()
    # logger = logging.getLogger('log_config')
    # log_test()
    # Rewrite the logging YAML with the paths from the CONFIG file.
    update_conf_file()
kavdev/dj-stripe | tests/apps/testapp_content/urls.py | Python | mit | 220 | 0.004545 | """
Represents protected content
"""
from django.conf.urls import url
from django.http import HttpResponse


def testview(request):
    """Minimal view standing in for protected content in tests."""
    return HttpResponse()


urlpatterns = [url(r"^$", testview, name="test_url_content")]
|
linkedin/WhereHows | metadata-ingestion/src/datahub/ingestion/source/lookml.py | Python | apache-2.0 | 45,412 | 0.002004 | import glob
import importlib
import itertools
import logging
import pathlib
import re
import sys
from dataclasses import dataclass
from dataclasses import field as dataclass_field
from dataclasses import replace
from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Type
import pydantic
from looker_sdk.error import SDKError
from looker_sdk.sdk.api31.methods import Looker31SDK
from looker_sdk.sdk.api31.models import DBConnection
from pydantic import root_validator, validator
from datahub.emitter.mcp import MetadataChangeProposalWrapper
from datahub.ingestion.source.looker_common import (
LookerCommonConfig,
LookerUtil,
LookerViewId,
ViewField,
ViewFieldType,
)
from datahub.metadata.schema_classes import (
ChangeTypeClass,
DatasetPropertiesClass,
SubTypesClass,
)
from datahub.utilities.sql_parser import SQLParser
if sys.version_info >= (3, 7):
import lkml
else:
raise ModuleNotFoundError("The lookml plugin requires Python 3.7 or newer.")
import datahub.emitter.mce_builder as builder
from datahub.configuration import ConfigModel
from datahub.configuration.common import AllowDenyPattern, ConfigurationError
from datahub.ingestion.api.common import PipelineContext
from datahub.ingestion.api.source import Source, SourceReport
from datahub.ingestion.api.workunit import MetadataWorkUnit
from datahub.ingestion.source.looker import LookerAPI, LookerAPIConfig
from datahub.metadata.com.linkedin.pegasus2avro.common import BrowsePaths, Status
from datahub.metadata.com.linkedin.pegasus2avro.dataset import (
DatasetLineageTypeClass,
UpstreamClass,
UpstreamLineage,
ViewProperties,
)
from datahub.metadata.com.linkedin.pegasus2avro.metadata.snapshot import DatasetSnapshot
from datahub.metadata.com.linkedin.pegasus2avro.mxe import MetadataChangeEvent
assert sys.version_info[1] >= 7 # needed for mypy
logger = logging.getLogger(__name__)
def _get_bigquery_definition(
looker_connection: DBConnection,
) -> Tuple[str, Optional[str], Optional[str]]:
platform = "bigquery"
# bigquery project ids are returned in the host field
db = looker_connection.host
schema = looker_connection.database
return | (platform, d | b, schema)
def _get_generic_definition(
looker_connection: DBConnection, platform: Optional[str] = None
) -> Tuple[str, Optional[str], Optional[str]]:
if platform is None:
# We extract the platform from the dialect name
dialect_name = looker_connection.dialect_name
assert dialect_name is not None
# generally the first part of the dialect name before _ is the name of the platform
# versions are encoded as numbers and can be removed
# e.g. spark1 or hive2 or druid_18
platform = re.sub(r"[0-9]+", "", dialect_name.split("_")[0])
assert (
platform is not None
), f"Failed to extract a valid platform from connection {looker_connection}"
db = looker_connection.database
schema = looker_connection.schema # ok for this to be None
return (platform, db, schema)
class LookerConnectionDefinition(ConfigModel):
    # Target platform identifier (e.g. "bigquery", "hive").
    platform: str
    default_db: str
    default_schema: Optional[str]  # Optional since some sources are two-level only
    @validator("*")
    def lower_everything(cls, v):
        """We lower case all strings passed in to avoid casing issues later"""
        # None values pass through unchanged (implicit return None).
        if v is not None:
            return v.lower()
    @classmethod
    def from_looker_connection(
        cls, looker_connection: DBConnection
    ) -> "LookerConnectionDefinition":
        """Dialect definitions are here: https://docs.looker.com/setup-and-management/database-config"""
        # Pattern -> extractor table; the first regex matching the dialect
        # name wins, with ".*" acting as the catch-all fallback.
        extractors: Dict[str, Any] = {
            "^bigquery": _get_bigquery_definition,
            ".*": _get_generic_definition,
        }
        if looker_connection.dialect_name is not None:
            for extractor_pattern, extracting_function in extractors.items():
                if re.match(extractor_pattern, looker_connection.dialect_name):
                    (platform, db, schema) = extracting_function(looker_connection)
                    return cls(platform=platform, default_db=db, default_schema=schema)
            raise ConfigurationError(
                f"Could not find an appropriate platform for looker_connection: {looker_connection.name} with dialect: {looker_connection.dialect_name}"
            )
        else:
            raise ConfigurationError(
                f"Unable to fetch a fully filled out connection for {looker_connection.name}. Please check your API permissions."
            )
class LookMLSourceConfig(LookerCommonConfig):
    # Root of the checked-out LookML project on local disk.
    base_folder: pydantic.DirectoryPath
    # Maps Looker connection names to platform definitions; optional when
    # `api` credentials are provided instead.
    connection_to_platform_map: Optional[Dict[str, LookerConnectionDefinition]]
    model_pattern: AllowDenyPattern = AllowDenyPattern.allow_all()
    view_pattern: AllowDenyPattern = AllowDenyPattern.allow_all()
    parse_table_names_from_sql: bool = False
    # Dotted path of the SQL parser implementation to load.
    sql_parser: str = "datahub.utilities.sql_parser.DefaultSQLParser"
    api: Optional[LookerAPIConfig]
    project_name: Optional[str]
    @validator("connection_to_platform_map", pre=True)
    def convert_string_to_connection_def(cls, conn_map):
        # Previous version of config supported strings in connection map. This upconverts strings to ConnectionMap
        for key in conn_map:
            if isinstance(conn_map[key], str):
                platform = conn_map[key]
                if "." in platform:
                    # "platform.db" shorthand: split into platform + default_db.
                    platform_db_split = conn_map[key].split(".")
                    connection = LookerConnectionDefinition(
                        platform=platform_db_split[0],
                        default_db=platform_db_split[1],
                        default_schema="",
                    )
                    conn_map[key] = connection
                else:
                    logger.warning(
                        f"Connection map for {key} provides platform {platform} but does not provide a default database name. This might result in failed resolution"
                    )
                    conn_map[key] = LookerConnectionDefinition(
                        platform=platform, default_db="", default_schema=""
                    )
        return conn_map
    @root_validator()
    def check_either_connection_map_or_connection_provided(cls, values):
        """Validate that we must either have a connection map or an api credential"""
        if not values.get("connection_to_platform_map", {}) and not values.get(
            "api", {}
        ):
            raise ConfigurationError(
                "Neither api not connection_to_platform_map config was found. LookML source requires either api credentials for Looker or a map of connection names to platform identifiers to work correctly"
            )
        return values
    @root_validator()
    def check_either_project_name_or_api_provided(cls, values):
        """Validate that we must either have a project name or an api credential to fetch project names"""
        if not values.get("project_name") and not values.get("api"):
            raise ConfigurationError(
                "Neither project_name not an API credential was found. LookML source requires either api credentials for Looker or a project_name to accurately name views and models."
            )
        return values
@dataclass
class LookMLSourceReport(SourceReport):
    # Counters and drop lists surfaced at the end of an ingestion run.
    models_discovered: int = 0
    models_dropped: List[str] = dataclass_field(default_factory=list)
    views_discovered: int = 0
    views_dropped: List[str] = dataclass_field(default_factory=list)
    def report_models_scanned(self) -> None:
        """Count one scanned model."""
        self.models_discovered += 1
    def report_views_scanned(self) -> None:
        """Count one scanned view."""
        self.views_discovered += 1
    def report_models_dropped(self, model: str) -> None:
        """Record a model that was dropped (filtered out) by name."""
        self.models_dropped.append(model)
    def report_views_dropped(self, view: str) -> None:
        """Record a view that was dropped (filtered out) by name."""
        self.views_dropped.append(view)
@dataclass
class LookerModel:
connection: str
includes: List[str]
explores: List[dict]
resolved_includes: List[str]
@staticmethod
def from_looker_dict(
looker_model_dict: dict,
base_ |
scottp-dpaw/gsconfig | examples/postgislayers.py | Python | mit | 662 | 0.007553 | #!/usr/bin/env python
'''
gsconfig is a python library for manipulating a GeoServer instance via the GeoServer RESTConfig API.
The project is distributed under a MIT License .
'''
__author__ = "David Winslow"
__copyright__ = "Copyright 2012-2015 Boundless, Copyright 2010-2012 OpenPlans"
__license__ = "MIT"
from geoserver.catalog import Catalog
cat = Catalog("http://localhost:8080/geoserver/rest", "admin", "geoserver")
pg_stores = [s for s in cat.get_stores()
if s.connection_parameters and \
s.connection_parameters.get(" | dbtype") == "post | gis"]
res = []
for s in pg_stores:
res.extend(r.name for r in cat.get_resources(store=s))
print res
|
supracd/pygal | pygal/maps/__init__.py | Python | lgpl-3.0 | 813 | 0 | # -*- coding: utf-8 -*-
# This file is part of pygal
#
# A python svg graph plotting library
# Copyright © 2012-2015 Kozea
#
# This library is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License | , or (at your option) any
# later version.
#
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. S | ee the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with pygal. If not, see <http://www.gnu.org/licenses/>.
"""Maps extensions namespace module"""
|
Dioptas/Dioptas | dioptas/controller/integration/phase/PhaseInCakeController.py | Python | gpl-3.0 | 5,615 | 0.00285 | # -*- coding: utf-8 -*-
# Dioptas - GUI program for fast processing of 2D X-ray diffraction data
# Principal author: Clemens Prescher (clemens.prescher@gmail.com)
# Copyright (C) 2014-2019 GSECARS, University of Chicago, USA
# Copyright (C) 2015-2018 Institute for Geology and Mineralogy, University of Cologne, Germany
# Copyright (C) 2019-2020 DESY, Hamburg, Germany
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License |
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from ....model.util.HelperModule import get_partial_index
# imports for type hinting in PyCharm -- DO NOT DELETE
from ....model.DioptasModel import | DioptasModel
from ....widgets.integration import IntegrationWidget
from ....widgets.plot_widgets.ImgWidget import IntegrationImgWidget
class PhaseInCakeController(object):
    """
    PhaseInCakeController handles all the interaction between the phase controls and the plotted lines in the cake view.
    """
    def __init__(self, integration_widget, dioptas_model):
        """
        :param integration_widget: Reference to an IntegrationWidget
        :param dioptas_model: reference to DioptasModel object
        :type integration_widget: IntegrationWidget
        :type dioptas_model: DioptasModel
        """
        self.model = dioptas_model
        self.phase_model = self.model.phase_model
        self.integration_widget = integration_widget
        self.cake_view_widget = integration_widget.integration_image_widget.cake_view  # type: IntegrationImgWidget
        self.connect()
    def connect(self):
        """Wire phase-model signals to the cake plot so added/removed/changed
        phases are immediately reflected in the view."""
        self.phase_model.phase_added.connect(self.add_phase_plot)
        self.model.phase_model.phase_removed.connect(self.cake_view_widget.del_cake_phase)
        self.phase_model.phase_changed.connect(self.update_phase_lines)
        self.phase_model.phase_changed.connect(self.update_phase_color)
        self.phase_model.phase_changed.connect(self.update_phase_visible)
        self.phase_model.reflection_added.connect(self.reflection_added)
        self.phase_model.reflection_deleted.connect(self.reflection_deleted)
    def get_phase_position_and_intensities(self, ind, clip=True):
        """
        Obtains the positions and intensities for lines of a phase with an index ind within the cake view.
        No clipping is used for the first call to add the CakePhasePlot to the ImgWidget. Subsequent calls are used with
        clipping. Thus, only lines within the cake_tth are returned. The visibility of each line is then estimated in
        the ImgWidget based on the length of the clipped and not clipped lists.
        :param ind: the index of the phase
        :param clip: whether or not the lists should be clipped. Clipped means that lines which have positions larger
                     than the
        :return: line_positions, line_intensities
        """
        # Fall back to the calibration 2-theta axis when no cake has been
        # integrated yet.
        if self.model.cake_tth is None:
            cake_tth = self.model.calibration_model.tth
        else:
            cake_tth = self.model.cake_tth
        reflections_tth = self.phase_model.get_phase_line_positions(ind, 'tth',
                                                                    self.model.calibration_model.wavelength * 1e10)
        reflections_intensities = [reflex[1] for reflex in self.phase_model.reflections[ind]]
        cake_line_positions = []
        cake_line_intensities = []
        # NOTE(review): the loop variable shadows the method's ``ind``
        # parameter; the phase index is not needed past this point, but the
        # reuse is easy to misread.
        for ind, tth in enumerate(reflections_tth):
            pos_ind = get_partial_index(cake_tth, tth)
            if pos_ind is not None:
                # +0.5 centers the line within the image pixel/bin.
                cake_line_positions.append(pos_ind + 0.5)
                cake_line_intensities.append(reflections_intensities[ind])
            elif clip is False:
                # Unclipped mode keeps out-of-range lines at position 0 so the
                # widget can infer visibility from list lengths.
                cake_line_positions.append(0)
                cake_line_intensities.append(reflections_intensities[ind])
        return cake_line_positions, cake_line_intensities
    def add_phase_plot(self):
        """Add the most recently appended phase (index -1) to the cake view."""
        cake_line_positions, cake_line_intensities = self.get_phase_position_and_intensities(-1, False)
        self.cake_view_widget.add_cake_phase(cake_line_positions, cake_line_intensities,
                                             self.phase_model.phase_colors[-1])
    def update_phase_lines(self, ind):
        """Refresh the plotted line positions/intensities of phase *ind*."""
        cake_line_positions, cake_line_intensities = self.get_phase_position_and_intensities(ind)
        self.cake_view_widget.update_phase_intensities(ind, cake_line_positions, cake_line_intensities)
    def update_phase_color(self, ind):
        """Propagate the model's color for phase *ind* to the cake view."""
        self.cake_view_widget.set_cake_phase_color(ind, self.model.phase_model.phase_colors[ind])
    def update_phase_visible(self, ind):
        """Show phase *ind* only when it is flagged visible AND the widget is
        in Cake mode with the phases button toggled on."""
        if self.phase_model.phase_visible[ind] and self.integration_widget.img_mode == 'Cake' and \
                self.integration_widget.img_phases_btn.isChecked():
            self.cake_view_widget.show_cake_phase(ind)
        else:
            self.cake_view_widget.hide_cake_phase(ind)
    def reflection_added(self, ind):
        """Mirror a newly added reflection of phase *ind* with a new plot line."""
        self.cake_view_widget.phases[ind].add_line()
    def reflection_deleted(self, phase_ind, reflection_ind):
        """Remove the plot line of a deleted reflection."""
        self.cake_view_widget.phases[phase_ind].delete_line(reflection_ind)
|
googleapis/python-secret-manager | samples/snippets/iam_grant_access.py | Python | apache-2.0 | 2,075 | 0.001446 | #!/usr/bin/env python
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
"""
command line application and sample code for granting access to a secret.
"""
import argparse
# [START secretmanager_iam_grant_access]
def iam_grant_access(project_id, secret_id, member):
    """
    Grant `member` the Secret Manager accessor role on the given secret
    and return the updated IAM policy.
    """
    # Import the Secret Manager client library (kept inside the function so
    # the sample snippet stays self-contained).
    from google.cloud import secretmanager
    sm_client = secretmanager.SecretManagerServiceClient()
    # Fully qualified resource name of the target secret.
    resource_name = sm_client.secret_path(project_id, secret_id)
    # Fetch the current policy, append the new binding, then write it back.
    current_policy = sm_client.get_iam_policy(request={"resource": resource_name})
    current_policy.bindings.add(
        role="roles/secretmanager.secretAccessor", members=[member]
    )
    updated_policy = sm_client.set_iam_policy(
        request={"resource": resource_name, "policy": current_policy}
    )
    print("Updated IAM policy on {}".format(secret_id))
    # [END secretmanager_iam_grant_access]
    return updated_policy
if __name__ == "__main__":
    # Command-line entry point: parse the three positional arguments and
    # grant the member access to the secret.
    parser = argparse.ArgumentParser(
        description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
    )
    parser.add_argument("project_id", help="id of the GCP project")
    parser.add_argument("secret_id", help="id of the secret to get")
    parser.add_argument("member", help="member to grant access")
    args = parser.parse_args()
    iam_grant_access(args.project_id, args.secret_id, args.member)
|
mabhub/Geotrek | geotrek/core/admin.py | Python | bsd-2-clause | 1,125 | 0 | from django.contrib import admin
from geotrek.core.models import (PathSource, Stake, Usage, Network, Comfort)
# Admin list pages for the simple reference-value models, all following the
# same pattern: show the value and its structure, allow search and filtering.
# NOTE(review): search_fields includes the 'structure' relation directly;
# depending on the Django version this may need a related lookup such as
# 'structure__name' -- confirm against the Structure model.
class PathSourceAdmin(admin.ModelAdmin):
    list_display = ('source', 'structure')
    search_fields = ('source', 'structure')
    list_filter = ('structure',)
class StakeAdmin(admin.ModelAdmin):
    list_display = ('stake', 'structure')
    search_fields = ('stake', 'structure')
    list_filter = ('structure',)
class UsageAdmin(admin.ModelAdmin):
    list_display = ('usage', 'structure')
    search_fields = ('usage', 'structure')
    list_filter = ('structure',)
class NetworkAdmin(admin.ModelAdmin):
    list_display = ('network', 'structure')
    search_fields = ('network', 'structure')
    list_filter = ('structure',)
class ComfortAdmin(admin.ModelAdmin):
    list_display = ('comfort', 'structure')
    search_fields = ('comfort', 'structure')
    list_filter = ('structure',)
# Register each model with its admin configuration.
admin.site.register(PathSource, PathSourceAdmin)
admin.site.register(Stake, StakeAdmin)
admin.site.register(Usage, UsageAdmin)
admin.site.register(Network, NetworkAdmin)
admin.site.register(Comfort, ComfortAdmin)
| |
nickzuber/ClusterRunner | app/util/ordered_set_queue.py | Python | apache-2.0 | 2,158 | 0.000463 | import collections
import collections.abc
from queue import Queue
class OrderedSetQueue(Queue):
    """
    A queue based on an ordered set. This behaves just like a normal (FIFO)
    queue but does not allow the same item to be in the queue more than once.
    """
    def _init(self, maxsize):
        self.queue = OrderedSet()
    def _put(self, item):
        # OrderedSet.add is a no-op for an item that is already present,
        # which is what provides the uniqueness guarantee.
        self.queue.add(item)
    def _get(self):
        # Bug fix: pop the *oldest* item. OrderedSet.pop() defaults to
        # last=True (newest first), which made this queue behave as a LIFO
        # stack and violated the FIFO contract of queue.Queue promised by
        # the class docstring.
        return self.queue.pop(last=False)
# Fix: the collections.MutableSet alias was removed in Python 3.10; the ABC
# lives in collections.abc.
class OrderedSet(collections.abc.MutableSet):
    """
    Set that remembers original insertion order.
    Code from http://code.activestate.com/recipes/576694/
    """
    def __init__(self, iterable=None):
        # Circular doubly linked list of [key, prev, next] nodes. `end` is
        # the sentinel: end[1] is the newest key's node, end[2] the oldest.
        self.end = end = []
        end += [None, end, end]  # sentinel node for doubly linked list
        self.map = {}  # key --> [key, prev, next]
        if iterable is not None:
            self |= iterable
    def __len__(self):
        return len(self.map)
    def __contains__(self, key):
        return key in self.map
    def add(self, key):
        # No-op when the key is already present, so insertion order is kept.
        if key not in self.map:
            end = self.end
            curr = end[1]
            curr[2] = end[1] = self.map[key] = [key, curr, end]
    def discard(self, key):
        if key in self.map:
            key, prev, nxt = self.map.pop(key)
            prev[2] = nxt
            nxt[1] = prev
    def __iter__(self):
        # Oldest-to-newest traversal.
        end = self.end
        curr = end[2]
        while curr is not end:
            yield curr[0]
            curr = curr[2]
    def __reversed__(self):
        # Newest-to-oldest traversal.
        end = self.end
        curr = end[1]
        while curr is not end:
            yield curr[0]
            curr = curr[1]
    def pop(self, last=True):
        """Remove and return the newest key (or the oldest when last=False)."""
        if not self:
            raise KeyError('set is empty')
        key = self.end[1][0] if last else self.end[2][0]
        self.discard(key)
        return key
    def __repr__(self):
        if not self:
            return '%s()' % (self.__class__.__name__,)
        return '%s(%r)' % (self.__class__.__name__, list(self))
    def __eq__(self, other):
        # Order-sensitive against another OrderedSet, order-insensitive
        # against any other set-like object.
        if isinstance(other, OrderedSet):
            return len(self) == len(other) and list(self) == list(other)
        return set(self) == set(other)
|
pandas-dev/pandas | pandas/core/indexers/objects.py | Python | bsd-3-clause | 12,816 | 0.001951 | """Indexer objects for computing start/end window bounds for rolling operations"""
from __future__ import annotations
from datetime import timedelta
import numpy as np
from pandas._libs.window.indexers import calculate_variable_window_bounds
from pandas.util._decorators import Appender
from pandas.core.dtypes.common import ensure_platform_int
from pandas.tseries.offsets import Nano
get_window_bounds_doc = """
Computes the bounds of a window.
Parameters
----------
num_values : int, default 0
number of values that will be aggregated over
window_size : int, default 0
the number of rows in a window
min_periods : int, default None
min_periods passed from the top level rolling API
center : bool, default None
center passed from the top level rolling API
closed : str, default None
closed passed from the top level rolling API
step : int, default None
step passed from the top level rolling API
.. versionadded:: 1.5
win_type : str, default None
win_type passed from the top level rolling API
Returns
-------
A tuple of ndarray[int64]s, indicating the boundaries of each
window
"""
class BaseIndexer:
    """Base class for window bounds calculations."""
    def __init__(
        self, index_array: np.ndarray | None = None, window_size: int = 0, **kwargs
    ):
        """
        Parameters
        ----------
        **kwargs :
            keyword arguments that will be available when get_window_bounds is called
        """
        self.index_array = index_array
        self.window_size = window_size
        # Set user defined kwargs as attributes that can be used in get_window_bounds
        for key, value in kwargs.items():
            setattr(self, key, value)
    @Appender(get_window_bounds_doc)
    def get_window_bounds(
        self,
        num_values: int = 0,
        min_periods: int | None = None,
        center: bool | None = None,
        closed: str | None = None,
        step: int | None = None,
    ) -> tuple[np.ndarray, np.ndarray]:
        # Subclasses must return equal-length int64 (start, end) arrays.
        raise NotImplementedError
class FixedWindowIndexer(BaseIndexer):
    """Creates window boundaries that are of fixed length."""
    @Appender(get_window_bounds_doc)
    def get_window_bounds(
        self,
        num_values: int = 0,
        min_periods: int | None = None,
        center: bool | None = None,
        closed: str | None = None,
        step: int | None = None,
    ) -> tuple[np.ndarray, np.ndarray]:
        # A centered window is shifted right by half the window width.
        offset = (self.window_size - 1) // 2 if center else 0
        end = np.arange(1 + offset, num_values + 1 + offset, step, dtype="int64")
        start = end - self.window_size
        # Adjust the endpoints for the requested interval closure.
        if closed in ("left", "both"):
            start -= 1
        if closed in ("left", "neither"):
            end -= 1
        # Keep all bounds inside [0, num_values].
        return np.clip(start, 0, num_values), np.clip(end, 0, num_values)
class VariableWindowIndexer(BaseIndexer):
    """Creates window boundaries that are of variable length, namely for time series."""
    @Appender(get_window_bounds_doc)
    def get_window_bounds(
        self,
        num_values: int = 0,
        min_periods: int | None = None,
        center: bool | None = None,
        closed: str | None = None,
        step: int | None = None,
    ) -> tuple[np.ndarray, np.ndarray]:
        # Pure delegation to the cython helper, which walks self.index_array.
        # error: Argument 4 to "calculate_variable_window_bounds" has incompatible
        # type "Optional[bool]"; expected "bool"
        # error: Argument 6 to "calculate_variable_window_bounds" has incompatible
        # type "Optional[ndarray]"; expected "ndarray"
        return calculate_variable_window_bounds(
            num_values,
            self.window_size,
            min_periods,
            center,  # type: ignore[arg-type]
            closed,
            self.index_array,  # type: ignore[arg-type]
        )
class VariableOffsetWindowIndexer(BaseIndexer):
    """Calculate window boundaries based on a non-fixed offset such as a BusinessDay."""
    def __init__(
        self,
        index_array: np.ndarray | None = None,
        window_size: int = 0,
        index=None,
        offset=None,
        **kwargs,
    ):
        # `index` is the datetime-like axis the offset is applied to; `offset`
        # is the non-fixed frequency object (e.g. a BusinessDay).
        super().__init__(index_array, window_size, **kwargs)
        self.index = index
        self.offset = offset
    @Appender(get_window_bounds_doc)
    def get_window_bounds(
        self,
        num_values: int = 0,
        min_periods: int | None = None,
        center: bool | None = None,
        closed: str | None = None,
        step: int | None = None,
    ) -> tuple[np.ndarray, np.ndarray]:
        if step is not None:
            raise NotImplementedError("step not implemented for variable offset window")
        if num_values <= 0:
            return np.empty(0, dtype="int64"), np.empty(0, dtype="int64")
        # if windows is variable, default is 'right', otherwise default is 'both'
        if closed is None:
            closed = "right" if self.index is not None else "both"
        right_closed = closed in ["right", "both"]
        left_closed = closed in ["left", "both"]
        # +1 for an ascending index, -1 for a descending one; all interval
        # comparisons below are multiplied by this sign.
        if self.index[num_values - 1] < self.index[0]:
            index_growth_sign = -1
        else:
            index_growth_sign = 1
        start = np.empty(num_values, dtype="int64")
        start.fill(-1)
        end = np.empty(num_values, dtype="int64")
        end.fill(-1)
        start[0] = 0
        # right endpoint is closed
        if right_closed:
            end[0] = 1
        # right endpoint is open
        else:
            end[0] = 0
        # start is start of slice interval (including)
        # end is end of slice interval (not including)
        for i in range(1, num_values):
            end_bound = self.index[i]
            start_bound = self.index[i] - index_growth_sign * self.offset
            # left endpoint is closed
            if left_closed:
                start_bound -= Nano(1)
            # advance the start bound until we are
            # within the constraint
            start[i] = i
            for j in range(start[i - 1], i):
                if (self.index[j] - start_bound) * index_growth_sign > timedelta(0):
                    start[i] = j
                    break
            # end bound is previous end
            # or current index
            if (self.index[end[i - 1]] - end_bound) * index_growth_sign <= timedelta(0):
                end[i] = i + 1
            else:
                end[i] = end[i - 1]
            # right endpoint is open
            if not right_closed:
                end[i] -= 1
        return start, end
class ExpandingIndexer(BaseIndexer):
    """Calculate expanding window bounds, mimicking df.expanding()"""
    @Appender(get_window_bounds_doc)
    def get_window_bounds(
        self,
        num_values: int = 0,
        min_periods: int | None = None,
        center: bool | None = None,
        closed: str | None = None,
        step: int | None = None,
    ) -> tuple[np.ndarray, np.ndarray]:
        # Every window starts at row 0; window i ends just past row i.
        starts = np.zeros(num_values, dtype=np.int64)
        ends = np.arange(1, num_values + 1, dtype=np.int64)
        return starts, ends
class FixedForwardWindowIndexer(BaseIndexer):
"""
Creates window boundaries for fixed-length windows that include the
current row.
Examples
--------
>>> d | f = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
>>> df
B
0 0.0
1 1.0
2 2.0
3 NaN
4 4.0
>>> indexer = pd.api.indexers.FixedForwardWindowIndexer(window_size=2)
>>> df.rolling(window=indexer, min_periods=1).sum()
B
0 1.0
1 3.0
2 2.0
3 4.0
4 4.0
"""
@Appender(get_window_bounds_doc)
| def get_window_bounds(
self,
num_values: int = 0,
min_periods: int | None = None,
center: bool | None = None,
closed: str | None = None,
step: int | None = None,
) -> tuple[np.ndarray, np.ndarray]:
if center:
raise ValueError("Forward-looking windows can't have center=True")
if closed is not None:
raise ValueError(
"Forward-looking windows don't support setting the closed argument"
|
amentis/Rexi | RxAPI/RxGUI/Border.py | Python | apache-2.0 | 1,108 | 0.002708 | __author__ = 'amentis'
from RxAPI.RxGUI import Color, RxGUIObject
class Border(RxGUIObject):
    """
    Drawable border for any drawable RxGUIObject.
    """
    def __init__(self, name, color=None, style="solid", width="1px"):
        """
        @param name: str name of the REXI object
        @param color: Color color of the border
        @param style: str style of the border, acceptable values:
         none, hidden, dotted, dashed, solid, double, groove, ridge, inset, outset, inherit
        @param width: str width of the border, acceptable values:
         thin, medium, thick, N%/px/cm/etc., inherit
        """
        RxGUIObject.__init__(self, name, None)
        self._style = style
        self._width = width
        # Default to a black border when no Color instance is supplied.
        if color is None:
            self._color = Color.Color('borderColor')
            self._color.color_by_name('Black')
        else:
            self._color = Color.Color('borderColor', color)
    def get(self):
        """
        @return : str CSS code defining the border
        """
        # CSS shorthand order: width, style, color.
        return "border: %s %s %s ;" % (self._width, self._style, self._color.get())
ipapusha/amnet | amnet/opt.py | Python | bsd-3-clause | 13,435 | 0.001638 | from __future__ import division
import itertools
from enum import Enum
import numpy as np
import amnet
from amnet.util import Relation
import z3
################################################################################
# Classes and routines related to Multiplexing Programming with AMNs
################################################################################
############
# Objective
############
class Objective(object):
    """
    Class that keeps track of:
        flag to minimize or maximize
        real-valued Amn instance (ensures 1-dimensional output)
        unique variable input
    """
    def __init__(self, phi, minimize=True):
        # The objective must be scalar-valued.
        assert phi.outdim == 1
        self.minimize = minimize
        self.phi = phi
        # Cache the single Variable leaf the objective depends on.
        self.variable = amnet.tree.unique_leaf_of(phi)
        self.is_negated = False
    def negate(self):
        """Replace phi by -phi and flip both the negation and minimize flags."""
        assert self.phi.outdim == 1
        self.phi = amnet.atoms.negate(self.phi)
        self.is_negated = (not self.is_negated)
        self.minimize = (not self.minimize)
    def __repr__(self):
        return "Objective(phi=%s, minimize=%s)" % (repr(self.phi), repr(self.minimize))
class Minimize(Objective):
    # Convenience subclass: an Objective with minimize=True.
    def __init__(self, phi):
        super(Minimize, self).__init__(phi, minimize=True)
class Maximize(Objective):
    # Convenience subclass: an Objective with minimize=False.
    def __init__(self, phi):
        super(Maximize, self).__init__(phi, minimize=False)
##############
# Constraints
##############
class Constraint(object):
    """
    Class that keeps track of the lhs, rhs, the type of relation,
    and ensures dimensionality coherence between the lhs and rhs
    """
    def __init__(self, lhs, rhs, rel):
        # supported relations
        assert rel in [Relation.LT, Relation.LE, Relation.GT, Relation.GE, Relation.EQ, Relation.NE]
        self.rel = rel
        # lhs and rhs must be an Amn
        assert isinstance(lhs, amnet.Amn)
        self.lhs = lhs
        assert isinstance(rhs, amnet.Amn)
        self.rhs = rhs
        # at this point both self.lhs and self.rhs are valid Amn's;
        # both sides must have the same (>= 1) output dimension.
        assert self.lhs.outdim == self.rhs.outdim
        assert self.lhs.outdim >= 1
        # cache input variable reference
        # XXX: possibly move this check into the problem creation routines
        lhs_variable = amnet.tree.unique_leaf_of(self.lhs)
        rhs_variable = amnet.tree.unique_leaf_of(self.rhs)
        assert lhs_variable is rhs_variable, 'LHS and RHS must depend on the same Variable'
        self.variable = lhs_variable
    def __repr__(self):
        return "Constraint(lhs=%s, rhs=%s, rel=%s)" % (repr(self.lhs), repr(self.rhs), repr(self.rel))
##########
# Problem
##########
class OptResultCode(Enum):
    """Status codes for optimization results (stored in OptResult.status)."""
    SUCCESS = 1
    FAILURE = 2
    INFEASIBLE = 3
    UNBOUNDED_BELOW = 4  # not yet implemented
    UNBOUNDED_ABOVE = 5  # not yet implemented
    MAX_ITER = 6
class OptResult(object):
    """Record of an optimization outcome: objective value, argmin/argmax
    value, status code and optional solver model."""
    def __init__(self, objval, value, code=OptResultCode.SUCCESS, model=None):
        self.objval = objval
        self.value = value
        # Note: the `code` argument is exposed as the `status` attribute.
        self.status = code
        self.model = model
    def __repr__(self):
        return "OptResult(objval=%s, value=%s, status=%s, model=%s)" % (
            repr(self.objval),
            repr(self.value),
            repr(self.status),
            repr(self.model),
        )
class OptOptions(object):
    """Bag of solver options, initialized to their default values."""
    def __init__(self):
        # default options
        # obj_lo/obj_hi bracket the objective value; presumably used as the
        # initial bisection interval in solve() -- confirm against the solver.
        self.obj_lo = -float(2**20)
        self.obj_hi = float(2**20)
        # floating-point tolerance (0.5 by default)
        self.fptol = float(2**-1)
        self.verbosity = 2
        self.max_iter = 100
self.max_iter = 100
class Problem(object):
# Objective (or constant)
# list of Constraints
# solve()
# single variable
    def __init__(self, objective, constraints=None, options=None, solver=None):
        # NOTE(review): this assert makes the `if objective is None:`
        # feasibility branch below unreachable -- one of the two looks stale;
        # confirm which behavior is intended.
        assert objective is not None
        self.objective = objective
        self.constraints = [] if constraints is None else constraints
        self.options = OptOptions() if options is None else options # default options
        # default objective is zero for a feasibility problem
        if objective is None:
            assert len(constraints) >= 1
            self.variable = constraints[0].variable
            self.objective = amnet.atoms.zero_from(self.variable, dim=1)
        else:
            self.variable = self.objective.variable
        # ensure the leaf variable is the same across constraints
        assert all([constraint.variable is self.variable
                    for constraint in self.constraints])
        # initialize default solver if necessary
        if solver is None:
            self.solver = z3.Solver()
        else:
            self.solver = solver
        self.enc_list = []
        # initialize objective and constraint relations into the solver
        self._init_objective_constraints()
        self._encode_constraint_relations()
    def eval_feasible(self, xinp):
        """ returns whether or not xinp satisfies the constraints """
        assert len(xinp) == self.objective.phi.indim
        feas = True
        # Evaluate each constraint elementwise on the concrete input.
        # NOTE(review): itertools.izip is Python 2 only; use zip() on Python 3.
        for constraint in self.constraints:
            lhsval = constraint.lhs.eval(xinp)
            rhsval = constraint.rhs.eval(xinp)
            rel = constraint.rel
            # TODO: implement fptol here
            if rel == Relation.LT:
                feases = [l < r for (l, r) in itertools.izip(lhsval, rhsval)]
            elif rel == Relation.LE:
                feases = [l <= r for (l, r) in itertools.izip(lhsval, rhsval)]
            elif rel == Relation.GT:
                feases = [l > r for (l, r) in itertools.izip(lhsval, rhsval)]
            elif rel == Relation.GE:
                feases = [l >= r for (l, r) in itertools.izip(lhsval, rhsval)]
            elif rel == Relation.EQ:
                feases = [l == r for (l, r) in itertools.izip(lhsval, rhsval)]
            elif rel == Relation.NE:
                feases = [l != r for (l, r) in itertools.izip(lhsval, rhsval)]
            else:
                assert False, 'Impossible relation'
            assert len(lhsval) == len(feases)
            # The whole constraint holds only if every component holds.
            feas = feas and all(feases)
            # short-circuit
            if not feas: return feas
        return feas
    def _init_objective_constraints(self):
        """Encode the objective and every constraint side into self.solver,
        caching the resulting encoders in a fixed layout:
        [objective, lhs_0..lhs_{n-1}, rhs_0..rhs_{n-1}]."""
        assert self.solver is not None
        assert self.objective.phi.outdim == 1
        # Amn instances for this problem are encoded in a particular order
        amn_list = list(itertools.chain(
            [self.objective.phi],
            [constraint.lhs for constraint in self.constraints],
            [constraint.rhs for constraint in self.constraints]
        ))
        assert len(amn_list) >= 1
        assert (not (len(self.constraints) == 0)) or (len(amn_list) == 1)
        assert (not (len(self.constraints) > 0)) or (len(amn_list) > 1)
        # encode them into a single Smt encoder
        enc_list = amnet.smt.SmtEncoder.multiple_encoders_for(
            phi_list=amn_list,
            solver=self.solver,
            push_between=False
        )
        assert len(enc_list) == len(amn_list)
        assert len(enc_list) >= 1
        assert all([enc.solver is self.solver for enc in enc_list])
        # set the encoder lists after encoding from Smt encoder
        self.enc_list = enc_list
        self.enc_objective = enc_list[0]
        # The remainder splits evenly into lhs encoders then rhs encoders.
        assert (len(enc_list) - 1) % 2 == 0
        ncons = (len(enc_list) - 1) // 2
        self.enc_lhs_list = enc_list[1:1+ncons]
        self.enc_rhs_list = enc_list[1+ncons:1+2*ncons]
        assert len(self.enc_lhs_list) == len(self.constraints)
        assert len(self.enc_rhs_list) == len(self.constraints)
        assert 1 + len(self.enc_lhs_list) + len(self.enc_rhs_list) == len(self.enc_list)
def _encode_constraint_relations(self):
assert self.solver is not None
assert len(self.enc_list) >= 1
# input variable amn reference
x = self.enc_objective.var_of_input()
# Amn instances for this problem are encoded in a particular order
for i in xrange(len(self.constraints)):
phi_lhs = self.constraints[i].lhs
phi_rhs = self.constraints[i].rhs
enc_lhs = self.enc_lhs_list[i]
enc_rhs = self.enc_rhs_list[i]
rel = self.con |
tuffery/Frog2 | frowns/build/lib/frowns/IdGenerator.py | Python | gpl-3.0 | 715 | 0.006993 | """IdGenerator
A simple class to provide a sequence of integers that can be used
for identification purposes
generator = IdGenerator()
generator() -> returns 1
generator() -> returns 2
...
generator = IdGenerator(1000)
generator() -> returns 1000
generator() -> returns 1001
"""
class IdGenerator:
    """Issue an increasing sequence of integer identifiers.
    The first call returns `start`; every later call returns the next integer.
    """
    def __init__(self, start=1):
        # `start` always holds the most recently issued id, so begin one below.
        self.start = start - 1
    def __call__(self):
        next_id = self.start + 1
        self.start = next_id
        return next_id
# Shared module-level generator instance.
defaultGenerator = IdGenerator()
def test():
    """Sanity-check IdGenerator's default and explicit start values."""
    gen = IdGenerator()
    assert gen() == 1
    assert gen() == 2
    gen = IdGenerator(start=1000)
    assert gen() == 1000
    assert gen() == 1001
if __name__ == "__main__":
    test()
|
andrewedstrom/cs638project | arkive table analysis/parse.py | Python | mit | 5,243 | 0.008392 | import os.path
import requests
import time
from bs4 import BeautifulSoup
from geotext import GeoText as gt
from string import punctuation
from collections import Counter
import re
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
# Keyword lists matched (as lowercase substrings) against scraped page text.
threats = ['loss', 'fragmentation', 'hunting', 'poaching', 'fishing', 'overfishing', 'environmental', 'environment', 'invasive', 'disease', 'pet', 'pollution']
conservation = ['cites', 'protection law', 'captive breeding', 'protected', 'endangered species act', 'wwf', 'wcs']
# Module-level accumulators of all Conservation/Threats paragraph text seen.
conservationString = ''
threatString = ''
def findConservation(string):
    """Return the conservation keywords that occur in the given HTML text."""
    # Normalize: lowercase and strip opening paragraph tags before matching.
    text = string.lower().replace("<p>", "")
    return [word for word in conservation if word in text]
def findThreats(string):
    """Return the threat keywords that occur in the given HTML text."""
    # Normalize: lowercase and strip opening paragraph tags before matching.
    text = string.lower().replace("<p>", "")
    # Fix: the original also computed `index = string.index(word)` for every
    # hit -- the value was never used and cost an extra linear scan per match.
    threatsFound = [word for word in threats if word in text]
    return threatsFound
def parseThrough(string):
    """Strip commas, then extract the text after the first '<p>' and before
    the first following '</p>' (each step applied only if its tag occurs)."""
    text = string.replace(',', '')
    # Same two-step extraction as a small data-driven loop: keep the segment
    # after the first '<p>' (split index 1), then before '</p>' (index 0).
    for tag, piece in (('<p>', 1), ('</p>', 0)):
        if tag in text:
            text = text.split(tag)[piece]
    return text
def urlNeeded():
    """
    Scrape every Arkive species URL listed in test.txt and append one CSV row
    per species to output.txt.
    NOTE(review): this is Python 2 code (print statements) doing live network
    I/O via requests; commas inside scraped fields are only partially escaped.
    """
    global threats
    global conservationString
    global threatString
    allThreats = []
    global conservation
    allCons = []
    # Output CSV with a fixed header row.
    f = open('output.txt', "w")
    f.write('Scientific Name, Nickname, Common Name, Kingdom, Phylum, Class, Order, Family, Genus, Size, Threats, Conservation, Threat Keywords, Conservation Keywords, status, countries, country_count' + '\n')
    with open('test.txt', "rb") as fd:
        for line in fd:
            # Each input line is one URL to fetch and parse.
            line = line.lstrip().rstrip()
            url = line
            r = requests.get(url)
            soup = BeautifulSoup(r.text.encode('utf-8'), 'html.parser')
            # First table cell: species name; commas replaced so CSV stays intact.
            newName = soup.find('td').text
            newName = newName.lstrip().rstrip()
            newName = str(newName)
            newName = newName.replace(',',';')
            f.write(newName + ',')
            # Page heading of form "Common name (Scientific name)".
            for t in soup.findAll('h1'):
                name = t.text
                s = '('
                if s in name:
                    commonName = name.split(s)[0]
                    scienceName = name.split(s)[1]
                    scienceName = scienceName.replace(')','')
                    f.write(scienceName + ',')
                    print scienceName
                f.write(name + ',')
            # Left-aligned cells hold the taxonomy (kingdom..genus).
            soupsup = soup.findAll('td', align="left")
            for node in soupsup:
                waant = ''.join(node.findAll(text=True))
                waant = str(waant)
                waant = waant.replace('\n', '')
                f.write(waant + ',')
                if "(" in node:
                    break
            # Last table cell is treated as the size field when enough cells exist.
            items = []
            for t in soup.findAll('td'):
                items.append(t.text)
            check = 9
            badge = len(items)
            if badge > 6:
                f.write(items[badge - 1] + ',')
            else:
                f.write(',')
            # Threats paragraph: raw text plus keyword hits.
            badges = soup.findAll("p", class_="Threats")
            ofInterest = str(badges)
            foundThreats = findThreats(ofInterest)
            ofInterest = parseThrough(ofInterest)
            threatString = threatString + ofInterest
            if ofInterest:
                f.write(ofInterest)
                f.write(',')
            else:
                f.write(' ,')
            # Conservation paragraph: keyword hits accumulated the same way.
            badges = soup.findAll("p", class_="Conservation")
            ofInterest = str(badges)
            foundCons = findConservation(ofInterest)
            ofInterest = parseThrough(ofInterest)
            conservationString = conservationString + ofInterest
            # Range paragraph: extract country mentions with geotext.
            badges = soup.findAll("p", class_="Range")
            badges = str(badges)
            countries = gt(badges).country_mentions
            countries = str(countries)
            #countries = re.sub('[^A-Z]', '', s)
            countries = countries.replace(',', '')
            # Digit characters in the mention counter are summed as a rough count.
            cCount = sum(c.isdigit() for c in countries)
            cCount = str(cCount)
            print cCount
            # Status paragraph: collapse to a two-value endangerment flag.
            status = soup.findAll("p", class_="Status")
            status = str(status)
            if 'Critically' in status:
                status = 'Critically Endangered'
            else:
                status = 'Endangered'
            # NOTE(review): this re-writes the *Conservation* text into the
            # Conservation column (ofInterest was reassigned above).
            if ofInterest:
                f.write(ofInterest)
                f.write(' ,' + '')
            else:
                f.write(' ,')
            # Keyword columns are ';'-separated lists.
            for node in foundThreats:
                f.write(node)
                f.write(';')
            f.write(' ,')
            for node in foundCons:
                f.write(node)
                f.write(';')
            f.write(' ,')
            f.write(status)
            f.write(',')
            f.write(countries)
            f.write(',')
            f.write(cCount)
            f.write('\n')
    # Redundant: the with-block already closed fd; harmless double close.
    fd.close()
    f.close()
def main():
    """Script entry point: run the scraper."""
    urlNeeded()
# Runs on import as well as execution (no __main__ guard in the original).
main()
intelxed/xed | pysrc/read_xed_db.py | Python | apache-2.0 | 33,709 | 0.008366 | #!/usr/bin/env python
# -*- python -*-
#BEGIN_LEGAL
#
#Copyright (c) 2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#END_LEGAL
import sys
import re
import collections
import patterns
import slash_expand
import genutil
import opnd_types
import opnds
import cpuid_rdr
import map_info_rdr
def die(s):
    """Write an ERROR message to stdout and terminate with exit status 1."""
    message = "ERROR: {0}\n".format(s)
    sys.stdout.write(message)
    sys.exit(1)
def msgb(b, s=''):
    """Write a bracketed diagnostic message '[b] s' to stderr."""
    sys.stderr.write("[{0}] {1}\n".format(b, s))
class inst_t(object):
    """Record for one XED instruction; fields are attached dynamically."""
    def __init__(self):
        pass
    def __str__(self):
        # One "field: value" line per attribute, sorted by field name.
        lines = ["{}: {}".format(field, getattr(self, field))
                 for field in sorted(self.__dict__.keys())]
        return "\n".join(lines) + '\n'
    # eosz attribute value -> legal effective operand sizes in bits.
    _EOSZ_TO_WIDTHS = {
        'oszall': [16, 32, 64],
        'osznot16': [32, 64],
        'osznot64': [16, 32],
        'o16': [16],
        'o32': [32],
        'o64': [64],
    }
    def get_eosz_list(self):
        """Return the list of effective operand sizes in bits for a legacy
        instruction, or None for the VEX/EVEX/XOP encoding spaces."""
        if self.space != 'legacy':  # vex, evex, xop
            return None
        # Byte operations are always 8-bit regardless of eosz.
        if hasattr(self, 'attributes') and 'BYTEOP' in self.attributes:
            return [8]
        if not hasattr(self, 'eosz'):
            die("Did not find eosz for {}".format(self.iclass))
        try:
            # Return a fresh list so callers cannot mutate the shared table.
            return list(self._EOSZ_TO_WIDTHS[self.eosz])
        except KeyError:
            die("Could not handle eosz {}".format(self.eosz))
class width_info_t(object):
    """Name, assumed datatype and per-osz bit widths for one width code."""
    def __init__(self, name, dtype, widths):
        """ a name and a dict of widths indexed by osz 8, 16,32, and 64b"""
        self.name = name.upper()
        self.dtype = dtype
        self.widths = widths  # dict indexed by 8,16,32,64
    def __str__(self):
        parts = ["name {}".format(self.name),
                 "datatype: {}".format(self.dtype),
                 "widths: {}".format(",".join(self.widths.values()))]
        return " ".join(parts)
completely_numeric = re.compile(r'^[0-9]+$') # only numbers
def _is_bits(val):
"""Return a number if the value is in explicit bits form:
[0-9]+bits, or None"""
global completely_numeric
length = len(val)
if length > 4:
if val[-4:] == "bits":
number_string = val[0:-4]
if completely_numeric.match(number_string):
return number_string
return None
def _op_immd(op):
if op.name == 'IMM0':
if op.oc2 == 'd':
return True
def _op_immw(op):
if op.name == 'IMM0':
if op.oc2 == 'w':
return True
def _op_immz(op):
if op.name == 'IMM0':
if op.oc2 == 'z':
return True
def _op_immv(op):
if op.name == 'IMM0':
if op.oc2 == 'v':
return True
return False
def _op_imm8(op):
if op.name == 'IMM0':
if op.oc2 == 'b':
return True
return False
def _get_mempop_width_code(v):
for op in v.parsed_operands:
if op.name == 'MEM0':
return op.oc2
die("Could not find evex memop for {}".format(v.iclass))
def _set_eosz(v):
    """Derive the effective operand-size attribute (v.eosz) of a legacy
    instruction from its pattern and mode restriction.
    The EOSZ pattern constraints are applied first; an explicit mode
    restriction then overrides them. Non-legacy records keep 'oszall'."""
    eosz = 'oszall'
    if v.space == 'legacy':
        # Explicit EOSZ constraints in the decode pattern.
        if 'EOSZ=1' in v.pattern:
            eosz = 'o16'
        elif 'EOSZ=2' in v.pattern:
            eosz = 'o32'
        elif 'EOSZ=3' in v.pattern:
            eosz = 'o64'
        elif 'EOSZ!=1' in v.pattern:
            eosz = 'osznot16'
        elif 'EOSZ!=3' in v.pattern:
            eosz = 'osznot64'
        if v.mode_restriction != 'unspecified':
            # Within a fixed mode, a required 66 prefix flips the default
            # size unless the pattern is marked IMMUNE66.
            if v.mode_restriction == 0: # 16b
                if v.osz_required and 'IMMUNE66' not in v.pattern:
                    eosz = 'o32'
                else:
                    eosz = 'o16'
            elif v.mode_restriction == 1: # 32b
                if v.osz_required and 'IMMUNE66' not in v.pattern:
                    eosz = 'o16'
                else:
                    eosz = 'o32'
            elif v.mode_restriction == 2: # 64b
                # 64-bit default/REX.W/FORCE64 all force a 64-bit size.
                if v.default_64b:
                    eosz = 'o64'
                elif v.rexw_prefix == '1':
                    eosz = 'o64'
                elif 'FORCE64' in v.pattern:
                    eosz = 'o64'
                elif v.osz_required and 'IMMUNE66' not in v.pattern:
                    eosz = 'o16'
                else:
                    eosz = 'o32'
    v.eosz = eosz
def is_positive_integer(s):
if re.match(r'^[0-9]+$',s):
return True
return False
class xed_reader_t(object):
"""This class is designed to be used on the partial build materials
collected up in early part of the build and dumped in to the
BUILDDIR/dgen directory. Once initialized, the recs attribute
is what you'll iterate over to access the instruction records.
"""
    def __init__(self,
                 state_bits_filename,
                 instructions_filename,
                 widths_filename,
                 element_types_filename,
                 cpuid_filename='',
                 map_descriptions_filename=''):
        """Load the dgen build materials and run every post-processing pass;
        afterwards self.recs holds the instruction records."""
        # Phase 1: parse the auxiliary tables.
        self.xtypes = self._gen_xtypes(element_types_filename)
        self.width_type_dict, self.width_info_dict = self._gen_widths(widths_filename)
        self.state_bits = self._parse_state_bits(state_bits_filename)
        self.map_info = []
        if map_descriptions_filename:
            self.map_info = map_info_rdr.read_file(map_descriptions_filename)
        self.deleted_unames = {}
        self.deleted_instructions = {}
        # Phase 2: parse the instruction file and derive per-record fields.
        self.recs = self._process_lines(instructions_filename)
        self._find_opcodes()
        self._fix_real_opcode()
        self._parse_operands()
        self._generate_operands()
        self._generate_memop_rw_field()
        self._generate_missing_iforms()
        self._summarize_operands()
        self._summarize_vsib()
        self._summarize_sibmem()
        # Phase 3: optional cpuid mapping, then EVEX/VL attributes.
        self.cpuid_map = {}
        if cpuid_filename:
            self.cpuid_map = cpuid_rdr.read_file(cpuid_filename)
            self._add_cpuid()
        self._add_vl()
        self._add_broadcasting()
        self._evex_disp8_scaling()
    def get_width_info_dict(self):
        """Return the mapping from width-code name to width_info_t."""
        return self.width_info_dict
def _refine_widths_input(self,lines):
"""Return a dict of width_info_t. Skip comments and blank lines"""
comment_pattern = re.compile(r'#.*$')
width_info_dict = {}
for line in lines:
pline = comment_pattern.sub('',line).strip()
if pline == '':
continue
wrds = pline.split()
ntokens = len(wrds)
# dtype is the assumed datatype for that width code
if ntokens == 3:
(name, dtype, all_width) = wrds
width8 = all_width
width16 = all_width
width32 = all_width
width64 = all_width
elif ntokens == 5:
width8='0'
(name, dtype, width16, width32, width64) = wrds
else:
die("Bad number of tokens on line: " + line)
# convert from bytes to bits, unless in explicit bits form "b'[0-9]+"
bit_widths = {}
for osz,val in zip([8,16,32,64], [width8, width16, width32, width64]):
number_string = _is_bits(val)
if number_string:
bit_widths[osz] = number_string
else:
bit_widths[osz] = str(int(val)*8)
width_info_dict[name] = width_info_t(name, dtype, bit_widths)
|
ethanhlc/streamlink | src/streamlink/packages/flashmedia/types.py | Python | bsd-2-clause | 45,802 | 0.000939 | from .compat import OrderedDict, is_py2, str, bytes, integer_types, string_types
from .util import pack_bytes_into
from collections import namedtuple
from struct import Struct, error as struct_error
from inspect import getargspec
# SCRIPTDATA value type markers (0..12) as used in FLV metadata; 0x11 marks
# a value encoded with AMF3 instead.
(SCRIPT_DATA_TYPE_NUMBER, SCRIPT_DATA_TYPE_BOOLEAN,
 SCRIPT_DATA_TYPE_STRING, SCRIPT_DATA_TYPE_OBJECT,
 SCRIPT_DATA_TYPE_RESERVED, SCRIPT_DATA_TYPE_NULL,
 SCRIPT_DATA_TYPE_UNDEFINED, SCRIPT_DATA_TYPE_REFERENCE,
 SCRIPT_DATA_TYPE_ECMAARRAY, SCRIPT_DATA_TYPE_OBJECTEND,
 SCRIPT_DATA_TYPE_STRICTARRAY, SCRIPT_DATA_TYPE_DATE,
 SCRIPT_DATA_TYPE_LONGSTRING) = range(13)
SCRIPT_DATA_TYPE_AMF3 = 0x11
# AMF3 value type markers (0x00..0x11).
(AMF3_TYPE_UNDEFINED, AMF3_TYPE_NULL, AMF3_TYPE_FALSE, AMF3_TYPE_TRUE,
 AMF3_TYPE_INTEGER, AMF3_TYPE_DOUBLE, AMF3_TYPE_STRING, AMF3_TYPE_XML_DOC,
 AMF3_TYPE_DATE, AMF3_TYPE_ARRAY, AMF3_TYPE_OBJECT, AMF3_TYPE_XML,
 AMF3_TYPE_BYTE_ARRAY, AMF3_TYPE_VECTOR_INT, AMF3_TYPE_VECTOR_UINT,
 AMF3_TYPE_VECTOR_DOUBLE, AMF3_TYPE_VECTOR_OBJECT, AMF3_TYPE_DICT) = range(0x12)
# Special AMF3 reference/terminator bytes.
AMF3_EMPTY_STRING = 0x01
AMF3_DYNAMIC_OBJECT = 0x0b
AMF3_CLOSE_DYNAMIC_OBJECT = 0x01
AMF3_CLOSE_DYNAMIC_ARRAY = 0x01
# Signed range of AMF3 variable-length integers: [-2**28, 2**28 - 1].
AMF3_MIN_INTEGER = -268435456
AMF3_MAX_INTEGER = 268435455
class PrimitiveType(Struct):
    """A struct.Struct that can be called directly to pack values and can
    read exactly one item from a file-like object."""
    def __call__(self, *args):
        return self.pack(*args)
    def read(self, fd):
        """Read one packed value from fd; raise IOError on a short read."""
        raw = fd.read(self.size)
        if len(raw) != self.size:
            raise IOError("Unable to read required amount of data")
        return self.unpack(raw)[0]
class PrimitiveClassType(PrimitiveType):
    """A PrimitiveType that packs from and unpacks into instances of a
    user-supplied class (e.g. a namedtuple) instead of bare tuples."""
    def __init__(self, format, cls):
        self.cls = cls
        PrimitiveType.__init__(self, format)
    def pack(self, val):
        # Splat the instance's fields into the underlying packer.
        return PrimitiveType.pack(self, *val)
    def pack_into(self, buf, offset, val):
        return PrimitiveType.pack_into(self, buf, offset, *val)
    def unpack(self, data):
        fields = PrimitiveType.unpack(self, data)
        return (self.cls(*fields),)
    def unpack_from(self, buf, offset):
        fields = PrimitiveType.unpack_from(self, buf, offset)
        return (self.cls(*fields),)
class DynamicType(object):
    """Base for variable-size field types.
    Note that "instantiating" a subclass actually packs: __new__ forwards
    directly to cls.pack and returns the packed bytes, not an instance.
    Subclasses must implement size/pack/pack_into/read/unpack_from.
    """
    def __new__(cls, *args, **kwargs):
        return cls.pack(*args, **kwargs)
    @classmethod
    def size(cls, val):
        raise NotImplementedError
    @classmethod
    def pack(cls, val):
        raise NotImplementedError
    @classmethod
    def pack_into(cls, buf, offset, val):
        raise NotImplementedError
    @classmethod
    def read(cls, fd):
        raise NotImplementedError
    @classmethod
    def unpack_from(cls, buf, offset):
        raise NotImplementedError
    @classmethod
    def unpack(cls, buf):
        # Default implementation: unpack from the start of the buffer.
        return cls.unpack_from(buf, 0)
class TwosComplement(PrimitiveType):
    """Signed two's-complement view over an unsigned PrimitiveType: values in
    [lower, upper] are mapped to the primitive's unsigned range on pack and
    mapped back (sign-extended) on unpack."""
    def __init__(self, primitive):
        # PrimitiveType/Struct.__init__ is not called here; all actual
        # packing is delegated to the wrapped primitive.
        self.primitive = primitive
        bits = self.primitive.size * 8
        self.maxval = 1 << bits          # 2**bits
        self.midval = self.maxval >> 1   # sign-bit threshold
        self.upper = self.midval - 1     # largest representable value
        self.lower = -self.midval        # smallest representable value
    @property
    def size(self):
        # NOTE(review): hardcoded to 3 bytes -- presumably this wrapper is
        # only used with 24-bit primitives; confirm, since
        # self.primitive.size would be the general answer.
        return 3
    def pack(self, val):
        if val < self.lower or val > self.upper:
            msg = "{0} format requires {1} <= number <= {2}".format(self.primitive.format,
                                                                    self.lower, self.upper)
            raise struct_error(msg)
        # Map negative values into the unsigned representation.
        if val < 0:
            val = val + self.maxval
        return self.primitive.pack(val)
    def pack_into(self, buf, offset, val):
        if val < self.lower or val > self.upper:
            msg = "{0} format requires {1} <= number <= {2}".format(self.primitive.format,
                                                                    self.lower, self.upper)
            raise struct_error(msg)
        if val < 0:
            val = val + self.maxval
        return self.primitive.pack_into(buf, offset, val)
    def unpack(self, data):
        val = self.primitive.unpack(data)[0]
        # Sign-extend when the sign bit is set.
        if val & self.midval:
            val = val - self.maxval
        return (val,)
    def unpack_from(self, buf, offset):
        val = self.primitive.unpack_from(buf, offset)[0]
        if val & self.midval:
            val = val - self.maxval
        return (val,)
class HighLowCombo(PrimitiveType):
    """An integer stored as two struct fields (a "high" and a "low" part).

    ``highbits`` is the number of bits carried by the low-order part
    (``lowmask`` masks exactly that many bits).  With ``reverse=True`` the
    first struct field holds the most-significant bits; otherwise the first
    field holds the least-significant bits.
    """
    # NOTE(review): two lines of this class were corrupted in the source by
    # stray "|" characters; they have been restored from the symmetric
    # pack_into/unpack implementations below.

    def __init__(self, format, highbits, reverse=True):
        PrimitiveType.__init__(self, format)
        self.highbits = highbits
        self.lowmask = (1 << highbits) - 1
        self.reverse = reverse
        self.lower = 0
        self.upper = (1 << (self.size * 8)) - 1

    def pack(self, val):
        if val < self.lower or val > self.upper:
            msg = "{0} format requires {1} <= number <= {2}".format(self.format,
                                                                    self.lower, self.upper)
            raise struct_error(msg)
        if self.reverse:
            high = val >> self.highbits
            low = val & self.lowmask
        else:
            high = val & self.lowmask
            low = val >> self.highbits
        return PrimitiveType.pack(self, high, low)

    def pack_into(self, buf, offset, val):
        if val < self.lower or val > self.upper:
            msg = "{0} format requires {1} <= number <= {2}".format(self.format,
                                                                    self.lower, self.upper)
            raise struct_error(msg)
        if self.reverse:
            high = val >> self.highbits
            low = val & self.lowmask
        else:
            high = val & self.lowmask
            low = val >> self.highbits
        return PrimitiveType.pack_into(self, buf, offset, high, low)

    def unpack(self, data):
        high, low = PrimitiveType.unpack(self, data)
        if self.reverse:
            ret = high << self.highbits
            ret |= low
        else:
            ret = high
            ret |= low << self.highbits
        return (ret,)

    def unpack_from(self, buf, offset):
        high, low = PrimitiveType.unpack_from(self, buf, offset)
        if self.reverse:
            ret = high << self.highbits
            ret |= low
        else:
            ret = high
            ret |= low << self.highbits
        return (ret,)
class FixedPoint(PrimitiveType):
    """Fixed-point number stored as an integer scaled by ``2**bits``."""

    def __init__(self, format, bits):
        # Scale factor converting between float values and raw integers.
        self.divider = float(1 << bits)
        PrimitiveType.__init__(self, format)

    def pack(self, val):
        return PrimitiveType.pack(self, int(val * self.divider))

    def pack_into(self, buf, offset, val):
        return PrimitiveType.pack_into(self, buf, offset, int(val * self.divider))

    def unpack(self, data):
        raw = PrimitiveType.unpack(self, data)[0]
        return (raw / self.divider,)

    def unpack_from(self, buf, offset):
        raw = PrimitiveType.unpack_from(self, buf, offset)[0]
        return (raw / self.divider,)
class PaddedBytes(PrimitiveType):
    """Fixed-width ASCII byte field, right-padded with a constant character.

    NOTE(review): ``padding`` is assumed to be a single character — a longer
    padding string would over-pad in ``pack``; confirm against callers.
    """

    def __init__(self, size, padding):
        self.padded_size = size
        self.padding = bytes(padding, "ascii")

    @property
    def size(self):
        return self.padded_size

    def pack(self, val):
        # Truncate to the field width, then pad the remainder.
        rval = bytes(val[:self.size], "ascii")
        if len(rval) < self.size:
            paddinglen = self.size - len(rval)
            rval += self.padding * paddinglen
        return rval

    def pack_into(self, buf, offset, val):
        rval = bytes(val[:self.size], "ascii")
        offset = pack_bytes_into(buf, offset, rval)
        if len(rval) < self.size:
            paddinglen = self.size - len(rval)
            offset = pack_bytes_into(buf, offset, self.padding * paddinglen)
        # BUG FIX: previously fell off the end and returned None; return the
        # advanced offset like the other pack_into implementations do.
        return offset

    def unpack(self, data):
        # Strip the trailing padding and decode back to str.
        return (str(data.rstrip(self.padding), "ascii"),)

    def unpack_from(self, buf, offset):
        data = buf[offset:offset + self.padded_size]
        return (str(data.rstrip(self.padding), "ascii"),)
""" 8-bit integer """
U8 = PrimitiveType("B")
S8 = PrimitiveType("b")
""" 16-bit integer """
U16BE = PrimitiveType(">H")
S16BE = PrimitiveType(">h")
U16LE = PrimitiveType("<H")
S16LE = PrimitiveType("<h")
""" 24-bit integer """
U24BE = HighLowCombo(">HB", 8, True)
S24BE = TwosComplement(U24BE)
U24LE = HighLowCombo("<HB", |
pczhaoyun/obtainfo | zinnia/views/mixins/entry_protection.py | Python | apache-2.0 | 2,489 | 0.002411 | """Protection mixins for Zinnia views"""
from django.contrib.auth.views import login
from django.conf import settings
class LoginMixin(object):
    """
    Mixin providing the login view used by Zinnia.
    """

    def login(self):
        """
        Render the login view with Zinnia's login template.
        """
        return login(self.request, 'zinnia/login.html')
class PasswordMixin(object):
    """
    Mixin implementing a password prompt view
    configurated for Zinnia.
    """
    # Flag rendered into the template; set to True after a wrong password.
    error = False

    def password(self):
        """
        Return the password prompt view.
        """
        # Restored the "request=self.request" argument, which was corrupted
        # in the source.
        return self.response_class(request=self.request,
                                   template='zinnia/password.html',
                                   context={'error': self.error})
class EntryProtectionMixin(LoginMixin, PasswordMixin):
    """
    Mixin returning a login view if the current
    entry needs authentication and a password view
    if the entry is protected by a password.
    """
    # Session key template, parameterised by the entry's primary key.
    session_key = 'zinnia_entry_%s_password'

    def stat_access(self):
        """
        Increment the entry's access counter
        (restored; this line was corrupted in the source).
        """
        try:
            self.object.access += 1
        except TypeError:
            # Counter not initialised yet (None): start it at 1.
            self.object.access = 1
        self.object.save()

    def get(self, request, *args, **kwargs):
        """
        Do the login and password protection.
        """
        response = super(EntryProtectionMixin, self).get(
            request, *args, **kwargs)
        if self.object.login_required and not request.user.is_authenticated():
            return self.login()
        # Prompt for the password unless the correct one is already stored
        # in the session for this entry.
        if (self.object.password and self.object.password !=
                self.request.session.get(self.session_key % self.object.pk)):
            return self.password()
        self.stat_access()
        return response

    def post(self, request, *args, **kwargs):
        """
        Do the login and password protection.
        """
        self.object = self.get_object()
        self.login()
        if self.object.password:
            entry_password = self.request.POST.get('entry_password')
            if entry_password:
                if entry_password == self.object.password:
                    # Remember the valid password in the session, then serve
                    # the entry through the regular GET path.
                    self.request.session[self.session_key %
                                         self.object.pk] = self.object.password
                    return self.get(request, *args, **kwargs)
                else:
                    self.error = True
                return self.password()
        return self.get(request, *args, **kwargs)
|
h-s-c/ci-tools | run_ctest.py | Python | unlicense | 858 | 0.003497 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import platform
import os
import subprocess
if __name__ == "__main__":
    # Locate the cmake install that ships inside our ci-tools checkout.
    CITOOLS_PATH = os.path.join(os.getcwd(), "ci-tools")
    CMAKE_PATH = os.path.join(CITOOLS_PATH, "cmake")
    if platform.system() == "Darwin":
        # On macOS the binaries live inside the CMake application bundle.
        cmake_bin = os.path.join(CMAKE_PATH, "CMake.app", "Contents", "bin")
    else:
        cmake_bin = os.path.join(CMAKE_PATH, "bin")
    # Prepend the bundled cmake to PATH.  os.pathsep is ";" on Windows and
    # ":" elsewhere, which matches the previous per-platform branches while
    # also covering other POSIX platforms.  The fallback default (used only
    # when PATH is unset) is kept identical to the original code.
    os.environ["PATH"] = cmake_bin + os.pathsep + \
        os.environ.get("PATH", os.path.join(CMAKE_PATH, "bin"))
    subprocess.check_call("ctest -VV -S ci-tools/run_ctest.cmake", shell=True)
|
wangscript/libjingle-1 | trunk/third_party/webrtc/tools/barcode_tools/barcode_decoder.py | Python | bsd-3-clause | 10,771 | 0.008263 | #!/usr/bin/env python
# Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
import optparse
import os
import sys
import helper_functions
def convert_yuv_to_png_files(yuv_file_name, yuv_frame_width, yuv_frame_height,
                             output_directory, ffmpeg_dir=None):
  """Converts a YUV video file into PNG frames.

  The function uses ffmpeg to convert the YUV file. The output of ffmpeg is in
  the form frame_xxxx.png, where xxxx is the frame number, starting from 0001.

  Args:
    yuv_file_name(string): The name of the YUV file.
    yuv_frame_width(int): The width of one YUV frame.
    yuv_frame_height(int): The height of one YUV frame.
    output_directory(string): The output directory where the PNG frames will be
      stored.
    ffmpeg_dir(string): The directory containing the ffmpeg executable. If
      omitted, the PATH will be searched for it.

  Return:
    (bool): True if the conversion was OK.
  """
  # ffmpeg takes the raw-frame geometry as WxH, e.g. "640x480".
  size_string = str(yuv_frame_width) + 'x' + str(yuv_frame_height)
  output_files_pattern = os.path.join(output_directory, 'frame_%04d.png')
  ffmpeg_executable = 'ffmpeg.exe' if sys.platform == 'win32' else 'ffmpeg'
  if ffmpeg_dir:
    ffmpeg_executable = os.path.join(ffmpeg_dir, ffmpeg_executable)
  # "-f image2" makes ffmpeg write one numbered PNG per decoded frame.
  command = [ffmpeg_executable, '-s', '%s' % size_string, '-i', '%s'
             % yuv_file_name, '-f', 'image2', '-vcodec', 'png',
             '%s' % output_files_pattern]
  try:
    print 'Converting YUV file to PNG images (may take a while)...'
    print ' '.join(command)
    helper_functions.run_shell_command(
        command, fail_msg='Error during YUV to PNG conversion')
  except helper_functions.HelperError, err:
    # Report the failure and let the caller decide how to proceed.
    print >> sys.stderr, 'Error executing command: %s. Error: %s' % (command,
                                                                     err)
    return False
  return True
def decode_frames(input_directory, zxing_dir=None):
  """Decodes the barcodes overlaid in each frame.

  The function uses the Zxing command-line tool from the Zxing C++ distribution
  to decode the barcode in every PNG frame from the input directory. The frames
  should be named frame_xxxx.png, where xxxx is the frame number. The frame
  numbers should be consecutive and should start from 0001.

  The decoding results in a frame_xxxx.txt file for every successfully decoded
  barcode. This file contains the decoded barcode as 12-digit string (UPC-A
  format: 11 digits content + one check digit).

  Args:
    input_directory(string): The input directory from where the PNG frames are
      read.
    zxing_dir(string): The directory containing the zxing executable. If
      omitted, the PATH will be searched for it.

  Return:
    (bool): True if the decoding went without errors.
  """
  # Pick the platform-specific executable name, optionally anchored in
  # zxing_dir; otherwise the shell's PATH lookup resolves it.
  zxing_executable = 'zxing.exe' if sys.platform == 'win32' else 'zxing'
  if zxing_dir:
    zxing_executable = os.path.join(zxing_dir, zxing_executable)
  print 'Decoding barcodes from PNG files with %s...' % zxing_executable
  # Runs _decode_barcode_in_file on frame_0001.png, frame_0002.png, ...
  # until the numbering breaks.
  return helper_functions.perform_action_on_all_files(
      directory=input_directory, file_pattern='frame_',
      file_extension='png', start_number=1, action=_decode_barcode_in_file,
      command_line_decoder=zxing_executable)
def _decode_barcode_in_file(file_name, command_line_decoder):
  """Decodes the barcode in the upper left corner of a PNG file.

  Args:
    file_name(string): File name of the PNG file.
    command_line_decoder(string): The ZXing command-line decoding tool.

  Return:
    (bool): True upon success, False otherwise.
  """
  # --try-harder lets zxing spend more effort per image; --dump-raw prints
  # the raw decoded payload on stdout, which is captured and persisted next
  # to the frame as <frame>.txt.
  command = [command_line_decoder, '--try-harder', '--dump-raw', file_name]
  try:
    out = helper_functions.run_shell_command(
        command, fail_msg='Error during decoding of %s' % file_name)
    print 'Image %s : decoded barcode: %s' % (file_name, out)
    # NOTE(review): the handle leaks if write() raises; a try/finally (or
    # "with") would be safer -- confirm before changing, this is Python 2.
    text_file = open('%s.txt' % file_name[:-4], 'w')
    text_file.write(out)
    text_file.close()
  except helper_functions.HelperError, err:
    print >> sys.stderr, 'Barcode in %s cannot be decoded.' % file_name
    print >> sys.stderr, err
    return False
  return True
def _generate_stats_file(stats_file_name, input_directory='.'):
  """Generate statistics file.

  The function generates a statistics file. The contents of the file are in the
  format <frame_name> <barcode>, where frame name is the name of every frame
  (effectively the frame number) and barcode is the decoded barcode. The frames
  and the helper .txt files are removed after they have been used.
  """
  file_prefix = os.path.join(input_directory, 'frame_')
  stats_file = open(stats_file_name, 'w')
  print 'Generating stats file: %s' % stats_file_name
  for i in range(1, _count_frames_in(input_directory=input_directory) + 1):
    frame_number = helper_functions.zero_pad(i)
    barcode_file_name = file_prefix + frame_number + '.txt'
    png_frame = file_prefix + frame_number + '.png'
    # Files on disk are numbered from 0001, but the stats entries are written
    # zero-based: frame_0000 in the stats corresponds to frame_0001.png.
    entry_frame_number = helper_functions.zero_pad(i-1)
    entry = 'frame_' + entry_frame_number + ' '
    if os.path.isfile(barcode_file_name):
      barcode = _read_barcode_from_text_file(barcode_file_name)
      os.remove(barcode_file_name)
      if _check_barcode(barcode):
        # Record the 11 content digits (check digit stripped), re-padded.
        entry += (helper_functions.zero_pad(int(barcode[0:11])) + '\n')
      else:
        entry += 'Barcode error\n'  # Barcode is wrongly detected.
    else:  # Barcode file doesn't exist.
      entry += 'Barcode error\n'
    stats_file.write(entry)
    os.remove(png_frame)
  stats_file.close()
def _read_barcode_from_text_file(barcode_file_name):
"""Reads the decoded barcode for a .txt file.
Args:
barcode_file_name(string): The name of the .txt file.
Return:
(string): The decoded barcode.
"""
barcode_file = open(barcode_file_name, 'r')
barcode = barcode_file.read()
barcode_file.close()
return barcode
def _check_barcode(barcode):
"""Check weather the UPC-A barcode was decoded correctly.
This function calculates the check digit of the provided barcode and compares
it to the check digit that was decoded.
Args:
barcode(string): The barcode (12-digit).
Return:
(bool): True if the barcode was decoded correctly.
"""
if len(barcode) != 12:
return False
r1 = range(0, 11, 2) # Odd digits
r2 = range(1, 10, 2) # Even digits except last
dsum = 0
# Sum all the even digits
for i in r1:
dsum += int(barcode[i])
# Multiply the sum by 3
dsum *= 3
# Add all the even digits except the check digit (12th digit)
for i in r2:
dsum += int(barcode[i])
# Get the modulo 10
dsum = dsum % 10
# If not 0 substract from 10
if dsum != 0:
dsum = 10 - dsum
# Compare result and check digit
return dsum == int(barcode[11])
def _count_frames_in(input_directory='.'):
  """Calculates the number of frames in the input directory.

  Frames are expected to be named frame_xxxx.png with consecutive numbers
  starting from 1; counting stops at the first missing file.

  Args:
    input_directory(string): The input directory.

  Return:
    (int): The number of frames.
  """
  file_prefix = os.path.join(input_directory, 'frame_')
  num = 0
  while os.path.isfile(file_prefix + helper_functions.zero_pad(num + 1) +
                       '.png'):
    num += 1
  return num
def _parse_args():
"""Registers the command-line options."""
usage = "usage: %prog [options]"
parser = optparse.OptionParser(usage=usage)
parser.add_option('--zxing_dir', type='string',
help=('The path to the directory where the zxing executable'
'is located. If omitted, it will be assumed to be '
'present in the PATH.'))
parser.add_option('--ffmpeg_dir', type='string', defaul |
dya2/python-for-android | python3-alpha/python3-src/Tools/scripts/ndiff.py | Python | apache-2.0 | 3,825 | 0.002614 | #! /usr/bin/env python3
# Module ndiff version 1.7.0
# Released to the public domain 08-Dec-2000,
# by Tim Peters (tim.one@home.com).
# Provided as-is; use at your own risk; no warranty; no promises; enjoy!
# ndiff.py is now simply a front-end to the difflib.ndiff() function.
# Originally, it contained the difflib.SequenceMatcher class as well.
# This completes the raiding of reusable code from this formerly
# self-contained script.
"""ndiff [-q] file1 file2
or
ndiff (-r1 | -r2) < ndiff_output > file1_or_file2
Print a human-friendly file difference report to stdout. Both inter-
and intra-line differences are noted. In the second form, recreate file1
(-r1) or file2 (-r2) on stdout, from an ndiff report on stdin.
In the first form, if -q ("quiet") is not specified, the first two lines
of output are
-: file1
+: file2
Each remaining line begins with a two-letter code:
"- " line unique to file1
"+ " line unique to file2
" " line common to both files
"? " line not present in either input file
Lines beginning with "? " attempt to guide the eye to intraline
differences, and were not present in either input file. These lines can be
confusing if the source files contain tab characters.
The first file can be recovered by retaining only lines that begin with
" " or "- ", and deleting those 2-character prefixes; use ndiff with -r1.
The second file can be recovered similarly, but by retaining only " " and
"+ " lines; use ndiff with -r2; or, on Unix, the second file can be
recovered by piping the output through
sed -n '/^[+ ] /s/^..//p'
"""
__version__ = 1, 7, 0
import difflib, sys
def fail(msg):
    """Write *msg* and the usage text to stderr; always return 0."""
    err = sys.stderr.write
    err(msg + "\n\n")
    err(__doc__)
    return 0
# open a file & return the file object; gripe and return 0 if it
# couldn't be opened
def fopen(fname):
    """Open *fname* for reading and return the file object.

    Gripe via fail() and return 0 if it couldn't be opened.
    """
    try:
        # The 'U' (universal newlines) mode flag was removed in Python 3.11;
        # plain text mode already translates newlines by default in Python 3.
        return open(fname, 'r')
    except OSError as detail:
        # OSError is the Python 3 alias of IOError, so this catches exactly
        # the same failures as before.
        return fail("couldn't open " + fname + ": " + str(detail))
# open two files & spray the diff to stdout; return false iff a problem
def fcompare(f1name, f2name):
    """Spray the ndiff of the two named files to stdout; false iff a problem."""
    f1 = fopen(f1name)
    f2 = fopen(f2name)
    if not (f1 and f2):
        # fopen already reported the error and returned 0.
        return 0
    a = f1.readlines()
    f1.close()
    b = f2.readlines()
    f2.close()
    for line in difflib.ndiff(a, b):
        print(line, end=' ')
    return 1
# crack args (sys.argv[1:] is normal) & compare;
# return false iff a problem
def main(args):
    """Crack args (sys.argv[1:] is normal) and compare; false iff a problem."""
    import getopt
    try:
        opts, args = getopt.getopt(args, "qr:")
    except getopt.error as detail:
        return fail(str(detail))
    noisy = 1
    qseen = rseen = 0
    whichfile = None
    for opt, val in opts:
        if opt == "-q":
            qseen, noisy = 1, 0
        elif opt == "-r":
            rseen, whichfile = 1, val
    if qseen and rseen:
        return fail("can't specify both -q and -r")
    if rseen:
        # Restore mode: reconstruct one side from an ndiff on stdin.
        if args:
            return fail("no args allowed with -r option")
        if whichfile not in ("1", "2"):
            return fail("-r value must be 1 or 2")
        restore(whichfile)
        return 1
    if len(args) != 2:
        return fail("need 2 filename args")
    f1name, f2name = args
    if noisy:
        print('-:', f1name)
        print('+:', f2name)
    return fcompare(f1name, f2name)
# read ndiff output from stdin, and print file1 (which=='1') or
# file2 (which=='2 | ') to stdout
def restore(which):
    """Recreate file1 (which == '1') or file2 ('2') on stdout from an ndiff
    report read from stdin."""
    delta = sys.stdin.readlines()
    sys.stdout.writelines(difflib.restore(delta, which))
if __name__ == '__main__':
    args = sys.argv[1:]
    # Undocumented switch: "-profile" runs main() under the profiler and
    # prints timing stats instead of executing normally.
    if "-profile" in args:
        import profile, pstats
        args.remove("-profile")
        statf = "ndiff.pro"  # profile data is written to this file
        profile.run("main(args)", statf)
        stats = pstats.Stats(statf)
        stats.strip_dirs().sort_stats('time').print_stats()
    else:
        main(args)
|
koery/win-sublime | Data/Packages/Default/switch_file.py | Python | mit | 1,112 | 0.002698 | import sublime, sublime_plugin
import os.path
import platform
def compare_file_names(x, y):
    """Compare two file names, case-insensitively on Windows and macOS
    (whose default file systems are case-insensitive)."""
    if platform.system() in ('Windows', 'Darwin'):
        return x.lower() == y.lower()
    return x == y
class SwitchFileCommand(sublime_plugin.WindowCommand):
    """Switch the active view to a sibling file sharing its base name but
    carrying the next extension from *extensions* (e.g. foo.h <-> foo.cpp).

    Two corrupted lines in the source (stray "|" characters) have been
    restored; the unused ``path`` local was removed.
    """

    def run(self, extensions=[]):
        view = self.window.active_view()
        if not view:
            return
        fname = view.file_name()
        if not fname:
            return
        base, ext = os.path.splitext(fname)
        start = 0
        count = len(extensions)
        if ext != "":
            ext = ext[1:]  # drop the leading dot
            # If the current extension is in the rotation, start searching
            # right after it and skip trying the current extension itself.
            for i in range(0, len(extensions)):
                if compare_file_names(extensions[i], ext):
                    start = i + 1
                    count -= 1
                    break
        for i in range(0, count):
            idx = (start + i) % len(extensions)
            new_path = base + '.' + extensions[idx]
            if os.path.exists(new_path):
                self.window.open_file(new_path)
                break
|
tLDP/lampadas | pylib/data/search.py | Python | gpl-2.0 | 4,556 | 0.019096 | #!/usr/bin/python
#
# This file is part of the Lampadas Documentation System.
#
# Copyright (c) 2000, 2001, 2002 David Merrill <david@lupercalia.net>.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
from Globals import *
from | globals import *
from Config import config
from HTML import page_factory
from Tables import tables
from Sessions import sessions
from URLParse import URI
from Log import log
from mod_python import apache
import os
from CoreDM import dms
def document(req,
             title='',
             short_title='',
             pub_status_code='',
             type_code='',
             topic_code='',
             username='',
             maintained='',
             maintainer_wanted='',
             lang='',
             review_status_code='',
             tech_review_status_code='',
             pub_date='',
             last_update='',
             tickle_date='',
             isbn='',
             encoding='',
             rating='',
             format_code='',
             dtd_code='',
             license_code='',
             copyright_holder='',
             sk_seriesid='',
             abstract='',
             short_desc='',
             collection_code='',
             columns={},
             layout='compact'
             ):
    """
    Returns the results of a document search.

    Each keyword argument is a filter value passed straight through to
    tables.doctable(); empty strings leave that field unfiltered.
    """
    # Read session state
    sessions.get_session(req)
    uri = URI(req.uri)
    page = dms.page.get_by_id('doctable')
    # serve search results by manually replacing the
    # doctable here instead of during the regular call.
    # It's a bit ugly, but works.
    # We store and restore the contents to avoid doing
    # a copy.deepcopy() which I haven't tested but imagine to
    # be rather expensive. -- DCM
    save_page = page.page[uri.lang]
    table = tables.doctable(uri,
                            title = title,
                            short_title = short_title,
                            pub_status_code = pub_status_code,
                            type_code = type_code,
                            topic_code = topic_code,
                            username = username,
                            maintained = maintained,
                            maintainer_wanted = maintainer_wanted,
                            lang = lang,
                            review_status_code = review_status_code,
                            tech_review_status_code = tech_review_status_code,
                            pub_date = pub_date,
                            last_update = last_update,
                            tickle_date = tickle_date,
                            isbn = isbn,
                            encoding = encoding,
                            rating = rating,
                            format_code = format_code,
                            dtd_code = dtd_code,
                            license_code = license_code,
                            copyright_holder = copyright_holder,
                            sk_seriesid = sk_seriesid,
                            abstract = abstract,
                            short_desc = short_desc,
                            collection_code = collection_code,
                            layout = layout,
                            show_search = 1)
    # Splice the rendered table into the page in place of the placeholder.
    page.page[uri.lang] = page.page[uri.lang].replace('|tabdocs|', table)
    # NOTE(review): uri is rebound here, so the restore below uses the new
    # uri.lang -- if it can differ from the request's language, the wrong
    # cached page entry would be restored; confirm the two always match.
    uri = URI('doctable' + referer_lang_ext(req))
    uri.base = '../../'
    html = page_factory.build_page(page, uri)
    # Restore the original page
    page.page[uri.lang] = save_page
    return html
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.