repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
biln/airflow | airflow/example_dags/example_python_operator.py | Python | apache-2.0 | 1,774 | 0.000564 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific l | anguage governing permissions and
# limitations under the License.
from __future__ import print_function
from builtins import range
from airflow.operators import PythonOperator
from airflow.models import DAG
from datetime import datetime, timedelta
import time
from pprint import pprint
seven_days_ago = datetime.combine(
datetime.today() - timedelta(7) | , datetime.min.time())
args = {
'owner': 'airflow',
'start_date': seven_days_ago,
}
dag = DAG(
dag_id='example_python_operator', default_args=args,
schedule_interval=None)
def my_sleeping_function(random_base):
'''This is a function that will run within the DAG execution'''
time.sleep(random_base)
def print_context(ds, **kwargs):
pprint(kwargs)
print(ds)
return 'Whatever you return gets printed in the logs'
run_this = PythonOperator(
task_id='print_the_context',
provide_context=True,
python_callable=print_context,
dag=dag)
for i in range(10):
'''
Generating 10 sleeping task, sleeping from 0 to 9 seconds
respectively
'''
task = PythonOperator(
task_id='sleep_for_'+str(i),
python_callable=my_sleeping_function,
op_kwargs={'random_base': float(i)/10},
dag=dag)
task.set_upstream(run_this)
|
CSCI1200Course/csci1200OnlineCourse | modules/data_source_providers/rest_providers.py | Python | apache-2.0 | 14,940 | 0.000268 | # Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes providing REST data sources for common CourseBuilder items."""
__author__ = 'Mike Gainer (mgainer@google.com)'
from common import schema_fields
from common import utils
from models import courses
from models import data_sources
from models import jobs
from models import models
from models import transforms
from tools import verify
class AssessmentsDataSource(data_sources.AbstractSmallRestDataSource):
@classmethod
def get_name(cls):
return 'assessments'
@classmethod
def get_title(cls):
return 'Assessments'
@classmethod
def get_schema(cls, unused_app_context, unused_catch_and_log):
reg = schema_fields.FieldRegistry(
'Analytics',
description='Sets of questions determining student skill')
reg.add_property(schema_fields.SchemaField(
'unit_id', 'Unit ID', 'integer',
description='Key uniquely identifying this particular assessment'))
reg.add_property(schema_fields.SchemaField(
'title', 'Title', 'string',
description='Human-readable title describing the assessment'))
reg.add_property(schema_fields.SchemaField(
'weight', 'Weight', 'number',
'Scalar indicating how the results of this assessment are '
'to be weighted versus the results of peer assessments.'))
reg.add_property(schema_fields.SchemaField(
'html_check_answers', 'Check Answers', 'boolean',
'Whether students may check their answers before submitting '
'the assessment.'))
reg.add_property(schema_fields.SchemaField(
'properties', 'Properties', 'object',
'Set of key/value additional properties, not further defined.'))
return reg.get_json_schema_dict()['properties']
@classmethod
def fetch_values(cls, app_context, *args, **kwargs):
course = courses.Course(handler=None, app_context=app_context)
assessments = course.get_units_of_type(verify.UNIT_TYPE_ASSESSMENT)
ret = []
for assessment in assessments:
ret.append({
'unit_id': assessment.unit_id,
'title': assessment.title,
'weight': assessment.weight,
'html_check_answers': assessment.html_check_answers,
'properties': assessment.properties})
return ret, 0
class UnitsDataSource(data_sources.AbstractSmallRestDataSource):
@classmethod
def get_name(cls):
return 'units'
@classmethod
def get_title(cls):
return 'Units'
@classmethod
def get_schema(cls, unused_app_context, unused_catch_and_log):
reg = schema_fields.FieldRegistry(
'Units',
description='Sets of lessons providing course content')
reg.add_property(schema_fields.SchemaField(
'unit_id', 'Unit ID', 'integer',
description='Key uniquely identifying this particular unit'))
reg.add_property(schema_fields.SchemaField(
'title', 'Title', 'string',
description='Human-readable title describing the unit'))
reg.add_property(schema_fields.SchemaField(
'properties', 'Properties', 'object',
'Set of key/value additional properties, not further defined.'))
return reg.get_json_schema_dict()['properties']
@classmethod
def fetch_values(cls, app_context, *args, **kwargs):
course = courses.Course(handler=None, app_context=app_context)
units = course.get_units_of_type(verify.UNIT_TYPE_UNIT)
ret = []
for unit in units:
ret.append({
'unit_id': unit.unit_id,
'title': unit.title,
'properties': unit.properties,
})
return ret, 0
class LessonsDataSource(data_sources.AbstractSmallRestDataSource):
@classmethod
def get_name(cls):
return 'lessons'
@classmethod
def get_title(cls):
return 'Lessons'
@classmethod
def get_schema(cls, unused_app_context, unused_catch_and_log):
reg = schema_fields.FieldRegistry(
'Lessons',
| description='Sets of lessons providing course content')
reg.add_property(schema | _fields.SchemaField(
'lesson_id', 'Unit ID', 'integer',
description='Key uniquely identifying which lesson this is'))
reg.add_property(schema_fields.SchemaField(
'unit_id', 'Unit ID', 'integer',
description='Key uniquely identifying unit lesson is in'))
reg.add_property(schema_fields.SchemaField(
'title', 'Title', 'string',
description='Human-readable title describing the unit'))
reg.add_property(schema_fields.SchemaField(
'scored', 'Scored', 'boolean',
'Boolean: Whether questions in this lesson count for scoring.'))
reg.add_property(schema_fields.SchemaField(
'has_activity', 'Has Activity', 'boolean',
'Boolean: does this lesson contain an activity?'))
reg.add_property(schema_fields.SchemaField(
'activity_title', 'Activity Title', 'string',
'Title of the activity (if lesson has an activity)'))
return reg.get_json_schema_dict()['properties']
@classmethod
def fetch_values(cls, app_context, *args, **kwargs):
course = courses.Course(handler=None, app_context=app_context)
lessons = course.get_lessons_for_all_units()
ret = []
for lesson in lessons:
ret.append({
'lesson_id': lesson.unit_id,
'unit_id': lesson.unit_id,
'title': lesson.title,
'scored': lesson.scored,
'has_activity': lesson.has_activity,
'activity_title': lesson.activity_title,
})
return ret, 0
class StudentAssessmentScoresDataSource(
data_sources.AbstractDbTableRestDataSource):
"""Unpack student assessment scores from student record.
NOTE: Filtering/ordering, if present, will be done based on Student
attributes, not scores. (The scores are in an encoded string in a
field which is not indexed anyhow.) The only meaningful field to
index or filter on is enrolled_on.
"""
@classmethod
def get_name(cls):
return 'assessment_scores'
@classmethod
def get_title(cls):
return 'Assessment Scores'
@classmethod
def get_context_class(cls):
return data_sources.DbTableContext
@classmethod
def get_schema(cls, unused_app_context, unused_catch_and_log):
reg = schema_fields.FieldRegistry('Unit',
description='Course sub-components')
reg.add_property(schema_fields.SchemaField(
'user_id', 'User ID', 'string',
description='Student ID encrypted with a session-specific key'))
reg.add_property(schema_fields.SchemaField(
'id', 'Unit ID', 'string',
description='ID of assessment for this score.'))
reg.add_property(schema_fields.SchemaField(
'title', 'Title', 'string',
description='Title of the assessment for this score.'))
reg.add_property(schema_fields.SchemaField(
'score', 'Score', 'integer',
description='Value from 0 to 100 indicating % correct.'))
reg.add_property(schema_fields.SchemaField(
'weight', 'Weight', 'integer',
description='Value from 0 to 100 indicating % |
chrisxue815/leetcode_python | problems/test_0844_stack.py | Python | unlicense | 744 | 0 | import unittest
import utils
def remove_backspace(s):
result = []
for ch in s:
if ch == '#':
if result:
result.pop()
else:
result.append(ch)
return result
# O(n) time. O(n) space. Stack.
class Solution:
def backspaceCompare(self, S: str, T: str) -> bool:
return remove_backspace(S) == remove_backspace(T)
class Test(unittest.TestCase):
def test(self):
cases = utils.load_test_json(__file__).test_cases
for case in cases:
args = str(case.args)
actual = Solution().backspaceCompare(**case.args.__dict__)
| self.assertEqual(case.expected, actual, msg=args)
if __name__ == '__main__':
unittest.main()
| |
KarlTDebiec/MDclt | primary/raw.py | Python | bsd-3-clause | 7,794 | 0.016295 | # -*- coding: utf-8 -*-
# MDclt.primary.raw.py
#
# Copyright (C) 2012-2015 Karl T Debiec
# All rights reserved.
#
# This software may be modified and distributed under the terms of the
# BSD license. See the LICENSE file for details.
"""
Classes for transfer of data from raw text files to h5
.. todo:
- Look into improving speed (np.loadtxt or np.genfromtxt may actually be
preferable)
- Alow scaleoffset to be specified
"""
################################### MODULES ####################################
from __future__ import division, print_function
import os, sys
import numpy as np
from MDclt import Block, Block_Acceptor, primary
################################## FUNCTIONS ###################################
def add_parser(tool_subparsers, **kwargs):
"""
Adds subparser for this analysis to a nascent argument parser
**Arguments:**
:*tool_subparsers*: argparse subparsers object to add subparser
:*args*: Passed to tool_subparsers.add_parser(...)
:*kwargs*: Passed to tool_subparsers.add_parser(...)
"""
from MDclt import overridable_defaults
subparser = primary.add_parser(tool_subparsers,
name = "raw",
help = "Load raw text files")
arg_groups = {ag.title: ag for ag in subparser._action_groups}
arg_groups["input"].add_argument(
"-frames_per_file",
type = int,
required = False,
help = "Number of frames in each file; used to check if new data " +
"is present")
arg_groups["input"].add_argument(
"-dimensions",
type = int,
required = False,
nargs = "*",
help = "Additional dimensions in dataset; if multidimensional " +
"(optional)")
arg_groups["output"].add_argument(
"-output",
type = str,
required = True,
nargs = "+",
action = overridable_defaults(nargs = 2, defaults = {1: "/dataset"}),
help = "H5 file and optionally address in which to output data " +
"(default address: /dataset)")
subparser.set_defaults(analysis = command_line)
def command_line(n_cores = 1, **kwargs):
"""
Provides command line functionality for this analysis
**Arguments:**
:*n_cores*: Number of cores to use
.. todo:
- Figure out syntax to get this into MDclt.primary
"""
from multiprocessing import Pool
from MDclt import pool_director
block_generator = Raw_Block_Generator(**kwargs)
block_acceptor = Block_Acceptor(outputs = block_generator.outputs,
**kwargs)
if n_cores == 1: # Serial
for block in block_generator:
block()
block_acceptor.send(block)
else: # Parallel (processes)
pool = Pool(n_cores)
for block in pool.imap_unordered(pool_director, block_generator):
block_acceptor.send(block)
pool.close()
pool.join()
block_acceptor.close()
################################### CLASSES ####################################
class Raw_Block_Generator(primary.Primary_Block_Generator):
"""
Generator class that prepares blocks of analysis
"""
def __init__(self, infiles, dimensions, output, frames_per_file = None,
**kwargs):
"""
Initializes generator
**Arguments:**
:*output*: List including path to h5 file and
address within h5 file
:*infiles*: List of infiles
:*frames_per_file*: Number of frames in each infile
:*dimensions*: Additional dimensions in dataset; if
multidimensional (optional)
.. todo:
- Intelligently break lists of infiles into blocks larger
than 1
"""
# Input
self.infiles = infiles
self.frames_per_file = frames_per_file
self.infiles_per_block = 5
if dimensions is None:
self.dimensions = []
else:
self.dimensions = dimensions
# Output
self.outputs = [(output[0], os.path.normpath(output[1]))]
# Action
self.dtype = np.float32
super(Raw_Block_Generator, self).__init__(**kwargs)
# Output
self.outputs = [(output[0], os.path.normpath(output[1]),
tuple([self.final_slice.stop - self.final_slice.start]
+ self.dimensions))]
def next(self):
| """
Prepares and returns next Block of analysis
"""
if len(self.infiles) == 0:
raise StopIteration()
else:
block_infiles = self.infiles[:self.infiles_per_block]
block_slice = slice(self.start_index,
self.start_index + len(block_infi | les) * self.frames_per_file, 1)
self.infiles = self.infiles[self.infiles_per_block:]
self.start_index += len(block_infiles) * self.frames_per_file
return Raw_Block(infiles = block_infiles,
output = self.outputs[0],
slc = block_slice,
dimensions = self.dimensions,
dtype = self.dtype)
class Raw_Block(Block):
"""
Independent block of analysis
"""
def __init__(self, infiles, output, dtype, slc, dimensions = [],
attrs = {}, **kwargs):
"""
Initializes block of analysis
**Arguments:**
:*infiles*: List of infiles
:*output*: For each dataset, path to h5 file, address
within h5 file, and if appropriate final
shape of dataset; list of tuples
:*dtype*: Data type of nascent dataset
:*slc*: Slice within dataset at which this block
will be stored
:*dimensions*: Additional dimensions in dataset; if
multidimensional (optional)
:*attrs*: Attributes to add to dataset
"""
super(Raw_Block, self).__init__(**kwargs)
self.infiles = infiles
self.dimensions = dimensions
self.output = output
self.datasets = {self.output: dict(slc = slc, attrs = attrs,
kwargs = dict(maxshape = [None] + dimensions,
scaleoffset = 4))}
def __call__(self, **kwargs):
"""
Runs this block of analysis
"""
from subprocess import Popen, PIPE
# Load raw data into numpy using shell commands; there may be a faster
# way to do this; but this seems faster than np.loadtxt()
# followed by np.concanenate() for multiple files
command = "cat {0} | sed ':a;N;$!ba;s/\\n//g'".format(
" ".join(self.infiles))
process = Popen(command, stdout = PIPE, shell = True)
input_bytes = bytearray(process.stdout.read())
dataset = np.array(np.frombuffer(input_bytes, dtype = "S8",
count = int((len(input_bytes) -1) / 8)), np.float32)
# np.loadtxt alternative; keep here for future testing
# dataset = []
# for infile in self.infiles:
# dataset += [np.loadtxt(infile)]
# dataset = np.concatenate(dataset)
# Reshape if necessary
if len(self.dimensions) != 0:
dataset = dataset.reshape(
[dataset.size / np.product(self.dimensions)] + self.dimensions)
# Store in instance variable
self.datasets[self.output]["data"] = dataset
|
weaver-viii/h2o-3 | h2o-py/tests/testdir_algos/rf/pyunit_iris_nfoldsRF.py | Python | apache-2.0 | 616 | 0.016234 | import sys
sys | .path.insert(1, "../../../")
import h2o
def iris_nfolds(ip,port):
iris = h2o.import_file(path=h2o.locate("smalldata/iris/iris.csv"))
model = h2o.random_forest(y=iris[4], x=iris[0:4], ntrees=50, nfolds=5)
model.show()
# Can specify both nfolds >= 2 and validation = H2OParsedData at once
try:
h2o.random_forest(y=iris[4], x=iris[0:4], validation_y=iris[4], validation_x=iris | [0:4], ntrees=50, nfolds=5)
assert True
except EnvironmentError:
assert False, "expected an error"
if __name__ == "__main__":
h2o.run_test(sys.argv, iris_nfolds)
|
apache/incubator-airflow | airflow/config_templates/default_webserver_config.py | Python | apache-2.0 | 4,695 | 0.000639 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Default configuration for the Airflow webserver"""
import os
from airflow.www.fab_security.manager import AUTH_DB
# from airflow.www.fab_security.manager import | AUTH_LDAP
# from airflow.www.fab_security.manager import AUTH_OAUTH
# from airflow.www.fab_security.manager import AUTH_OID
# from airflow.www.fab_security.manager import AUTH_REMOTE_USER
basedir = os.path.abspath(os.path.dirname(__file__))
# Flask-WTF flag for CSRF
WTF_CSRF_ENABLED = True
# ----------------------------------------------------
# AUTHENTICATION CONFIG
# --------------- | -------------------------------------
# For details on how to set up each of the following authentication, see
# http://flask-appbuilder.readthedocs.io/en/latest/security.html# authentication-methods
# for details.
# The authentication type
# AUTH_OID : Is for OpenID
# AUTH_DB : Is for database
# AUTH_LDAP : Is for LDAP
# AUTH_REMOTE_USER : Is for using REMOTE_USER from web server
# AUTH_OAUTH : Is for OAuth
AUTH_TYPE = AUTH_DB
# Uncomment to setup Full admin role name
# AUTH_ROLE_ADMIN = 'Admin'
# Uncomment to setup Public role name, no authentication needed
# AUTH_ROLE_PUBLIC = 'Public'
# Will allow user self registration
# AUTH_USER_REGISTRATION = True
# The recaptcha it's automatically enabled for user self registration is active and the keys are necessary
# RECAPTCHA_PRIVATE_KEY = PRIVATE_KEY
# RECAPTCHA_PUBLIC_KEY = PUBLIC_KEY
# Config for Flask-Mail necessary for user self registration
# MAIL_SERVER = 'smtp.gmail.com'
# MAIL_USE_TLS = True
# MAIL_USERNAME = 'yourappemail@gmail.com'
# MAIL_PASSWORD = 'passwordformail'
# MAIL_DEFAULT_SENDER = 'sender@gmail.com'
# The default user self registration role
# AUTH_USER_REGISTRATION_ROLE = "Public"
# When using OAuth Auth, uncomment to setup provider(s) info
# Google OAuth example:
# OAUTH_PROVIDERS = [{
# 'name':'google',
# 'token_key':'access_token',
# 'icon':'fa-google',
# 'remote_app': {
# 'api_base_url':'https://www.googleapis.com/oauth2/v2/',
# 'client_kwargs':{
# 'scope': 'email profile'
# },
# 'access_token_url':'https://accounts.google.com/o/oauth2/token',
# 'authorize_url':'https://accounts.google.com/o/oauth2/auth',
# 'request_token_url': None,
# 'client_id': GOOGLE_KEY,
# 'client_secret': GOOGLE_SECRET_KEY,
# }
# }]
# When using LDAP Auth, setup the ldap server
# AUTH_LDAP_SERVER = "ldap://ldapserver.new"
# When using OpenID Auth, uncomment to setup OpenID providers.
# example for OpenID authentication
# OPENID_PROVIDERS = [
# { 'name': 'Yahoo', 'url': 'https://me.yahoo.com' },
# { 'name': 'AOL', 'url': 'http://openid.aol.com/<username>' },
# { 'name': 'Flickr', 'url': 'http://www.flickr.com/<username>' },
# { 'name': 'MyOpenID', 'url': 'https://www.myopenid.com' }]
# ----------------------------------------------------
# Theme CONFIG
# ----------------------------------------------------
# Flask App Builder comes up with a number of predefined themes
# that you can use for Apache Airflow.
# http://flask-appbuilder.readthedocs.io/en/latest/customizing.html#changing-themes
# Please make sure to remove "navbar_color" configuration from airflow.cfg
# in order to fully utilize the theme. (or use that property in conjunction with theme)
# APP_THEME = "bootstrap-theme.css" # default bootstrap
# APP_THEME = "amelia.css"
# APP_THEME = "cerulean.css"
# APP_THEME = "cosmo.css"
# APP_THEME = "cyborg.css"
# APP_THEME = "darkly.css"
# APP_THEME = "flatly.css"
# APP_THEME = "journal.css"
# APP_THEME = "lumen.css"
# APP_THEME = "paper.css"
# APP_THEME = "readable.css"
# APP_THEME = "sandstone.css"
# APP_THEME = "simplex.css"
# APP_THEME = "slate.css"
# APP_THEME = "solar.css"
# APP_THEME = "spacelab.css"
# APP_THEME = "superhero.css"
# APP_THEME = "united.css"
# APP_THEME = "yeti.css"
|
almeidapaulopt/erpnext | erpnext/payroll/doctype/payroll_entry/payroll_entry.py | Python | gpl-3.0 | 29,720 | 0.026447 | # Copyright (c) 2017, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
import frappe
from dateutil.relativedelta import relativedelta
from frappe import _
from frappe.desk.reportview import get_filters_cond, get_match_cond
from frappe.model.document import Document
from frappe.query_builder.functions import Coalesce
from frappe.utils import (
DATE_FORMAT,
add_days,
add_to_date,
cint,
comma_and,
date_diff,
flt,
getdate,
)
import erpnext
from erpnext.accounts.doctype.accounting_dimension.accounting_dimension import (
get_accounting_dimensions,
)
from erpnext.accounts.utils import get_fiscal_year
from erpnext.hr.doctype.employee.employee import get_holiday_list_for_employee
class PayrollEntry(Document):
def onload(self):
if not self.docstatus==1 or self.salary_slips_submitted:
return
# check if salary slips were manually submitted
entries = frappe.db.count("Salary Slip", {'payroll_entry': self.name, 'docstatus': 1}, ['name'])
if cint(entries) == len(self.employees):
self.set_onload("submitted_ss", True)
def validate(self):
self.number_of_employees = len(self.employees)
def on_submit(self):
self.create_salary_slips()
def before_submit(self):
self.validate_employee_details()
if self.validate_attendance:
if self.validate_employee_attendance():
frappe.throw(_("Cannot Submit, Employees left to mark attendance"))
def validate_employee_details(self):
emp_with_sal_slip = []
for employee_details in self.employees:
if frappe.db.exists("Salary Slip", {"employee": employee_details.employee, "start_date": self.start_date, "end_date": self.end_date, "docstatus": 1}):
emp_with_sal_slip.append(employee_details.employee)
if len(emp_with_sal_slip):
frappe.throw(_("Salary Slip already exists for {0}").format(comma_and(emp_with_sal_slip)))
def on_cancel(self):
frappe.delete_doc("Salary Slip", frappe.db.sql_list("""select name from `tabSalary Slip`
where payroll_entry=%s """, (self.name)))
self.db_set("salary_slips_created", 0)
self.db_set("salary_slips_submitted", 0)
def get_emp_list(self):
"""
Returns list of active employees based on selected criteria
and for which salary structure exists
"""
self.check_mandatory()
filters = self.make_filters()
cond = get_filter_condition(filters)
cond += get_joining_relieving_condition(self.start_date, self.end_date)
condition = ''
if self.payroll_frequency:
condition = """and payroll_frequency = '%(payroll_frequency)s'"""% {"payroll_frequency": self.payroll_frequency}
sal_struct = get_sal_struct(self.company, self.currency, self.salary_slip_based_on_timesheet, condition)
if sal_struct:
cond += "and t2.salary_structure IN %(sal_struct)s "
cond += "and t2.payroll_payable_account = %(payroll_payable_account)s "
cond += "and %(from_date)s >= t2.from_date"
emp_list = get_emp_list(sa | l_struct, cond, self.end_date, self.payroll_payable_account)
emp_list = remove_payrolled_employees(emp_list, self.start_date, self.end_date)
return emp_list
def make_filters(self):
filters = frappe._dict()
filters['company'] = self.company
filters['branch'] = self.branch
filters['department'] = self.department
filters['designation'] = self.de | signation
return filters
@frappe.whitelist()
def fill_employee_details(self):
self.set('employees', [])
employees = self.get_emp_list()
if not employees:
error_msg = _("No employees found for the mentioned criteria:<br>Company: {0}<br> Currency: {1}<br>Payroll Payable Account: {2}").format(
frappe.bold(self.company), frappe.bold(self.currency), frappe.bold(self.payroll_payable_account))
if self.branch:
error_msg += "<br>" + _("Branch: {0}").format(frappe.bold(self.branch))
if self.department:
error_msg += "<br>" + _("Department: {0}").format(frappe.bold(self.department))
if self.designation:
error_msg += "<br>" + _("Designation: {0}").format(frappe.bold(self.designation))
if self.start_date:
error_msg += "<br>" + _("Start date: {0}").format(frappe.bold(self.start_date))
if self.end_date:
error_msg += "<br>" + _("End date: {0}").format(frappe.bold(self.end_date))
frappe.throw(error_msg, title=_("No employees found"))
for d in employees:
self.append('employees', d)
self.number_of_employees = len(self.employees)
if self.validate_attendance:
return self.validate_employee_attendance()
def check_mandatory(self):
for fieldname in ['company', 'start_date', 'end_date']:
if not self.get(fieldname):
frappe.throw(_("Please set {0}").format(self.meta.get_label(fieldname)))
@frappe.whitelist()
def create_salary_slips(self):
"""
Creates salary slip for selected employees if already not created
"""
self.check_permission('write')
employees = [emp.employee for emp in self.employees]
if employees:
args = frappe._dict({
"salary_slip_based_on_timesheet": self.salary_slip_based_on_timesheet,
"payroll_frequency": self.payroll_frequency,
"start_date": self.start_date,
"end_date": self.end_date,
"company": self.company,
"posting_date": self.posting_date,
"deduct_tax_for_unclaimed_employee_benefits": self.deduct_tax_for_unclaimed_employee_benefits,
"deduct_tax_for_unsubmitted_tax_exemption_proof": self.deduct_tax_for_unsubmitted_tax_exemption_proof,
"payroll_entry": self.name,
"exchange_rate": self.exchange_rate,
"currency": self.currency
})
if len(employees) > 30:
frappe.enqueue(create_salary_slips_for_employees, timeout=600, employees=employees, args=args)
else:
create_salary_slips_for_employees(employees, args, publish_progress=False)
# since this method is called via frm.call this doc needs to be updated manually
self.reload()
def get_sal_slip_list(self, ss_status, as_dict=False):
"""
Returns list of salary slips based on selected criteria
"""
ss = frappe.qb.DocType("Salary Slip")
ss_list = (
frappe.qb.from_(ss)
.select(ss.name, ss.salary_structure)
.where(
(ss.docstatus == ss_status)
& (ss.start_date >= self.start_date)
& (ss.end_date <= self.end_date)
& (ss.payroll_entry == self.name)
& ((ss.journal_entry.isnull()) | (ss.journal_entry == ""))
& (Coalesce(ss.salary_slip_based_on_timesheet, 0) == self.salary_slip_based_on_timesheet)
)
).run(as_dict=as_dict)
return ss_list
@frappe.whitelist()
def submit_salary_slips(self):
self.check_permission('write')
ss_list = self.get_sal_slip_list(ss_status=0)
if len(ss_list) > 30:
frappe.enqueue(submit_salary_slips_for_employees, timeout=600, payroll_entry=self, salary_slips=ss_list)
else:
submit_salary_slips_for_employees(self, ss_list, publish_progress=False)
def email_salary_slip(self, submitted_ss):
if frappe.db.get_single_value("Payroll Settings", "email_salary_slip_to_employee"):
for ss in submitted_ss:
ss.email_salary_slip()
def get_salary_component_account(self, salary_component):
account = frappe.db.get_value("Salary Component Account",
{"parent": salary_component, "company": self.company}, "account")
if not account:
frappe.throw(_("Please set account in Salary Component {0}")
.format(salary_component))
return account
def get_salary_components(self, component_type):
salary_slips = self.get_sal_slip_list(ss_status = 1, as_dict = True)
if salary_slips:
ss = frappe.qb.DocType("Salary Slip")
ssd = frappe.qb.DocType("Salary Detail")
salary_components = (
frappe.qb.from_(ss)
.join(ssd)
.on(ss.name == ssd.parent)
.select(ssd.salary_component, ssd.amount, ssd.parentfield, ss.salary_structure, ss.employee)
.where(
(ssd.parentfield == component_type)
& (ss.name.isin(tuple([d.name for d in salary_slips])))
)
).run(as_dict=True)
return salary_components
def get_salary_component_total(self, component_type = None):
salary_components = self.get_salary_components(component_type)
if salary_components:
component_dict = {}
self.employee_cost_centers = {}
for item in salary_components:
employee_cost_centers = self.get_payroll_cost_centers_for_emplo |
Diagon/CFApp | cakefactory/tests.py | Python | mit | 79 | 0 | # Write unit | tests to perform before implementation
# Create your tests here | .
|
tuskar/tuskar-ui | horizon/tables/base.py | Python | apache-2.0 | 54,322 | 0.000295 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import copy
import logging
from operator import attrgetter
import sys
from django.conf import settings
from django.core import urlresolvers
from django import forms
from django.http import HttpResponse
from django import template
from django.template.defaultfilters import truncatechars
from django.template.loader import render_to_string
from django.utils.datastructures import SortedDict
from django.utils.html import escape
from django.utils import http
from django.utils.http import urlencode
from django.utils.safestring import mark_safe
from django.utils import termcolors
from django.utils.translation import ugettext_lazy as _
from horizon import conf
from horizon import exceptions
from horizon import messages
from horizon.tables.actions import FilterAction
from horizon.tables.actions import LinkAction
from horizon.utils import html
LOG = logging.getLogger(__name__)
PALETTE = termcolors.PALETTES[termcolors.DEFAULT_PALETTE]
STRING_SEPARATOR = "__"
class Column(html.HTMLElement):
""" A class which represents a single column in a :class:`.DataTable`.
.. attribute:: transform
A string or callable. If ``transform`` is a string, it should be the
name of the attribute on the underlying data class which
should be displayed in this column. If it is a callable, it
will be passed the current row's data at render-time and should
return the contents of the cell. Required.
.. attribute:: verbose_name
The name for this column which should be used for display purposes.
Defaults to the value of ``transform`` with the first letter
of each word capitalized.
.. attribute:: sortable
Boolean to determine whether this column should be sortable or not.
Defaults to ``True``.
.. attribute:: hidden
Boolean to determine whether or not this column should be displayed
when rendering the table. Default: ``False``.
.. attribute:: link
A string or callable which returns a URL which will be wrapped around
this column's text as a link.
.. attribute:: allowed_data_types
A list of data types for which the link should be created.
Default is an empty list (``[]``).
When the list is empty and the ``link`` attribute is not None, all the
rows under this column will be links.
.. attribute:: status
Boolean designating whether or not this column represents a status
(i.e. "enabled/disabled", "up/down", "active/inactive").
Default: ``False``.
.. attribute:: status_choices
A tuple of tuples representing the possible data values for the
status column and their associated boolean equivalent. Positive
states should equate to ``True``, negative states should equate
to ``False``, and indeterminate states should be ``None``.
Values are compared in a case-insensitive manner.
Example (these are also the default values)::
status_choices = (
('enabled', True),
('true', True)
('up', True),
('active', True),
('on', True),
('none', None),
('unknown', None),
('', None),
('disabled', False),
('down', False),
('false', False),
('inactive', False),
('off', False),
)
.. attribute:: display_choices
A tuple of tuples representing the possible values to substitute
the data when displayed in the column cell.
.. attribute:: empty_value
A string or callable to be used for cells which have no data.
Defaults to the string ``"-"``.
.. attribute:: summation
A string containing the name of a summation method to be used in
the generation of a summary row for this column. By default the
options are ``"sum"`` or ``"average"``, which behave as expected.
Optional.
.. attribute:: filters
A list of functions (often template filters) to be applied to the
value of the data for this column prior to output. This is effectively
a shortcut for writing a custom ``transform`` function in simple cases.
.. attribute:: classes
An iterable of CSS classes which should be added to this column.
Example: ``classes=('foo', 'bar')``.
.. attribute:: attrs
A dict of HTML attribute strings which should be added to this column.
Example: ``attrs={"data-foo": "bar"}``.
.. attribute:: truncate
An integer for the maximum length of the string in this column. If the
data in this column is larger than the supplied number, the data for
this column will be truncated and an ellipsis will be appended to the
truncated data.
Defaults to ``None``.
.. attribute:: link_classes
An iterable of CSS classes which will be added when the column's text
is displayed as a link.
Example: ``classes=('link-foo', 'link-bar')``.
Defaults to ``None``.
"""
summation_methods = {
"sum": sum,
"average": lambda data: sum(data, 0.0) / len(data)
}
# Used to retain order when instantiating columns on a table
creation_c | ounter = 0
transform = None
name = None
verbose_name = None
status_choices = (
('enabled', True),
('true', True),
('up', True),
('active', True),
('on', True),
('none', None),
('unknown', None),
('', None),
('disabled', False),
('down', | False),
('false', False),
('inactive', False),
('off', False),
)
def __init__(self, transform, verbose_name=None, sortable=True,
link=None, allowed_data_types=[], hidden=False, attrs=None,
status=False, status_choices=None, display_choices=None,
empty_value=None, filters=None, classes=None, summation=None,
auto=None, truncate=None, link_classes=None,
# FIXME: Added for TableStep:
form_widget=None, form_widget_attributes=None
):
self.classes = list(classes or getattr(self, "classes", []))
super(Column, self).__init__()
self.attrs.update(attrs or {})
if callable(transform):
self.transform = transform
self.name = transform.__name__
else:
self.transform = unicode(transform)
self.name = self.transform
# Empty string is a valid value for verbose_name
if verbose_name is None:
verbose_name = self.transform.title()
else:
verbose_name = verbose_name
self.auto = auto
self.sortable = sortable
self.verbose_name = verbose_name
self.link = link
self.allowed_data_types = allowed_data_types
self.hidden = hidden
self.status = status
self.empty_value = empty_value or '-'
self.filters = filters or []
self.truncate = truncate
self.link_classes = link_classes or []
self.form_widget = form_widget # FIXME: TableStep
self.form_widget_attributes = form_widget_attributes or {} # TableStep
if status_choices:
self.status_choices = status_choices
self. |
argilo/contest-sdr | blade_rx.py | Python | gpl-3.0 | 14,830 | 0.009171 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
##################################################
# GNU Radio Python Flow Graph
# Title: Blade Rx
# Generated: Wed Jun 8 20:57:16 2016
##################################################
if __name__ == '__main__':
import ctypes
import sys
if sys.platform.startswith('linux'):
try:
x11 = ctypes.cdll.LoadLibrary('libX11.so')
x11.XInitThreads()
except:
print "Warning: failed to XInitThreads()"
from PyQt4 import Qt
from PyQt4.QtCore import QObject, pyqtSlot
from gnuradio import analog
from gnuradio import audio
from gnuradio import blocks
from gnuradio import eng_notation
from gnuradio import filter
from gnuradio import gr
from gnuradio import qtgui
from gnuradio.eng_option import eng_option
from gnuradio.filter import firdes
from gnuradio.qtgui import Range, RangeWidget
from optparse import OptionParser
import osmosdr
import sip
import sys
import time
class blade_rx(gr.top_block, Qt.QWidget):
def __init__(self):
gr.top_block.__init__(self, "Blade Rx")
Qt.QWidget.__init__(self)
self.setWindowTitle("Blade Rx")
try:
self.setWindowIcon(Qt.QIcon.fromTheme('gnuradio-grc'))
except:
pass
self.top_scroll_layout = Qt.QVBoxLayout()
self.setLayout(self.top_scroll_layout)
self.top_scroll = Qt.QScrollArea()
self.top_scroll.setFrameStyle(Qt.QFrame.NoFrame)
self.top_scroll_layout.addWidget(self.top_scroll)
self.top_scroll.setWidgetResizable(True)
self.top_widget = Qt.QWidget()
self.top_scroll.setWidget(self.top_widget)
self.top_layout = Qt.QVBoxLayout(self.top_widget)
self.top_grid_layout = Qt.QGridLayout()
self.top_layout.addLayout(self.top_grid_layout)
self.settings = Qt.QSettings("GNU Radio", "blade_rx")
self.restoreGeometry(self.settings.value("geometry").toByteArray())
##################################################
# Variables
##################################################
self.cal_freq = cal_freq = 626309441
self.cal_band = cal_band = (cal_freq - 100e3) / 1e6
self.vga2_gain = vga2_gain = 10
self.vga1_gain = vga1_gain = -25
self.tx_text = tx_text = ""
self.tune = tune = 100
self.samp_rate = samp_rate = 4000000
self.rf_gain = rf_gain = 3
self.offset = offset = 200000
self.decimation = decimation = 20
self.correction = correction = 0
self.bb_gain = bb_gain = 30
self.band = band = cal_band
##################################################
# Blocks
##################################################
self._tune_range = Range(80, 120, 0.01, 100, 200)
self._tune_win = RangeWidget(self._tune_range, self.set_tune, "Tune (kHz)", "counter_slider", float)
self.top_grid_layout.addWidget(self._tune_win, 1,4,1,3)
self._rf_gain_range = Range(0, 6, 3, 3, 200)
self._rf_gain_win = RangeWidget(self._rf_gain_range, self.set_rf_gain, "RF gain", "counter_slider", float)
self.top_grid_layout.addWidget(self._rf_gain_win, 0,3,1,1)
self._correction_range = Range(-20, 20, 1, 0, 200)
self._correction_win = RangeWidget(self._correction_range, self.set_correction, "PPM", "counter", float)
self.top_grid_layout.addWidget(self._correction_win, 0,1,1,1)
self._bb_gain_range = Range(5, 60, 1, 30, 200)
self._bb_gain_win = RangeWidget(self._bb_gain_range, self.set_bb_gain, "BB gain", "counter_slider", float)
self.top_grid_layout.addWidget(self._bb_gain_win, 0,4,1,1)
self._band_options = [cal_band, 432, 903, 1296, 2304, 3456]
self._band_labels = ["Calib.", "432", "903", "1296", "2304", "3456"]
self._band_tool_bar = Qt.QToolBar(self)
self._band_tool_bar.addWidget(Qt.QLabel("Band"+": "))
self._band_combo_box = Qt.QComboBox()
self._band_tool_bar.addWidget(self._band_combo_box)
for label in self._band_labels: self._band_combo_box.addItem(label)
self._band_callback = lambda i: Qt.QMetaObject.invokeMethod(self._band_combo_box, "setCurrentIndex", Qt.Q_ARG("int", self._band_options.index(i)))
self._band_callback(self.band)
self._band_combo_box.currentIndexChanged.connect(
lambda i: self.set_band(self._band_options[i]))
self.top_grid_layout.addWidget(self._band_tool_bar, 0,0,1,1)
self.volume_mult = blocks.multiply_const_vff((10, ))
self._vga2_gain_range = Range(0, 25, 1, 10, 200)
self._vga2_gain_win = RangeWidget(self._vga2_gain_range, self.set_vga2_gain, "VGA2", "counter_slider", float)
self.top_grid_layout.addWidget(self._vga2_gain_win, 0,6,1,1)
self._vga1_gain_range = Range(-35, -4, 1, -25, 200)
self._vga1_gain_win = RangeWidget(self._vga1_gain_range, self.set_vga1_gain, "VGA1", "counter_slider", float)
self.top_grid_layout.addWidget(self._vga1_gain_win, 0,5,1,1)
self.usb_filter = filter.fir_filter_ccc(25, firdes.complex_band_pass(
1, samp_rate / decimation, 200, 2800, 200, firdes.WIN_HAMMING, 6.76))
self._tx_text_tool_bar = Qt.QToolBar(self)
self._tx_text_tool_bar.addWidget(Qt.QLabel("CW to send"+": "))
self._tx_text_line_edit = Qt.QLineEdit(str(self.tx_text))
self._tx_text_tool_bar.addWidget(self._tx_text_line_edit)
self._tx_text_line_edit.returnPressed.connect(
lambda: self.set_tx_text(str(str(self._tx_text_line_edit.text().toAscii()))))
self.top_grid_layout.addWidget(self._tx_text_tool_bar, 1,0,1,4)
self.rf_in = osmosdr.source( args="numchan=" + str(1) + " " + "" )
self.rf_in.set_sample_rate(samp_rate)
self.rf_in.set_center_freq(band * (1 + correction / 1e6) * 1e6 + 100000 - offset, 0)
self.rf_in.set_freq_corr(0, 0)
self.rf_in.set_dc_offset_mode(0, 0)
self.rf_in.set_iq_balance_mode(0, 0)
self.rf_in.set_gain_mode(False, 0)
self.rf_in.set_gain(rf_gain, 0)
self.rf_in.set_if_gain(0, 0)
self.rf_in.set_bb_gain(bb_gain, 0)
self.rf_in.set_antenna("", 0)
self.rf_in.set_bandwidth(1750000, 0)
self.offset_osc_2 = analog.sig_source_c(samp_rate / decimation, analog.GR_COS_WAVE, 100000 - tune * 1000 + 700, 1, 0)
self.offset_osc_1 = analog.sig_source_c(samp_rate, analog.GR_COS_WAVE, -offset, 1, 0)
self.mixer_2 = blocks.multiply_vcc(1)
self.mixer_1 = blocks.multiply_vcc(1)
self.interpolator = filter.rational_resampler_fff(
interpolation=6,
decimation=1,
taps=None,
fractional_bw=None,
)
self.if_waterfall = qtgui.waterfall_sink_c(
1024, #size
firdes.WIN_BLACKMAN_hARRIS, #wintype
100000, #fc
samp_rate / decimation, #bw
"", #name
1 #number of inputs
)
self.if_waterfall.set_update_time(0.10)
self.if_waterfall.enable_grid(False)
self.if_waterfall.enable_axis_labels(True)
if not True:
self.if_waterfall.disable_legend()
if "complex" == "float" or "complex" == "msg_float":
self.if_waterfall.set_plot_pos_half(not True)
labels = ["", "", "", "", "",
"", "", "", "", ""]
colors = [0, 0, 0, 0, 0,
0, 0, 0, 0, 0]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(1):
if len(labels[i]) == 0:
self.if_waterfall.set_line_label(i, "Data {0}".format(i))
else:
self.if_waterfall.set_line_label(i, labels[i])
self.if_waterfall.set_color_ | map(i, colors[i])
self.if_waterfall.set_line_alpha(i, alphas[i])
self.if_waterfall.set_intensity_range(-120, 0)
self._if_waterfall_win = sip.wrapin | stance(self.if_waterfall.pyqwidget(), Qt.QWidget)
self.top_grid_layout.addWidge |
jasonsahl/LS-BSR | tools/isolate_uniques_BSR.py | Python | gpl-3.0 | 1,944 | 0.00463 | #!/usr/bin/env python
"""extract only the unique IDs from a BSR matrix"""
from __future__ import print_function
from optparse import OptionParser
import sys
import os
def test_file(option, opt_str, value, parser):
try:
with open(value): setattr(parser.values, option.dest, value)
except IOError:
print('%s file cannot be opened' % option)
sy | s.exit()
def filter_uniques(matrix, threshold):
    """Extract rows of a BSR matrix that occur in fewer than two genomes.

    A row counts as "unique" when fewer than two of its BSR values are
    >= *threshold*.  Matching rows are written verbatim to the file
    ``uniques_BSR_matrix`` in the current working directory.

    Note: the header line of genome names never parses as floats, so it
    always looks "unique" and is written to the output as well -- this
    preserves the original behaviour (the header survives into the
    filtered matrix).

    Returns the list of first-column IDs of the written rows.
    """
    outdata = []
    # `with` guarantees both handles are closed even if a line is malformed.
    with open("uniques_BSR_matrix", "w") as outfile, open(matrix) as in_matrix:
        for line in in_matrix:
            fields = line.split()
            presents = []
            for x in fields[1:]:
                try:
                    if float(x) >= float(threshold):
                        presents.append(fields[0])
                except ValueError:
                    # Non-numeric cell (e.g. a genome name in the header).
                    pass
            if len(presents) < 2:
                outdata.append(fields[0])
                outfile.write(line)
    return outdata
def main(matrix, threshold):
    """Filter *matrix* and return the list of unique IDs.

    Returning the result (instead of discarding it, as before) keeps the
    command-line behaviour unchanged while making the function usable
    programmatically.
    """
    return filter_uniques(matrix, threshold)
if __name__ == "__main__":
    usage="usage: %prog [options]"
    parser = OptionParser(usage=usage)
    # -b is validated by the test_file callback, which stores the path on
    # options.matrix only if the file can actually be opened.
    parser.add_option("-b", "--bsr_matrix", dest="matrix",
                      help="/path/to/bsr_matrix [REQUIRED]",
                      action="callback", callback=test_file, type="string")
    # NOTE(review): the default is the *string* "0.4"; filter_uniques()
    # applies float() itself, so this works either way.
    parser.add_option("-t", "--threshold", dest="threshold",
                      help="lower threshold for ORF presence, defaults to 0.4",
                      action="store", default="0.4", type="float")
    options, args = parser.parse_args()
    # Abort with usage text if a required option was not supplied.
    mandatories = ["matrix"]
    for m in mandatories:
        if not options.__dict__[m]:
            print("\nMust provide %s.\n" %m)
            parser.print_help()
            exit(-1)
    main(options.matrix, options.threshold)
|
dnlmc/fp | fp_scrape3.py | Python | mit | 207 | 0.004831 | import r | e
# extract all <p> tags
friend17 = friendsoup17.find_all('p')
# remove html tags
friend17clean = []
for i in range(len(friend17)):
friend17clean.append(re.sub('<[^>]+>', '', friend17[i | ]))
|
jballanc/openmicroscopy | components/tools/OmeroWeb/omeroweb/webstart/urls.py | Python | gpl-2.0 | 1,117 | 0.011638 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
#
#
# Copyright (c) 2008 University of Dundee.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# publish | ed by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with thi | s program. If not, see <http://www.gnu.org/licenses/>.
#
# Author: Aleksandra Tarkowska <A(dot)Tarkowska(at)dundee(dot)ac(dot)uk>, 2008.
#
# Version: 1.0
#
from django.conf.urls import *
from omeroweb.webstart import views
# URL routes for the webstart app.  Uses the legacy (pre-Django 1.8)
# ``patterns`` helper; both routes resolve to plain view callables from
# omeroweb.webstart.views, so the prefix string is effectively unused.
urlpatterns = patterns('django.views.generic.simple',
    # Webstart landing page.
    url( r'^$', views.index, name="webstart_index" ),
    # The insight view behind the .jnlp path -- presumably serves the Java
    # Web Start descriptor for OMERO.insight (confirm against views.insight).
    url( r'^jars/insight\.jnlp$', views.insight, name='webstart_insight'),
)
|
hva/warehouse | warehouse/skill/api/resources/product.py | Python | mit | 1,071 | 0.001867 | from tastypie import fields
from tastypie.resources import ModelResource
from warehouse.skill.api.meta import MetaBase
from warehouse.skill.models import Product
product_weight = 'SELECT SUM(weight) FROM skill_operation WHERE product_id = skill_product.id'
product_len = 'SELECT SUM(len) FROM | skill_operation WHERE product_id = skill_product.id'
class ProductResource(ModelResource):
    """REST resource for Product rows, annotated with aggregate totals.

    ``weight`` and ``len`` are not model fields: they are computed per row
    by the correlated subqueries (``product_weight`` / ``product_len``)
    attached to the queryset via ``extra()`` below.
    """
    # Plain FK id so clients can filter without embedding the taxonomy.
    taxonomy_id = fields.IntegerField(attribute='taxonomy_id', null=True)
    # Aggregates exposed from the extra() annotations below.
    weight = fields.FloatField(attribute='weight', null=True)
    len = fields.FloatField(attribute='len', null=True)
    class Meta(MetaBase):
        queryset = Product.objects.all().extra(select={
            'weight': product_weight,
            'len': product_len
        })
        resource_name = 'product'
        filtering = {
            'taxonomy_id': ['in']
        }
        ordering = ['title']
|
manthey/girder | plugins/authorized_upload/plugin_tests/upload_test.py | Python | apache-2.0 | 5,434 | 0.001656 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright 2016 Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
from girder.constants import SettingKey
from girder.models.folder import Folder
from girder.models.setting import Setting
from girder.models.token import Token
from girder.models.upload import Upload
from girder.models.user import User
from tests import base
from girder_authorized_upload.constants import TOKEN_SCOPE_AUTHORIZED_UPLOAD
def setUpModule():
    """Enable the authorized_upload plugin and start the test server."""
    base.enabledPlugins.append('authorized_upload')
    base.startServer()
def tearDownModule():
    """Stop the test server started in setUpModule."""
    base.stopServer()
class AuthorizedUploadTest(base.TestCase):
    """End-to-end test of the authorized_upload plugin's single-use upload URLs."""
    def setUp(self):
        """Create an admin user and find that user's public/private folders."""
        super(AuthorizedUploadTest, self).setUp()
        self.admin = User().createUser(
            login='admin',
            password='passwd',
            firstName='admin',
            lastName='admin',
            email='admin@admin.org'
        )
        # A new user gets one public and one private folder; keep both.
        for folder in Folder().childFolders(parent=self.admin, parentType='user', user=self.admin):
            if folder['public'] is True:
                self.publicFolder = folder
            else:
                self.privateFolder = folder
    def testAuthorizedUpload(self):
        """Walk the full flow: create an authorized-upload URL, check its token
        scopes, upload a two-chunk file with it, and verify the token is
        single-use (scope narrowed after init, destroyed after completion)."""
        # Tiny chunk size so the 11-byte payload spans two chunks.
        Setting().set(SettingKey.UPLOAD_MINIMUM_CHUNK_SIZE, 1)
        # Anon access should not work
        resp = self.request('/authorized_upload', method='POST', params={
            'folderId': self.privateFolder['_id']
        })
        self.assertStatus(resp, 401)
        # Create our secure URL
        resp = self.request('/authorized_upload', method='POST', user=self.admin, params={
            'folderId': self.privateFolder['_id']
        })
        self.assertStatusOk(resp)
        # The URL ends .../<folderId>/<tokenId>; recover both parts.
        parts = resp.json['url'].rsplit('/', 3)
        tokenId, folderId = parts[-1], parts[-2]
        token = Token().load(tokenId, force=True, objectId=False)
        self.assertIsNotNone(token)
        self.assertEqual(folderId, str(self.privateFolder['_id']))
        self.assertEqual(set(token['scope']), {
            TOKEN_SCOPE_AUTHORIZED_UPLOAD,
            'authorized_upload_folder_%s' % self.privateFolder['_id']
        })
        # Make sure this token doesn't let us upload into a different folder
        params = {
            'parentType': 'folder',
            'parentId': self.publicFolder['_id'],
            'name': 'hello.txt',
            'size': 11,
            'mimeType': 'text/plain'
        }
        resp = self.request(path='/file', method='POST', params=params, token=tokenId)
        self.assertStatus(resp, 401)
        # Initialize upload into correct folder
        params['parentId'] = self.privateFolder['_id']
        resp = self.request(path='/file', method='POST', params=params, token=tokenId)
        self.assertStatusOk(resp)
        # We should remove the scope that allows further uploads
        upload = Upload().load(resp.json['_id'])
        token = Token().load(tokenId, force=True, objectId=False)
        self.assertEqual(token['scope'], [
            'authorized_upload_folder_%s' % self.privateFolder['_id']
        ])
        # Authorized upload ID should be present in the token
        self.assertEqual(token['authorizedUploadId'], upload['_id'])
        # Attempting to initialize new uploads using the token should fail
        resp = self.request(path='/file', method='POST', params=params, token=tokenId)
        self.assertStatus(resp, 401)
        # Uploading a chunk should work with the token
        fields = [('offset', 0), ('uploadId', str(upload['_id']))]
        files = [('chunk', 'hello.txt', 'hello ')]
        resp = self.multipartRequest(path='/file/chunk', token=tokenId, fields=fields, files=files)
        self.assertStatusOk(resp)
        # Requesting our offset should work with the token
        # The offset should not have changed
        resp = self.request(path='/file/offset', method='GET', token=tokenId, params={
            'uploadId': upload['_id']
        })
        self.assertStatusOk(resp)
        self.assertEqual(resp.json['offset'], 6)
        # Upload the second chunk
        fields = [('offset', 6), ('uploadId', str(upload['_id']))]
        files = [('chunk', 'hello.txt', 'world')]
        resp = self.multipartRequest(path='/file/chunk', token=tokenId, fields=fields, files=files)
        self.assertStatusOk(resp)
        # Trying to upload more chunks should fail
        fields = [('offset', 11), ('uploadId', str(upload['_id']))]
        files = [('chunk', 'hello.txt', 'more bytes')]
        resp = self.multipartRequest(path='/file/chunk', token=tokenId, fields=fields, files=files)
        self.assertStatus(resp, 401)
        # The token should be destroyed
        self.assertIsNone(Token().load(tokenId, force=True, objectId=False))
|
gkabbe/cMDLMC | mdlmc/LMC/MDMC.py | Python | gpl-3.0 | 10,013 | 0.001398 | # coding=utf-8
from abc import ABCMeta
import logging
from typing import Iterator
import numpy as np
from mdlmc.topo.topology import NeighborTopology
from ..misc.tools import remember_last_element
from ..LMC.output import CovalentAutocorrelation, MeanSquareDisplacement
from ..cython_exts.LMC.PBCHelper import AtomBox
from ..LMC.jumprate_generators import JumpRate
logger = logging.getLogger(__name__)
logging.getLogger("matplotlib").setLevel(logging.WARN)
def get_git_version():
    """Print the commit hash, date and message recorded in mdlmc.version_hash."""
    from mdlmc.version_hash import commit_hash, commit_date, commit_message
    banner = ("# Hello. I am from commit {}".format(commit_hash),
              "# Commit Date: {}".format(commit_date),
              "# Commit Message: {}".format(commit_message))
    for line in banner:
        print(line)
class KMCLattice:
"""Implementation of the time-dependent Kinetic Monte Carlo Scheme"""
__show_in_config__ = True
__no_config_parameter__ = ["topology", "atom_box", "jumprate_function"]
    def __init__(self, topology: "NeighborTopology", *,
                 atom_box: "AtomBox",
                 jumprate_function: "JumpRate",
                 lattice_size: int,
                 proton_number: int,
                 donor_atoms: str,
                 time_step: float,
                 extra_atoms: str = None):
        """
        Parameters
        ----------
        topology:
            NeighborTopology supplying donor/acceptor connectivity per frame
        atom_box:
            AtomBox used for (periodic) geometry calculations
        jumprate_function:
            JumpRate object used to evaluate transition rates
        lattice_size:
            number of proton binding sites on the lattice
        proton_number:
            number of protons distributed over the lattice
        time_step:
            trajectory time step
        donor_atoms:
            name of donor / acceptor atoms
        extra_atoms:
            extra atoms used for the determination of the jump rate
        """
        self.topology = topology
        self._lattice = self._initialize_lattice(lattice_size, proton_number)
        # Some topology implementations track the lattice themselves; hand
        # them a reference if they support it.
        if hasattr(self.topology, "take_lattice_reference"):
            logger.debug("topology has method take_lattice_reference")
            self.topology.take_lattice_reference(self._lattice)
        self._atom_box = atom_box
        self._jumprate_function = jumprate_function
        self._donor_atoms = donor_atoms
        self._time_step = time_step
        self._extra_atoms = extra_atoms
def _initialize_lattice(self, lattice_size, proton_number):
lattice = np.zeros(lattice_size, dtype=np.int32)
lattice[:proton_number] = range(1, proton_number + 1)
np.random.shuffle(lattice)
return lattice
    def __iter__(self) -> Iterator[np.ndarray]:
        """Iterating the lattice is equivalent to iterating continuous_output()."""
        yield from self.continuous_output()
    def continuous_output(self):
        """Drive the KMC propagation and yield one item per trajectory frame.

        Yields ``(frame_number, kmc_time, frame)`` for every cached frame;
        after each batch of frames, performs the proton jump that the KMC
        routine scheduled.
        """
        current_frame_number = 0
        topo = self.topology
        lattice = self.lattice  # alias of self._lattice (see the property)
        # remember_last_element lets us re-read the most recent item after
        # the wrapped generator has advanced.
        topology_iterator, last_topo = remember_last_element(iter(self.topology))
        jumprate_iterator, last_jumprates = remember_last_element(
            jumprate_generator(self._jumprate_function, self.lattice, topology_iterator))
        # Total escape rate per frame drives the KMC time propagation.
        sum_of_jumprates = (np.sum(jumpr) for _, _, jumpr in jumprate_iterator)
        kmc_routine = self.fastforward_to_next_jump(sum_of_jumprates,
                                                    self._time_step)
        for f, df, kmc_time in kmc_routine:
            current_time = kmc_time
            logger.debug("Next jump at time %.2f", current_time)
            logger.debug("df = %s; dt = %s", df, kmc_time)
            logger.debug("Go to frame %s", f)
            for frame in self.topology.get_cached_frames():
                yield current_frame_number, current_time, frame
                current_frame_number += 1
            # Execute the jump that was just scheduled, using the last
            # remembered (start, destination, rates) triple.
            proton_idx = self.move_proton(*last_jumprates(), lattice)
            topo.update_time_of_last_jump(proton_idx, kmc_time)
    def move_proton(self, start, dest, jump_rates, lattice):
        """Given the hopping rates between the acceptor atoms, choose a
        connection randomly (weighted by its rate) and move the proton.

        start, dest : arrays of source / destination site indices
        jump_rates  : rates aligned with start/dest
        lattice     : occupation array; at the only call site
                      (continuous_output) this aliases self._lattice

        Returns the label of the proton that moved.
        """
        # Keep only transitions whose source site is occupied and whose
        # destination site is free.
        start_occupied_destination_free = filter_allowed_transitions(start, dest, lattice)
        start = start[start_occupied_destination_free]
        dest = dest[start_occupied_destination_free]
        jump_rates = jump_rates[start_occupied_destination_free]
        # Draw one transition with probability proportional to its rate.
        cumsum = np.cumsum(jump_rates)
        random_draw = np.random.uniform(0, cumsum[-1])
        transition_idx = np.searchsorted(cumsum, random_draw)
        start_idx = start[transition_idx]
        destination_idx = dest[transition_idx]
        proton_idx = self._lattice[start_idx]
        logger.debug("Particle %s moves from %s to %s", proton_idx, start_idx, destination_idx)
        logger.debug("lattice[%s] = %s", destination_idx, self._lattice[destination_idx])
        # NOTE(review): reads/writes go through self._lattice while the filter
        # used the `lattice` parameter; these alias the same array at the call
        # site, but the mix is worth confirming if new callers are added.
        self._lattice[destination_idx] = proton_idx
        self._lattice[start_idx] = 0
        return proton_idx
    @staticmethod
    def fastforward_to_next_jump(jumprates, dt):
        """Implements Kinetic Monte Carlo with time-dependent rates.

        Parameters
        ----------
        jumprates : generator / iterator
            Unit: femtosecond^{-1}
            Proton jump rate from an oxygen site to any neighbor
        dt : float
            Trajectory time step

        Returns
        -------
        frame: int
            Frame at which the next event occurs
        delta_frame : int
            Difference between frame and the index at which the next event occurs
        kmc_time : float
            Time of the next event
        """
        sweep, kmc_time = 0, 0
        current_rate = next(jumprates)
        while True:
            # Exponentially distributed "integrated rate" needed until the
            # next event fires.
            time_selector = -np.log(1 - np.random.random())
            # Handle case where time selector is so small that the next frame is not reached
            t_trial = time_selector / current_rate
            if (kmc_time + t_trial) // dt == kmc_time // dt:
                kmc_time += t_trial
                delta_frame = 0
            else:
                # Walk frame by frame, accumulating rate * dt, until the
                # accumulated probability exceeds the drawn value.
                delta_t, delta_frame = dt - kmc_time % dt, 1
                current_probsum = current_rate * delta_t
                next_rate = next(jumprates)
                next_probsum = current_probsum + next_rate * dt
                while next_probsum < time_selector:
                    delta_frame += 1
                    current_probsum = next_probsum
                    next_rate = next(jumprates)
                    next_probsum = current_probsum + next_rate * dt
                # Interpolate the remaining fraction within the last frame.
                rest = time_selector - current_probsum
                delta_t += (delta_frame - 1) * dt + rest / next_rate
                kmc_time += delta_t
            sweep += delta_frame
            yield sweep, delta_frame, kmc_time
    def xyz_output(self, particle_type: str = "H"):
        """Yield trajectory frames with virtual particles appended at the
        currently occupied donor sites.

        Parameters
        ----------
        particle_type:
            atom name given to the appended particles (default "H")
        """
        for f, t, frame in self:
            # Positions of donor atoms whose lattice site holds a proton.
            particle_positions = frame[self.donor_atoms][self.occupied_sites]
            particle_positions.atom_names = particle_type
            yield frame.append(particle_positions)
    def observables_output(self, reset_frequency: int, print_frequency: int):
        """Run the KMC propagation and yield observable records.

        Parameters
        ----------
        reset_frequency: int
            number of frames after which the autocorrelation reference and
            the displacement accumulator are reset
        print_frequency: int
            number of frames between yielded records

        Yields
        ------
        (frame_number, time, msd, autocorrelation) tuples
        """
        kmc_iterator = iter(self)
        donor_sites = self.donor_atoms
        # Prime the iterator: the first frame seeds both observables.
        current_frame_number, current_time, frame = next(kmc_iterator)
        autocorr = CovalentAutocorrelation(self.lattice)
        msd = MeanSquareDisplacement(frame[donor_sites].atom_positions, self.lattice, self._atom_box)
        for current_frame_number, current_time, frame in kmc_iterator:
            if current_frame_number % reset_frequency == 0:
                autocorr.reset(self.lattice)
                msd.reset_displacement()
            msd.update_displacement(frame[donor_sites].atom_positions, self.lattice)
            if current_frame_number % print_frequency == 0:
                auto = autocorr.calculate(self.lattice)
                msd_result = msd.msd()
                yield current_frame_number, current_time, msd_result, auto
    @property
    def lattice(self):
        """Occupation array: entry > 0 is a proton label, 0 an empty site."""
        return self._lattice
    @property
    def donor_atoms(self):
        """Name of the donor / acceptor atoms (as passed to the constructor)."""
        # TODO: not needed (?)
        return self._donor_atoms
    @property
    def extra_atoms(self):
        """Extra atom selection used for jump-rate determination (may be None)."""
        return self._extra_atoms
@property
def occupied_sites(self):
return np.where(self._lattice > 0)[0 |
paulrouget/servo | tests/wpt/web-platform-tests/tools/wptrunner/wptrunner/wptmanifest/tests/test_static.py | Python | mpl-2.0 | 2,669 | 0.001499 | import pytest
import sys
import unittest
from ..backends import static
# There aren't many tests here because it turns out to be way more convenient to
# use test_serializer for the majority of cases
@pytest.mark.xfail(sys.version[0] == "3",
                   reason="wptmanifest.parser doesn't support py3")
class TestStatic(unittest.TestCase):
    """Tests for the static wptmanifest backend: compile manifest text with
    fixed values for the expression variables and check the resolved keys."""
    def compile(self, input_text, input_data):
        """Compile *input_text* with *input_data* bound as expression variables."""
        return static.compile(input_text, input_data)
    def test_get_0(self):
        # The a == 2 branch wins; the section also inherits the top-level key.
        data = """
key: value
[Heading 1]
  other_key:
    if a == 1: value_1
    if a == 2: value_2
    value_3
"""
        manifest = self.compile(data, {"a": 2})
        self.assertEquals(manifest.get("key"), "value")
        children = list(item for item in manifest.iterchildren())
        self.assertEquals(len(children), 1)
        section = children[0]
        self.assertEquals(section.name, "Heading 1")
        self.assertEquals(section.get("other_key"), "value_2")
        self.assertEquals(section.get("key"), "value")
    def test_get_1(self):
        # No condition matches for a == 3, so the bare default is used.
        data = """
key: value
[Heading 1]
  other_key:
    if a == 1: value_1
    if a == 2: value_2
    value_3
"""
        manifest = self.compile(data, {"a": 3})
        children = list(item for item in manifest.iterchildren())
        section = children[0]
        self.assertEquals(section.get("other_key"), "value_3")
    def test_get_3(self):
        # String equality and string indexing inside conditions.
        data = """key:
  if a == "1": value_1
  if a[0] == "ab"[0]: value_2
"""
        manifest = self.compile(data, {"a": "1"})
        self.assertEquals(manifest.get("key"), "value_1")
        manifest = self.compile(data, {"a": "ac"})
        self.assertEquals(manifest.get("key"), "value_2")
    def test_get_4(self):
        # Unary `not` in a condition.
        data = """key:
  if not a: value_1
  value_2
"""
        manifest = self.compile(data, {"a": True})
        self.assertEquals(manifest.get("key"), "value_2")
        manifest = self.compile(data, {"a": False})
        self.assertEquals(manifest.get("key"), "value_1")
    def test_api(self):
        # Exercise the compiled manifest's API surface.
        data = """key:
  if a == 1.5: value_1
  value_2
key_1: other_value
"""
        manifest = self.compile(data, {"a": 1.5})
        self.assertFalse(manifest.is_empty)
        self.assertEquals(manifest.root, manifest)
        self.assertTrue(manifest.has_key("key_1"))
        self.assertFalse(manifest.has_key("key_2"))
        self.assertEquals(set(manifest.iterkeys()), {"key", "key_1"})
        self.assertEquals(set(manifest.itervalues()), {"value_1", "other_value"})
    def test_is_empty_1(self):
        # Sections without any keys count as an empty manifest.
        data = """
[Section]
  [Subsection]
"""
        manifest = self.compile(data, {})
        self.assertTrue(manifest.is_empty)
|
varun-verma11/CodeDrill | djangoSRV/djangoSRV/urls.py | Python | bsd-2-clause | 4,085 | 0.002448 | from django.conf.urls import patterns, include, url
from django.contrib import admin
from teacher import *
from Views.teacher_view import get_teacher_view, get_overview, get_year_overview, get_class_overview, get_assignment_overview
from Views.set_exercise import get_set_exercise_page, send_exercise_to_class, get_view_spec_form
from Views.authenticate import authenticate_student, authenticate_teacher, check_user_name_exists
from Views.login import student_login, teacher_login
from Views.register import register_student, register_teacher
from Views.student_view import get_student_view
from Views.submit_code import submit_student_code, run_self_test
from Views.single_exercise_code_view import single_exercise_view
from Views.student_grades import student_grades_view
from Views.home import home_page
from Views.logout import logout_user
from Views.view_spec import view_spec, get_exercise_details
from Views.settings import teacher_account_settings, delete_teaching_class, student_account_settings, class_settings, change_password, change_email, get_registered_students_in_course, add_new_class, update_class_name, update_course_students, get_student_submission
from Views.add_new_exercise import add_new_exercise, create_exercise
from Views.view_submissions import view_student_submissions, view_submissions_teacher, get_student_feedback, submit_student_feedback
admin.autodiscover()

# URL routing table.  Django uses the FIRST pattern that matches, so the
# relative order of entries is significant; keep new routes grouped with
# their feature area and above the catch-all home page at the bottom.
urlpatterns = patterns('',
    url(r'^admin/', include(admin.site.urls)),
    # viewSubmissionMark comes from the star import of the teacher module.
    url(r'^teacher/$', viewSubmissionMark),
    url(r'^selectable/', include('selectable.urls')),
    # Teacher: class management and overviews.
    url(r'^teacher/class-settings/manage-class/$', update_course_students),
    url(r'^teacher/class-settings/delete-class/$', delete_teaching_class),
    url(r'^teacher/get-overview/', get_overview),
    url(r'^teacher/get-year-overview/', get_year_overview),
    url(r'^teacher/get-class-overview/', get_class_overview),
    url(r'^teacher/get-assignment-overview/', get_assignment_overview),
    # Account settings (shared change_password/change_email views).
    url(r'^student/changepassword/$', change_password),
    url(r'^student/view-submissions/get-feedback/(\d+)/$', get_student_feedback),
    url(r'^teacher/changepassword/$', change_password),
    url(r'^teacher/get-students-in-class/$', get_registered_students_in_course),
    url(r'^teacher/add-new-class/$', add_new_class),
    url(r'^teacher/get-exercise/$', get_exercise_details),
    url(r'^teacher/update-class-name/', update_class_name),
    url(r'^teacher/submit-exercise/', send_exercise_to_class),
    url(r'^student/changeemail/$', change_email),
    url(r'^teacher/changeemail/$', change_email),
    url(r'^teacher/account-settings/', teacher_account_settings),
    url(r'^student/account-settings/', student_account_settings),
    url(r'^class-settings/', class_settings),
    # Main views, authentication and registration.
    url(r'^teacher-view/$', get_teacher_view),
    url(r'^set-exercise/$', get_set_exercise_page),
    url(r'^authenticate_student/$', authenticate_student),
    url(r'^authenticate_teacher/$', authenticate_teacher),
    url(r'^student-login/$', student_login),
    url(r'^teacher-login/$', teacher_login),
    url(r'^register-student/$', register_student),
    url(r'^register-teacher/$', register_teacher),
    url(r'^student-view/$', get_student_view),
    # Exercise submission and grading.
    url(r'^submit-code/(\d+)/$', submit_student_code),
    url(r'^code-single-exercise/(\d+)/$', single_exercise_view),
    url(r'^student-grades/$', student_grades_view),
    url(r'^logout/$', logout_user),
    url(r'^view-spec/$', view_spec),
    url(r'^check-username/$', check_user_name_exists),
    url(r'^student/test/self-defined/$', run_self_test),
    url(r'^teacher/add-new-exercise/$',add_new_exercise),
    url(r'^teacher/add-new-exercise/submit-exercise/$',create_exercise),
    url(r'^student/view-submissions/$', view_student_submissions),
    url(r'^teacher/view-submissions/$', view_submissions_teacher),
    url(r'^teacher/view-submissions/send-feedback/$', submit_student_feedback),
    url(r'^teacher/get-student-submission/$', get_student_submission),
    url(r'^teacher/set-exercise/view-spec-form/(\d+)/$', get_view_spec_form),
    # Catch-all home page; must remain last.
    url(r'^$', home_page)
)
|
skkeeper/linux-clicky | linux_clicky/play_sound.py | Python | mit | 691 | 0.005806 | #!/usr/bin/env python2
# -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 2 -*-
# Author: Fábio André Damas <skkeeper at gmail dot com>
from threading import Thread
from subprocess import Popen, PIPE
class PlaySound(Thread):
    """Play a single sound file once, on a background thread, via the SoX ``play`` CLI."""

    def __init__(self, filename, volume):
        """Remember *filename* (path to the sound file) and *volume* (string for ``-v``)."""
        Thread.__init__(self)
        self.filename = filename
        self.volume = volume

    def run(self):
        """Run ``play`` and report a SoX failure (non-zero exit) to the user."""
        # NOTE(review): filename/volume are interpolated into a shell command
        # (shell=True), so values containing shell metacharacters are unsafe.
        # Kept for behaviour compatibility; sanitize inputs upstream or switch
        # to an argument list if these can ever be untrusted.
        cmd = 'play -v ' + self.volume + ' ' + self.filename
        p = Popen(cmd, shell=True, stderr=PIPE, close_fds=True)
        # TODO: Test if limits the number of clicks
        p.wait()
        if p.returncode != 0:
            print('\033[1;31mWe found a error with SoX, did you install it?\033[1;m')
            # Surface SoX's stderr instead of silently discarding the read.
            print(p.stderr.read())
|
batxes/4Cin | Six_zebra_models/Six_zebra_models_final_output_0.1_-0.1_13000/Six_zebra_models8148.py | Python | gpl-3.0 | 13,920 | 0.025216 | import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}

# Generated per-particle marker data, transcribed from the original repetitive
# if-blocks.  One entry per particle, in particle-index order:
# ((x, y, z) position, (r, g, b) colour, marker radius).
_PARTICLE_MARKERS = [
    ((4135.63, -1070.6, 11660.5), (0.7, 0.7, 0.7), 507.685),
    ((4398.66, -1885.45, 11556), (0.7, 0.7, 0.7), 479.978),
    ((4346, -175.265, 10569.5), (0.7, 0.7, 0.7), 681.834),
    ((4223.5, 1861.87, 9387.15), (0.7, 0.7, 0.7), 522.532),
    ((4224.1, 2470.58, 8995.04), (0, 1, 0), 751.925),
    ((2596.13, 1379.53, 8172.18), (0.7, 0.7, 0.7), 437.001),
    ((3316.53, 2240.06, 6607.33), (0.7, 0.7, 0.7), 710.767),
    ((2583.82, 1751.55, 5136.33), (0.7, 0.7, 0.7), 762.077),
    ((3387, 1724.66, 3770.93), (0.7, 0.7, 0.7), 726.799),
    ((4317.71, 1934.83, 2179.92), (0.7, 0.7, 0.7), 885.508),
    ((5963.21, 1383.66, 2065.49), (0.7, 0.7, 0.7), 778.489),
    ((6342.36, -373.552, 922.922), (0.7, 0.7, 0.7), 790.333),
    ((6711.54, -2098.47, -184.939), (0.7, 0.7, 0.7), 707.721),
    ((6382.88, -2315.65, 1390.69), (0.7, 0.7, 0.7), 651.166),
    ((6780.75, -1179.03, 265.654), (0.7, 0.7, 0.7), 708.61),
    ((6674.81, 354.788, 232.452), (0.7, 0.7, 0.7), 490.595),
    ((6063.62, 1175.64, 1159.87), (0.7, 0.7, 0.7), 591.565),
    ((5400.78, 2218.88, 2007.65), (0.7, 0.7, 0.7), 581.287),
    ((3951.99, 1285, 2398.29), (0.7, 0.7, 0.7), 789.529),
    ((3014.12, 2460.53, 2339.28), (0.7, 0.7, 0.7), 623.587),
    ((2097.93, 3863.9, 1690.04), (0.7, 0.7, 0.7), 1083.56),
    ((882.166, 4466.2, 643.004), (0.7, 0.7, 0.7), 504.258),
    ((1847.75, 4807.14, 1639.62), (0.7, 0.7, 0.7), 805.519),
    ((3845.84, 5275.07, 2103.55), (0.7, 0.7, 0.7), 631.708),
    ((5900.88, 5746.76, 1743.52), (0.7, 0.7, 0.7), 805.942),
    ((6891.95, 5990.09, 1469.8), (1, 0.7, 0), 672.697),
    ((7156.82, 7390.62, 3791.02), (0.7, 0.7, 0.7), 797.863),
    ((7863.39, 8865, 4672.27), (1, 0.7, 0), 735.682),
    ((6883.27, 9508.46, 5162.61), (0.7, 0.7, 0.7), 602.14),
    ((5306.04, 11132.3, 5692.61), (0.7, 0.7, 0.7), 954.796),
    ((5604.24, 10591.3, 5499.9), (0.7, 0.7, 0.7), 1021.88),
    ((6435.75, 11209.8, 6255.67), (0.7, 0.7, 0.7), 909.323),
]

for _i, (_pos, _color, _radius) in enumerate(_PARTICLE_MARKERS):
    _set_name = "particle_%d geometry" % _i
    # Create each marker set at most once (mirrors the generated guards).
    if _set_name not in marker_sets:
        marker_sets[_set_name] = new_marker_set(_set_name)
    # Keep `s`/`mark` bound after the loop, exactly as the generated code did,
    # since later generated statements continue to use these names.
    s = marker_sets[_set_name]
    mark = s.place_marker(_pos, _color, _radius)
if "particle_32 geometry" not in marker_sets:
s=new_marker_set('particle_32 geometry')
marker_sets["particle_32 geometry"]=s
s= mar |
CingHu/neutron-ustack | neutron/extensions/loadbalancer.py | Python | apache-2.0 | 19,800 | 0.000303 | # Copyright 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
imp | ort abc
from oslo.config import cfg
import six
from neutron.api import extensions
from neutron.api.v2 import attributes as attr
from neutron.api.v2 import base
from neutron.api.v2 import resource_helper
from neutron.common import exceptions as qexception
from neutron import manager
from neutron.plugins.common import constants
from neutron.services import service_base
# Loadbalancer Exceptions
class DelayOrTimeoutInvalid(qexception.BadRequest):
    """Raised when a health monitor's delay is smaller than its timeout."""
    message = _("Delay must be greater than or equal to timeout")


class NoEligibleBackend(qexception.NotFound):
    """Raised when a pool has no backend able to service a request."""
    message = _("No eligible backend for pool %(pool_id)s")


class VipNotFound(qexception.NotFound):
    """Raised when the requested VIP id does not exist."""
    message = _("Vip %(vip_id)s could not be found")


class VipExists(qexception.NeutronException):
    """Raised when creating a second VIP for a pool that already has one."""
    message = _("Another Vip already exists for pool %(pool_id)s")


class PoolNotFound(qexception.NotFound):
    """Raised when the requested pool id does not exist."""
    message = _("Pool %(pool_id)s could not be found")


class MemberNotFound(qexception.NotFound):
    """Raised when the requested member id does not exist."""
    message = _("Member %(member_id)s could not be found")


class HealthMonitorNotFound(qexception.NotFound):
    """Raised when the requested health monitor id does not exist."""
    message = _("Health_monitor %(monitor_id)s could not be found")


class PoolMonitorAssociationNotFound(qexception.NotFound):
    """Raised when a monitor is not associated with the given pool."""
    message = _("Monitor %(monitor_id)s is not associated "
                "with Pool %(pool_id)s")


class PoolMonitorAssociationExists(qexception.Conflict):
    """Raised when associating a monitor that is already bound to the pool."""
    message = _('health_monitor %(monitor_id)s is already associated '
                'with pool %(pool_id)s')


class StateInvalid(qexception.NeutronException):
    """Raised when a load balancer resource is in an unexpected state."""
    message = _("Invalid state %(state)s of Loadbalancer resource %(id)s")


class PoolInUse(qexception.InUse):
    """Raised when deleting a pool that still has dependents."""
    message = _("Pool %(pool_id)s is still in use")


class HealthMonitorInUse(qexception.InUse):
    """Raised when deleting a monitor still associated with pools."""
    message = _("Health monitor %(monitor_id)s still has associations with "
                "pools")


class PoolStatsNotFound(qexception.NotFound):
    """Raised when statistics for the requested pool are unavailable."""
    message = _("Statistics of Pool %(pool_id)s could not be found")


class ProtocolMismatch(qexception.BadRequest):
    """Raised when a VIP's protocol differs from its pool's protocol."""
    message = _("Protocol %(vip_proto)s does not match "
                "pool protocol %(pool_proto)s")


class MemberExists(qexception.NeutronException):
    """Raised when adding a member whose address:port already exists in the pool."""
    message = _("Member with address %(address)s and port %(port)s "
                "already present in pool %(pool)s")
RESOURCE_ATTRIBUTE_MAP = {
'vips': {
'id': {'allow_post': False, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True,
'primary_key': True},
'tenant_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:string': None},
'required_by_policy': True,
'is_visible': True},
'name': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'default': '',
'is_visible': True},
'description': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'is_visible': True, 'default': ''},
'subnet_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True},
'address': {'allow_post': True, 'allow_put': False,
'default': attr.ATTR_NOT_SPECIFIED,
'validate': {'type:ip_address_or_none': None},
'is_visible': True},
'port_id': {'allow_post': False, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True},
'protocol_port': {'allow_post': True, 'allow_put': False,
'validate': {'type:range': [0, 65535]},
'convert_to': attr.convert_to_int,
'is_visible': True},
'protocol': {'allow_post': True, 'allow_put': False,
'validate': {'type:values': ['TCP', 'HTTP', 'HTTPS']},
'is_visible': True},
'pool_id': {'allow_post': True, 'allow_put': True,
'validate': {'type:uuid': None},
'is_visible': True},
'session_persistence': {'allow_post': True, 'allow_put': True,
'convert_to': attr.convert_none_to_empty_dict,
'default': {},
'validate': {
'type:dict_or_empty': {
'type': {'type:values': ['APP_COOKIE',
'HTTP_COOKIE',
'SOURCE_IP'],
'required': True},
'cookie_name': {'type:string': None,
'required': False}}},
'is_visible': True},
'connection_limit': {'allow_post': True, 'allow_put': True,
'default': -1,
'convert_to': attr.convert_to_int,
'is_visible': True},
'admin_state_up': {'allow_post': True, 'allow_put': True,
'default': True,
'convert_to': attr.convert_to_boolean,
'is_visible': True},
'status': {'allow_post': False, 'allow_put': False,
'is_visible': True},
'status_description': {'allow_post': False, 'allow_put': False,
'is_visible': True}
},
'pools': {
'id': {'allow_post': False, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True,
'primary_key': True},
'tenant_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:string': None},
'required_by_policy': True,
'is_visible': True},
'vip_id': {'allow_post': False, 'allow_put': False,
'is_visible': True},
'name': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'default': '',
'is_visible': True},
'description': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'is_visible': True, 'default': ''},
'subnet_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:uuid': None},
# Only doing this because of new API
# This should be validated in plugin that it is
# a required attribute
'default': attr.ATTR_NOT_SPECIFIED,
'is_visible': True},
'protocol': {'allow_post': True, 'allow_put': False,
'validate': {'type:values': ['TCP', 'HTTP', 'HTTPS']},
'is_visible': True},
'provider': {'allow_post': True, 'allow_put': False,
'validate': {'type:string': None},
'is_visible': True, 'default': attr.ATTR_NOT_SPECIFIED},
'lb_method': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
# Only doing this because of new API
# This should be |
shidarin/AT-DiceRoller | modules/criticalInjuries.py | Python | gpl-3.0 | 29,380 | 0.013104 | #!/usr/bin/kivy
# Critical Injury Module
# By Sean Wallitsch, 2013/08/20
# Built in Modules
from random import randint
from time import strftime # For timestamping history log
# Kivy Modules
from kivy.app import App # Base App Class
from kivy.adapters.listadapter import ListAdapter # For History list
from kivy.uix.gridlayout import GridLayout # Only Using Grid Layouts
from kivy.uix.label import Label # Label Class for Returns
from kivy.uix.button import Button # Button Class for everything else
from kivy.uix.listview import ListItemButton, ListItemLabel, \
CompositeListItem, ListView # For Right Side History
from kivy.uix.slider import Slider # For Controls
from kivy.uix.popup import Popup # For viewing description in History
from kivy.graphics import Canvas, Color, Rectangle # For backgrounds
from kivy.core.image import Image # For textures
from kivy.clock import Clock # For scheduling a deselect of the history list
# ==============================================================================
# AT GENERIC VARIABLES
# ==============================================================================
# Markup Shortcuts
SWF = "[font=fonts/sw_symbols.ttf]"
SWFC = "[/font]"
DC = "[color=873ead]"
CC = "[/color]"
# ==============================================================================
# APP SPECIFIC VARIABLES
# ==============================================================================
# For ResultDisplay
# Column headers for the ResultDisplay list (roll value, severity, result name).
HEADER_TEXT = ('Roll', 'Severity', 'Result')
# Relative column widths (size_hint_x), matching HEADER_TEXT order.
COL_SIZES = (20,20,60)
# Description text for each critical injury result, keyed by the injury name
# returned from crit_chart().
RESULTS ={
    "Minor Nick": "The target suffers 1 strain.",
    "Slowed Down": "The target can only act during the last allied initiative slot on his next turn.",
    "Sudden Jolt": "The target drops whatever is in hand.",
    "Distracted": "The target cannot perform a free maneuver during his next turn.",
    "Off-Balance": "Add (1) Setback Die to target's next skill check.",
    "Discouraging Wound": "Flip one light side Destiny point to the dark side (reverse if NPC).",
    "Stunned": "The target is staggered until the end of target's next turn.",
    "Stinger": "Increase difficulty of next check by (1).",
    "Bowled Over": "The target is knocked prone and suffers (1) strain.",
    "Head Ringer": "The target increases the difficulty of all Intellect and Cunning checks by (1) until the end of the encounter.",
    "Fearsome Wound": "The target increases the difficulty of all Presence and Willpower checks by (1) until the end of the encounter.",
    "Agonizing Wound": "The target increases the difficulty of all Brawn and Agility checks by (1) until the end of the encounter.",
    "Slightly Dazed": "The target is disoriented until the end of the encounter.",
    "Scattered Senses": "The target removes all Boost Die from skill checks until the end of the encounter.",
    "Hamstrung": "The target loses their free maneuver until the end of the encounter.",
    "Overpowered": "The target leaves himself open, and the attacker may immediately attempt another free attack against him, using the exact same pool as the original attack.",
    "Winded": "Until the end of the encounter, the target cannot voluntarily suffer strain to activate any abilities or gain additional maneuvers.",
    "Compromised": "Increase difficulty of all skill checks by (1) until the end of the encounter.",
    "At the Brink": "The target suffers (1) strain each time they perform an action",
    "Crippled": "One of the target's limbs (selected by the GM) is crippled until healed or replaced. Increase difficulty of all checks that require use of that limb by one.",
    "Maimed": "One of the target's limbs (selected by the GM) is permanently lost. Unless the target has a cybernetic replacement, the target cannot perform actions that would require the use of that limb. All other actions gain (1) Setback Die.",
    "Horrific Injury": "Randomly roll a 1d10 to determine one of the target's characteristics 1-3 for Brawn, 4-6 for Agility, 7 for Intellect, 8 for Cunning, 9 for Presence, 10 for Willpower. Until this Critical Injury is healed, treat that characteristic as (1) point lower.",
    "Temporarily Lame": "Until this Critical Injury is healed, the target cannot perform more than one maneuver during their turn.",
    "Blinded": "The target can no longer see. Upgrade the difficulty of all checks twice. Upgrade the difficulty of all Perception and Vigilance checks three times.",
    "Knocked Senseless": "The target is staggered for the remainder of the encounter.",
    "Gruesome Injury": "Randomly roll a 1d10 to determine one of the target's characteristics 1-3 for Brawn, 4-6 for Agility, 7 for Intellect, 8 for Cunning, 9 for Presence, 10 for Willpower. That characteristic is permanently reduced by (1), to a minimum of (1).",
    "Bleeding Out": "Every round, the target suffers (1) wound and (1) strain at the beginning of their turn. For every five wounds they suffer beyond their wound threshold, they suffer one additional Critical Injury (cannot suffer this one again).",
    "The End is Nigh": "The target will die after the last Initiative slot during the next round.",
    "Dead": "Complete, obliterated death"
    }
# Labels indexed by the severity level (1-5) returned from crit_chart();
# index 0 is unused.  '-' marks the highest severity ("Dead").
SEVERITY = [None, 'Easy', 'Average', 'Hard', 'Daunting', '-']
# ==============================================================================
# FUNCTIONS
# ==============================================================================
# Threshold tables for crit_chart().  Each entry is (minimum roll, value),
# ordered from highest threshold to lowest so the first match wins.  The
# values mirror the original if/elif chain exactly.
_SEVERITY_TABLE = (
    (151, 5),
    (126, 4),
    (91, 3),
    (41, 2),
)
_INJURY_TABLE = (
    (151, "Dead"),
    (141, "The End is Nigh"),
    (131, "Bleeding Out"),
    (126, "Gruesome Injury"),
    (121, "Knocked Senseless"),
    (116, "Blinded"),
    (111, "Temporarily Lame"),
    (106, "Horrific Injury"),
    (101, "Maimed"),
    (95, "Crippled"),
    (91, "At the Brink"),
    (86, "Compromised"),
    (81, "Winded"),
    (76, "Overpowered"),
    (71, "Hamstrung"),
    (66, "Scattered Senses"),
    (61, "Slightly Dazed"),
    (56, "Agonizing Wound"),
    (51, "Fearsome Wound"),
    (46, "Head Ringer"),
    (41, "Bowled Over"),
    (36, "Stinger"),
    (31, "Stunned"),
    (26, "Discouraging Wound"),
    (21, "Off-Balance"),
    (16, "Distracted"),
    (11, "Sudden Jolt"),
    (6, "Slowed Down"),
)


def crit_chart(roll):
    """Interpret a critical injury roll against the injury chart.

    roll -- the (possibly modified) critical injury roll, an integer.

    Returns a (severity, name) tuple, where severity is 1-5 (an index into
    SEVERITY) and name is a key of the RESULTS dict.
    """
    # Severity: rolls below 41 are level 1; 151+ is the maximum level 5.
    severity = 1
    for threshold, level in _SEVERITY_TABLE:
        if roll >= threshold:
            severity = level
            break

    # Injury name: rolls below 6 fall through to "Minor Nick".
    name = "Minor Nick"
    for threshold, injury in _INJURY_TABLE:
        if roll >= threshold:
            name = injury
            break

    return severity, name
def new_pos(old_size=(1, 1), new_size=(1, 1), pos=(1, 1), pos_mult=(0, 0)):
    """Return *pos* shifted to compensate for a widget resize.

    The shift on each axis is (old - new) size scaled by the matching
    pos_mult component, so pos_mult controls how much of the size change
    is applied to the position.
    """
    shift_x = (old_size[0] - new_size[0]) * pos_mult[0]
    shift_y = (old_size[1] - new_size[1]) * pos_mult[1]
    return (pos[0] + shift_x, pos[1] + shift_y)
# ==============================================================================
# GUI CLASSES
# ==============================================================================
# ==============================================================================
# RESULTBOX WIDGETS
# ==============================================================================
class crit_HeaderRoll(Label):
    """Column header label for the 'Roll' column of the result display."""
    def __init__(self, **kwargs):
        super(crit_HeaderRoll, self).__init__(**kwargs)
        # First entry of HEADER_TEXT / COL_SIZES corresponds to the Roll column.
        self.text = HEADER_TEXT[0]
        self.size_hint_x = COL_SIZES[0]
class crit_HeaderSeverity(Label):
"""Header for Severity"""
|
rackerlabs/qonos | qonos/db/sqlalchemy/migrate_repo/versions/008_add_index_to_schedules.py | Python | apache-2.0 | 1,187 | 0 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2014 Rackspace Hosting
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import MetaData, Table, Index
# Name of the index created (upgrade) and dropped (downgrade) by this migration.
INDEX_NAME = 'next_run_idx'
def upgrade(migrate_engine):
    """Create the ``next_run_idx`` index on schedules.next_run."""
    metadata = MetaData()
    metadata.bind = migrate_engine
    schedules_table = Table('schedules', metadata, autoload=True)
    Index(INDEX_NAME, schedules_table.c.next_run).create(migrate_engine)
def downgrade(migrate_engine):
    """Drop the ``next_run_idx`` index from the schedules table."""
    meta = MetaData()
    meta.bind = migrate_engine
    # Reflect the table so the index object refers to the live column.
    schedules = Table('schedules', meta, autoload=True)
    index = Index(INDEX_NAME, schedules.c.next_run)
    index.drop(migrate_engine)
|
Gebesa-Dev/Addons-gebesa | purchase_order_o2o_procurement/models/__init__.py | Python | agpl-3.0 | 148 | 0 | # -*- coding: utf-8 -*-
# © 2017 Cesar Barron | Bautista
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from . import purcha | se
|
plotly/python-api | packages/python/plotly/plotly/tests/test_orca/test_orca_server.py | Python | mit | 5,402 | 0.001111 | from unittest import TestCase
import plotly.io as pio
import subprocess
import os
from distutils.version import LooseVersion
import requests
import time
import psutil
import pytest
import plotly.graph_objects as go
# Fixtures
# --------
from plotly.io._orca import find_open_port, which, orca_env
@pytest.fixture()
def setup():
    """Reset orca state before each test, with env vars orca must tolerate set."""
    # Set problematic environment variables
    os.environ["NODE_OPTIONS"] = "--max-old-space-size=4096"
    os.environ["ELECTRON_RUN_AS_NODE"] = "1"
    # Reset orca state
    pio.orca.reset_status()
    pio.orca.config.restore_defaults()
# Apply the `setup` fixture to every test function in this module.
pytestmark = pytest.mark.usefixtures("setup")
# Utilities
# ---------
def ping_pongs(server_url):
    """Return True iff the orca server at *server_url* answers /ping with 'pong'."""
    ping_url = server_url + "/ping"
    try:
        resp = requests.post(ping_url)
    except requests.exceptions.ConnectionError:
        # Expected when no server is listening.
        return False
    if resp.status_code != 200:
        return False
    return resp.content.decode("utf-8") == "pong"
def test_validate_orca():
    """validate_executable() moves orca status from 'unvalidated' to 'validated'."""
    assert pio.orca.status.state == "unvalidated"
    pio.orca.validate_executable()
    assert pio.orca.status.state == "validated"
def test_orca_not_found():
    """A nonexistent executable name raises ValueError and stays unvalidated."""
    pio.orca.config.executable = "bogus"
    with pytest.raises(ValueError) as err:
        pio.orca.validate_executable()
    assert pio.orca.status.state == "unvalidated"
    assert "could not be found" in str(err.value)
def test_invalid_executable_found():
    """An executable that exists but is not orca raises ValueError."""
    # 'python' exists on PATH but is not an orca binary.
    pio.orca.config.executable = "python"
    with pytest.raises(ValueError) as err:
        pio.orca.validate_executable()
    assert pio.orca.status.state == "unvalidated"
    assert "executable that was found at" in str(err.value)
def test_orca_executable_path():
    """After validation, status.executable matches the PATH lookup result."""
    assert pio.orca.status.executable is None
    # Resolve orca's location with the platform's PATH-search command.
    if os.name == "nt":  # Windows
        expected = subprocess.check_output(["where", "orca"]).decode("utf-8").strip()
    else:  # Linux / OS X
        expected = subprocess.check_output(["which", "orca"]).decode("utf-8").strip()
    pio.orca.validate_executable()
    assert pio.orca.status.executable == expected
def test_orca_version_number():
    """Reported orca version must be within [1.1.0, 2.0.0)."""
    assert pio.orca.status.version is None
    # NOTE(review): distutils.version is deprecated (PEP 632);
    # packaging.version.Version is the modern replacement.
    expected_min = LooseVersion("1.1.0")
    expected_max = LooseVersion("2.0.0")
    pio.orca.validate_executable()
    version = LooseVersion(pio.orca.status.version)
    assert expected_min <= version
    assert version < expected_max
def test_ensure_orca_ping_and_proc():
    """ensure_server() starts a live orca process that answers /ping, and
    shutdown_server() terminates it and stops it responding."""
    # Disable the inactivity timeout so the server stays up for the whole test.
    pio.orca.config.timeout = None

    assert pio.orca.status.port is None
    assert pio.orca.status.pid is None

    pio.orca.ensure_server()

    assert pio.orca.status.port is not None
    assert pio.orca.status.pid is not None

    server_port = pio.orca.status.port
    server_pid = pio.orca.status.pid

    # Make sure server has time to start up
    time.sleep(10)

    # Check that server process number is valid
    assert psutil.pid_exists(server_pid)

    # Build server URL
    server_url = "http://localhost:%s" % server_port

    # ping server
    assert ping_pongs(server_url)

    # shut down server
    pio.orca.shutdown_server()

    # Check that server process number no longer exists
    assert not psutil.pid_exists(server_pid)

    # Check that ping is no longer answered
    assert not ping_pongs(server_url)
def test_server_timeout_shutdown():
    """Server keeps running while re-pinged within the timeout, then auto-stops."""
    # Configure server to shutdown after 10 seconds without
    # calls to ensure_orca_server
    pio.orca.config.timeout = 10
    pio.orca.ensure_server()
    server_port = pio.orca.status.port
    server_pid = pio.orca.status.pid
    # Build server URL
    server_url = "http://localhost:%s" % server_port
    # Check that server process number is valid
    assert psutil.pid_exists(server_pid)
    for i in range(3):
        # Sleep for just under 10 seconds
        time.sleep(8)
        assert ping_pongs(server_url)
        assert psutil.pid_exists(server_pid)
        # Refresh the inactivity timer before it expires.
        pio.orca.ensure_server()
    # Sleep just over 10 seconds, server should then auto shutdown
    time.sleep(11)
    # Check that server process number no longer exists
    assert not psutil.pid_exists(server_pid)
    # Check that ping is no longer answered
    assert not ping_pongs(server_url)
def test_external_server_url():
    """Setting config.server_url makes pio use an externally managed orca
    server and clears the locally managed one."""
    # Build server url
    port = find_open_port()
    server_url = "http://{hostname}:{port}".format(hostname="localhost", port=port)
    # Build external orca command
    orca_path = which("orca")
    cmd_list = [orca_path] + [
        "serve",
        "-p",
        str(port),
        "--plotly",
        pio.orca.config.plotlyjs,
        "--graph-only",
    ]
    # Run orca as subprocess to simulate external orca server
    DEVNULL = open(os.devnull, "wb")
    with orca_env():
        proc = subprocess.Popen(cmd_list, stdout=DEVNULL)
    # Start plotly managed orca server so we can ensure it gets shut down properly
    pio.orca.config.port = port
    pio.orca.ensure_server()
    assert pio.orca.status.state == "running"
    # Configure orca to use external server
    pio.orca.config.server_url = server_url
    # Make sure that the locally managed orca server has been shutdown and the local
    # config options have been cleared
    assert pio.orca.status.state == "unvalidated"
    assert pio.orca.config.port is None
    fig = go.Figure()
    img_bytes = pio.to_image(fig, format="svg")
    assert img_bytes.startswith(b"<svg class")
    # Kill server orca process
    proc.terminate()
|
elarsonSU/egret | egret.py | Python | gpl-3.0 | 7,045 | 0.021008 | # egret.py: Command line interface for EGRET
#
# Copyright (C) 2016-2018 Eric Larson and Anna Kirk
# elarson@seattleu.edu
#
# This file is part of EGRET.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import re
import sys
import egret_ext
from optparse import OptionParser
#import time
# Precondition: regexStr successfully compiles and all strings in testStrings
# match the regular expression
# Precondition: regexStr successfully compiles and all strings in testStrings
# match the regular expression
def get_group_info(regexStr, testStrings, namedOnly):
    """Return the capture groups each test string produces under regexStr.

    For a regex with named groups the result maps each string to a list of
    single-entry dicts, ordered by where each ``(?P<name>`` appears in the
    pattern.  For a regex with only numbered groups it maps each string to
    the plain ``match.groups()`` tuple (or returns None when namedOnly is
    set).  Returns {} for an empty string list and None for a regex with
    no capturing groups at all.
    """
    if not testStrings:
        return {}

    pattern = re.compile(regexStr)

    # Probe the first string to decide which kind of groups the regex has.
    probe = pattern.fullmatch(testStrings[0])
    named = probe.groupdict()

    if named:
        # Order the group names by their position in the pattern text.
        positions = sorted(
            (re.search(r"\?P<" + name, regexStr).start(), name)
            for name in named
        )
        header = [name for _, name in positions]

        def extract(match):
            values = match.groupdict()
            return [{name: values[name]} for name in header]
    elif probe.groups():
        if namedOnly:
            return None

        def extract(match):
            return match.groups()
    else:
        return None

    return {s: extract(pattern.fullmatch(s)) for s in testStrings}
# Command-line interface definition.
# FIX: the original text was corrupted mid-token on the -o option
# ("des | t") and on the continuation line of the -n option; both have
# been repaired without changing any option semantics.
parser = OptionParser()
# Exactly one of -f / -r should be given; with neither, the regex is
# prompted for interactively (validated after parsing, below).
parser.add_option("-f", "--file", dest = "fileName", help = "file containing regex")
parser.add_option("-r", "--regex", dest = "regex", help = "regular expression")
parser.add_option("-b", "--base_substring", dest = "baseSubstring",
    default = "evil", help = "base substring for regex strings")
parser.add_option("-o", "--output_file", dest = "outputFile", help = "output file name")
parser.add_option("-d", "--debug", action = "store_true", dest = "debugMode",
    default = False, help = "display debug info")
parser.add_option("-s", "--stat", action = "store_true", dest = "statMode",
    default = False, help = "display stats")
parser.add_option("-g", "--groups", action = "store_true", dest = "showGroups",
    default = False, help = "show groups")
parser.add_option("-n", "--named_groups", action = "store_true", dest = "showNamedGroups",
    default = False, help = "only show named groups")
# Parse the command line using the OptionParser configured above.
opts, args = parser.parse_args()
# check for valid command lines
if opts.fileName != None and opts.regex != None:
    print("Cannot specify both a regular expression and input file")
    sys.exit(-1)
# get the regular expression
# descStr is an optional human-readable description; it is only populated
# when the regex comes from a file (second line of that file).
descStr = ""
if opts.fileName != None:
    inFile = open(opts.fileName)
    regexStr = inFile.readline().rstrip()
    try:
        # Second line of the file, if present, is a description.
        descStr = inFile.readline().rstrip()
    except:
        # NOTE(review): readline() normally returns '' at EOF rather than
        # raising, so this bare except appears to be purely defensive.
        descStr = ""
    inFile.close()
elif opts.regex != None:
    regexStr = opts.regex
else:
    # Interactive fallback: prompt the user for the regex.
    regexStr = input("Enter a Regular Expression: ")
# compile the regular expression
try:
    regex = re.compile(regexStr)
    # execute regex-test
    #start_time = time.process_time()
    # egret_ext.run returns a list of lines: a status line first, possibly
    # followed by alert lines, then a "BEGIN" marker, then the generated
    # test strings.
    inputStrs = egret_ext.run(regexStr, opts.baseSubstring,
        False, False, opts.debugMode, opts.statMode)
    status = inputStrs[0]
    # The engine signals failure with a status line beginning "ERROR".
    hasError = (status[0:5] == "ERROR")
except re.error as e:
    status = "ERROR (compiler error): Regular expression did not compile: " + str(e)
    hasError = True
if hasError:
    alerts = [status]
else:
    # Split the returned lines at the "BEGIN" marker: everything before it
    # (including the status line, when idx > 0) is an alert; everything
    # after it is a generated test string.
    idx = 0
    line = inputStrs[idx]
    while line != "BEGIN":
        idx += 1;
        line = inputStrs[idx]
    if idx == 0:
        alerts = []
        inputStrs = inputStrs[1:]
    else:
        alerts = inputStrs[:idx]
        inputStrs = inputStrs[idx+1:]
# When the regex compiled and EGRET ran successfully, classify each
# generated string as a match or a non-match and (optionally) gather
# capture-group information for display.
if not hasError:
    # test each string against the regex
    matches = []
    nonMatches = []
    for inputStr in inputStrs:
        if regex.fullmatch(inputStr):
            matches.append(inputStr)
        else:
            nonMatches.append(inputStr)
    #elapsed_time = time.process_time() - start_time

    # display groups if requested
    if opts.showGroups or opts.showNamedGroups:
        groupDict = get_group_info(regexStr, matches, opts.showNamedGroups)
        if groupDict is None:
            # The regex has no (suitable) capturing groups.
            showGroups = False
            if opts.showGroups:
                # BUG FIX: the original referenced the undefined name
                # 'hasAlert' here, raising a NameError whenever -g was used
                # with a regex lacking capturing groups.  Record the
                # warning in the alerts list so it is shown in the output
                # header like every other alert.
                alerts.append("Regex does not have any capturing groups")
        else:
            showGroups = True
            # Column width for aligned group output; never narrower than
            # the literal "<empty>" placeholder (7 characters).
            maxLength = 7
            for inputStr in matches:
                if len(inputStr) > maxLength:
                    maxLength = len(inputStr)
            groupFmt = "{0:" + str(maxLength) + "} {1}"
    else:
        showGroups = False
# print the stats
# BUG FIX: the original evaluated len(matches)/len(nonMatches) even when
# the regex failed to compile, in which case those names are undefined
# and -s would raise a NameError before the error was ever reported.
# Stats are only meaningful for a successful run.
if opts.statMode and not hasError:
    fmt = "{0:30}| {1}"
    print("--------------------------------------")
    print(fmt.format("Matches", len(matches)))
    print(fmt.format("Non-matches", len(nonMatches)))
    #print(fmt.format("Time", elapsed_time))

# create the output header: the regex, an optional description, and any
# alerts reported by the engine (one per line)
header = "Regex: " + regexStr + "\n\n"
if descStr != "":
    header += ("Description: " + descStr + "\n\n")
for line in alerts:
    header += line
    header += "\n"

# write the output header to the output file if one was given, otherwise
# to stdout; on error, report the header and stop
if opts.outputFile:
    outFile = open(opts.outputFile, 'w')
    outFile.write(header)
    if hasError:
        outFile.close()
        sys.exit(-1)
else:
    print()
    print(header, end='')
    if hasError:
        sys.exit(-1)

# print the match strings
if opts.outputFile:
    outFile.write("Matches:\n")
else:
    print("Matches:")
for inputStr in sorted(matches):
    # The empty string is a legal generated input; show a placeholder.
    dispStr = "<empty>" if inputStr == "" else inputStr
    if showGroups:
        dispStr = groupFmt.format(dispStr, str(groupDict[inputStr]))
    if opts.outputFile:
        outFile.write(dispStr + "\n")
    else:
        print(dispStr)

# print the non match strings
if opts.outputFile:
    outFile.write("\nNon-matches:\n")
else:
    print("\nNon-matches:")
for inputStr in sorted(nonMatches):
    dispStr = "<empty>" if inputStr == "" else inputStr
    if opts.outputFile:
        outFile.write(dispStr + "\n")
    else:
        print(dispStr)

# close the output
if opts.outputFile:
    outFile.close()
sys.exit(0)
|
sisidra/scalegrease | setup.py | Python | apache-2.0 | 415 | 0.004819 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup

# Packaging definition for scalegrease.
# FIX: the original text was corrupted mid-token in the package name
# ("' | scalegrease'") and in the scripts list; repaired using the
# project URL and repository layout as reference.
setup(name='scalegrease',
      version='1',
      url='https://github.com/spotify/scalegrease',
      description='A tool chain for executing batch processing jobs',
      packages=['scalegrease'],
      # Ship the default configuration into /etc.
      data_files=[('/etc', ['conf/scalegrease.json'])],
      scripts=[
          'bin/greaserun',
          'bin/greaseworker'
      ]
      )
|
steveb/heat | heat/engine/resources/openstack/mistral/workflow.py | Python | apache-2.0 | 27,192 | 0.000037 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from oslo_serialization import jsonutils
import six
import yaml
from heat.common import exception
from heat.common.i18n import _
from heat.engine import attributes
from heat.engine import constraints
from heat.engine import properties
from heat.engine import resource
from heat.engine.resources import signal_responder
from heat.engine import support
class Workflow(signal_responder.SignalResponder,
resource.Resource):
"""A resource that implements Mistral workflow.
Workflow represents a process that can be described in a various number of
ways and that can do some job interesting to the end user. Each workflow
consists of tasks (at least one) describing what exact steps should be made
during workflow execution.
For detailed description how to use Workflow, read Mistral documentation.
"""
support_status = support.SupportStatus(version='2015.1')
default_client_name = 'mistral'
entity = 'workflows'
PROPERTIES = (
NAME, TYPE, DESCRIPTION, INPUT, OUTPUT, TASKS, PARAMS,
TASK_DEFAULTS, USE_REQUEST_BODY_AS_INPUT
) = (
'name', 'type', 'description', 'input', 'output', 'tasks', 'params',
'task_defaults', 'use_request_body_as_input'
)
_TASKS_KEYS = (
TASK_NAME, TASK_DESCRIPTION, ON_ERROR, ON_COMPLETE, ON_SUCCESS,
POLICIES, ACTION, WORKFLOW, PUBLISH, TASK_INPUT, REQUIRES,
RETRY, WAIT_BEFORE, WAIT_AFTER, PAUSE_BEFORE, TIMEOUT,
WITH_ITEMS, KEEP_RESULT, TARGET, JOIN
) = (
'name', 'description', 'on_error', 'on_complete', 'on_success',
'policies', 'action', 'workflow', 'publish', 'input', 'requires',
'retry', 'wait_before', 'wait_after', 'pause_before', 'timeout',
'with_items', 'keep_result', 'target', 'join'
)
_TASKS_TASK_DEFAULTS = [
ON_ERROR, ON_COMPLETE, ON_SUCCESS,
REQUIRES, RETRY, WAIT_BEFORE, WAIT_AFTER, PAUSE_BEFORE, TIMEOUT
]
_SIGNAL_DATA_KEYS = (
SIGNAL_DATA_INPUT, SIGNAL_DATA_PARAMS
) = (
'input', 'params'
)
ATTRIBUTES = (
WORKFLOW_DATA, ALARM_URL, EXECUTIONS
) = (
'data', 'alarm_url', 'executions'
)
properties_schema = {
NAME: properties.Schema(
properties.Schema.STRING,
_('Workflow name.')
),
TYPE: properties.Schema(
properties.Schema.STRING,
_('Workflow type.'),
constraints=[
constraints.AllowedValues(['direct', 'reverse'])
],
required=True,
update_allowed=True
),
USE_REQUEST_BODY_AS_INPUT: properties.Schema(
properties.Schema.BOOLEAN,
_('Defines the method in which the request body for signaling a '
'workflow would be parsed. In case this property is set to '
'True, the body would be parsed as a simple json where each '
'key is a workflow input, in other cases body would be parsed '
'expecting a specific json format with two keys: "input" and '
'"params".'),
update_allowed=True,
support_status=support.SupportStatus(version='6.0.0')
),
DESCRIPTION: properties.Schema(
properties.Schema.STRING,
_('Workflow description.'),
update_allowed=True
),
INPUT: properties.Schema(
properties.Schema.MAP,
_('Dictionary which contains input for workflow.'),
update_allowed=True
),
OUTPUT: properties.Schema(
properties.Schema.MAP,
_('Any data structure arbitrarily containing YAQL '
'expressions that defines workflow output. May be '
'nested.'),
update_allowed=True
),
PARAMS: properties.Schema(
properties.Schema.MAP,
_("Workflow additional parameters. If Workflow is reverse typed, "
"params requires 'task_name', which defines initial task."),
update_allowed=True
),
TASK_DEFAULTS: properties.Schema(
properties.Schema.MAP,
_("Default settings for some of task "
"attributes defined "
"at workfl | ow level."),
support_status=support.SupportStatus(version='5.0.0'),
schema={
ON_SUCCE | SS: properties.Schema(
properties.Schema.LIST,
_('List of tasks which will run after '
'the task has completed successfully.')
),
ON_ERROR: properties.Schema(
properties.Schema.LIST,
_('List of tasks which will run after '
'the task has completed with an error.')
),
ON_COMPLETE: properties.Schema(
properties.Schema.LIST,
_('List of tasks which will run after '
'the task has completed regardless of whether '
'it is successful or not.')
),
REQUIRES: properties.Schema(
properties.Schema.LIST,
_('List of tasks which should be executed before '
'this task. Used only in reverse workflows.')
),
RETRY: properties.Schema(
properties.Schema.MAP,
_('Defines a pattern how task should be repeated in '
'case of an error.')
),
WAIT_BEFORE: properties.Schema(
properties.Schema.INTEGER,
_('Defines a delay in seconds that Mistral Engine '
'should wait before starting a task.')
),
WAIT_AFTER: properties.Schema(
properties.Schema.INTEGER,
_('Defines a delay in seconds that Mistral Engine '
'should wait after a task has completed before '
'starting next tasks defined in '
'on-success, on-error or on-complete.')
),
PAUSE_BEFORE: properties.Schema(
properties.Schema.BOOLEAN,
_('Defines whether Mistral Engine should put the '
'workflow on hold or not before starting a task.')
),
TIMEOUT: properties.Schema(
properties.Schema.INTEGER,
_('Defines a period of time in seconds after which '
'a task will be failed automatically '
'by engine if hasn\'t completed.')
),
},
update_allowed=True
),
TASKS: properties.Schema(
properties.Schema.LIST,
_('Dictionary containing workflow tasks.'),
schema=properties.Schema(
properties.Schema.MAP,
schema={
TASK_NAME: properties.Schema(
properties.Schema.STRING,
_('Task name.'),
required=True
),
TASK_DESCRIPTION: properties.Schema(
properties.Schema.STRING,
_('Task description.')
),
TASK_INPUT: properties.Schema(
properties.Schema.MAP,
_('Actual |
thomashuang/white | white/controller/admin/extend.py | Python | gpl-2.0 | 1,282 | 0.0039 | #!/usr/bin/env python
# 2015 Copyright (C) White
#
# This program is | free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 2 of the License.
#
| # This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from flask import render_template
from white.controller import admin_bp as bp, ADMIN, EDITOR
from white.security import security
@bp.route('/extend')
@security(ADMIN)
def extend_index():
    """Render the admin 'Extend' landing page (admin-only)."""
    return render_template('admin/extend/index.html')
@bp.route('/extend/variable')
@security(ADMIN)
def variable_index():
    """Render the variable listing page under Extend (admin-only)."""
    return render_template('admin/extend/variable/index.html')
@bp.route('/extend/variable/add')
@security(ADMIN)
def variable_add_page():
    """Render the 'add variable' form page (admin-only)."""
    return render_template('admin/extend/variable/add.html')
@bp.route('/extend/plugin')
@security(ADMIN)
def extend_plugin():
    """Render the plugin listing page under Extend (admin-only)."""
    return render_template('admin/extend/plugin/index.html')
TeamAADGT/CMPUT404-project-socialdistribution | service/posts/views.py | Python | apache-2.0 | 15,357 | 0.004558 | import requests
from django.db.models import Q
from rest_framework import viewsets, views, generics, mixins, status, filters
from rest_framework.decorators import list_route
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from service.authentication.node_basic import NodeBasicAuthentication
from service.posts.pagination import PostsPagination
from service.posts.serializers import PostSerializer, FOAFCheckPostSerializer
from social.app.models.author import Author
from social.app.models.node import Node
from social.app.models.post import Post
class PublicPostsList(generics.ListAPIView):
    """
    Returns all local Posts set to public visibility.
    Does not require authentication.
    For all posts, see `GET /service/author/posts/`.
    ### Parameters
    See below. None are required
    ### Example Successful Response
    See `GET /service/posts/{post_id}`.
    """
    pagination_class = PostsPagination
    serializer_class = PostSerializer
    # Remote nodes may identify themselves via HTTP Basic auth, but no
    # permission class is set, so anonymous requests are served too.
    authentication_classes = (NodeBasicAuthentication,)
    filter_backends = (filters.OrderingFilter,)
    ordering_fields = ('published', 'title', 'categories', 'contentType',)
    # Newest first by default.
    ordering = ('-published',)

    # No permission class
    def get_queryset(self):
        # request.user is the authenticated remote node (or anonymous);
        # only public posts are exposed on this endpoint.
        remote_node = self.request.user
        return get_local_posts(remote_node, public_only=True)
# Defined as a ViewSet so a custom function can be defined to get around schema weirdness -- see all_posts()
class AllPostsViewSet(mixins.ListModelMixin, viewsets.GenericViewSet):
    """List endpoint exposing all local posts visible to the requesting node.

    Unlike PublicPostsList, authentication is required here and the
    queryset is not restricted to public-only posts.
    """
    pagination_class = PostsPagination
    serializer_class = PostSerializer
    authentication_classes = (NodeBasicAuthentication,)
    permission_classes = (IsAuthenticated,)
    filter_backends = (filters.OrderingFilter,)
    ordering_fields = ('published', 'title', 'categories', 'contentType',)
    # Newest first by default.
    ordering = ('-published',)

    def get_queryset(self):
        # request.user is the authenticated remote node.
        remote_node = self.request.user
        return get_local_posts(remote_node)

    @list_route(methods=['GET'])
    def all_posts(self, request, *args, **kwargs):
        """
        Returns all local Posts not set to server-only visibility.
        For all public posts, see `GET /service/posts/`.
        ### Example Successful Response
        See `GET /service/posts/{post_id}`.
        """
        # Needed to make sure this shows up in the schema -- collides with /posts/ otherwise
        return self.list(request, *args, **kwargs)
class SpecificPostsViewSet(mixins.ListModelMixin, mixins.CreateModelMixin, mixins.RetrieveModelMixin,
viewsets.GenericViewSet):
pagination_class = PostsPagination
authentication_classes = (NodeBasicAuthentication,)
permission_classes = (IsAuthenticated,)
def get_serializer_class(self):
if self.action == 'create':
return FOAFCheckPostSerializer
return PostSerializer
def get_queryset(self):
post_id = self.kwargs["pk"]
remote_node = self.request.user
return get_local_posts(remote_node).filter(Q(id=post_id) | Q(parent_post__id=post_id))
def retrieve(self, request, *args, **kwargs):
"""
Returns the local post with the specified ID, if any.
If the local post has an attached image, and the current remote node has permission to view images, the post
containing that image is also returned. In other words, this endpoint will always return 0-2 posts.
### Parameters
* id: The ID of the Post. (required)
### Example Successful Response
{
"count": 2,
"posts": [
{
"title": "sadfsdafsa",
"source": "http://127.0.0.1:8000/service/posts/ab9105af-ba92-41c1-b722-2aaa088a323a",
"origin": "http://127.0.0.1:8000/service/posts/ab9105af-ba92-41c1-b722-2aaa088a323a",
"description": "sdfsad",
"contentType": "text/markdown",
"content": "sdfasdf",
"author": {
"id": "http://127.0.0.1:8000/service/author/7cb311bf-69dd-4945-b610-937d032d6875",
"host": "http://127.0.0.1:8000/service/",
"displayName": "Adam Ford",
"url": "http://127.0.0.1:8000/service/author/7cb311bf-69dd-4945-b610-937d032d6875",
"github": ""
},
"categories": [
"1",
"2",
"3"
],
"comments": [],
"published": "2017-04-11T06:14:47.556000Z",
"id": "ab9105af-ba92-41c1-b722-2aaa088a323a",
"visibility": "PUBLIC",
"visibleTo": [],
"unlisted": false,
"next": "http://127.0.0.1:8000/service/posts/ab9105af-ba92-41c1-b722-2aaa088a323a/comments",
"count": 0,
"size": 5
},
{
"title": "Upload",
"source": "http://127.0.0.1:8000/service/posts/d10a7f31-10ed-4567-a93d-e3e80356b9ab",
"origin": "http://127.0.0.1:8000/service/posts/d10a7f31-10ed-4567-a93d-e3e80356b9ab",
"description": "Upload",
"contentType": "image/png;base64",
"content": "iVBORw0KGgoAAAANSUhEUgAAArIAAAGKCAYAAA...",
"author": {
"id": "http://127.0.0.1:8000/service/author/7cb311bf-69dd-4945-b610-937d032d6875",
"host": "http://127.0.0.1:8000/service/",
"displayName": "Adam Ford",
"url": "http://127.0.0.1:8000/service/author/7cb311bf-69dd-4945-b6 | 10-937d032d6875",
"github": ""
},
"categories": [
"1",
"2",
"3"
],
"comments": [],
"published": "2017-04-11T06:14:48.290000Z",
"id": "d10a7f31-10e | d-4567-a93d-e3e80356b9ab",
"visibility": "PUBLIC",
"visibleTo": [],
"unlisted": false,
"next": "http://127.0.0.1:8000/service/posts/d10a7f31-10ed-4567-a93d-e3e80356b9ab/comments",
"count": 0,
"size": 5
}
],
"next": null,
"query": "posts",
"size": 100,
"previous": null
}
"""
return self.list(self, request, *args, **kwargs)
def create(self, request, *args, **kwargs):
"""
Checking whether the requesting author can see this FOAF post or not.
### Parameters
* id: The ID of the Post being requested. (required)
### Expected Input
{
# The requested query. Must be set to "getPost". (required)
"query":"getPost",
# The UUID of the requested Post. (required)
"postid":"{POST_ID}",
# The URI of the requested Post. (required)
"url":"http://service/posts/{POST_ID}",
# Information about the requesting Author. (required)
"author":{
# The URI of the requesting author. (required)
"id":"http://127.0.0.1:5454/service/author/de305d54-75b4-431b-adb2-eb6b9e546013",
# The base service URL of the requesting Author's local node. (required)
"host":"http://127.0.0.1:5454/service/",
# The display name of the requesting Author. (optional)
"displayName":"Jerry Johnson",
# The URI of the requesting author. (required)
"url":"http://127.0.0.1:5454/service/author/de305d54-75b4-431b-adb2-eb6b9e546013",
# The URI of the requesting Author's Github profile. (optional)
"github": "http://github |
alisaifee/flask-limiter | flask_limiter/contrib/__init__.py | Python | mit | 28 | 0 | "" | "Contributed 'recipes' | """
|
Bernardo-MG/dice-notation-python | dice_notation/dice.py | Python | mit | 3,766 | 0 | # -*- coding: utf-8 -*-
from abc import ABCMeta, abstractmethod
from random import randint
"""
Dice classes.
These are just plain dice, not dice notation, even thought they may be used
for handling that too.
The classes in this file will allow creating and using dice and rollable
entities, which are able to generate a random value.
There are two dice classes, the Dice is just the bare dice, while the
RollableDice is an extension, which allows rolling the dice.
"""
# FIX: author name typo ("Benardo" -> "Bernardo"); the repository owner
# is Bernardo-MG.
__author__ = 'Bernardo Martínez Garrido'
__license__ = 'MIT'
class Rollable(object, metaclass=ABCMeta):
    """
    Interface for rollable classes.

    While rolling implies using dice to generate a random value, this
    interface just takes care of generating a random value.  This way it
    can support not only any kind of dice, but also more complex
    constructions such as dice notation expressions, where calling the
    roll method would execute the full expression.

    As such, the value generated by rolling may be anything.

    FIX: the original text of this class was corrupted by a stray ' | '
    token before the raise statement; the statement has been restored.
    """

    def __init__(self):
        pass

    @abstractmethod
    def roll(self):
        """
        Generates a random value.

        This can be anything, the only expectation is that the output
        is randomized somehow.
        """
        raise NotImplementedError('The roll method must be implemented')
class Dice(object):
    """A homogeneous group of dice.

    The group is described by how many dice it contains and how many
    sides each of those dice has.  Both values are expected to be
    positive or zero; no geometric constraint is imposed on the number
    of sides.
    """

    def __init__(self, quantity, sides):
        super(Dice, self).__init__()
        self._quantity = quantity
        self._sides = sides

    def __str__(self):
        # Standard dice shorthand, e.g. "3d6".
        return '{0}d{1}'.format(self._quantity, self._sides)

    def __repr__(self):
        return '<class {0}>(quantity={1!r}, sides={2!r})'.format(
            self.__class__.__name__, self._quantity, self._sides)

    @property
    def quantity(self):
        """Number of dice in the group (expected non-negative)."""
        return self._quantity

    @quantity.setter
    def quantity(self, value):
        self._quantity = value

    @property
    def sides(self):
        """Number of sides on each die (expected non-negative)."""
        return self._sides

    @sides.setter
    def sides(self, value):
        self._sides = value
class RollableDice(Dice, Rollable):
    """A dice group that can actually be rolled.

    Rolling generates one random integer in [1, sides] per die and
    returns their sum.  Degenerate configurations are handled too: a
    zero quantity or zero sides yields 0, a None quantity or sides
    yields None, and negative values yield None.
    """

    def __init__(self, quantity, sides):
        super(RollableDice, self).__init__(quantity, sides)

    def roll(self):
        """Roll all the dice and return the summed result."""
        # The zero checks come before the None checks on purpose, so a
        # zero paired with a None still yields 0.
        if self.quantity == 0 or self.sides == 0:
            return 0
        if self.quantity is None or self.sides is None:
            return None
        if self.quantity > 0 and self.sides > 0:
            return sum(randint(1, self.sides) for _ in range(self.quantity))
        # Negative quantities or side counts are meaningless.
        return None
|
bcgov/gwells | app/backend/wells/migrations/0108_auto_20200213_1741.py | Python | apache-2.0 | 3,936 | 0.003049 | # Generated by Django 2.2.10 on 2020-02-13 17:41
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated schema migration for the wells app.

    Switches a set of ForeignKey/OneToOneField relations to
    on_delete=PROTECT (and related_name/db_column settings) on the
    submission- and well-linked models.

    FIX: the original text of the 'lithologydescription' /
    'activity_submission' operation was corrupted mid-token
    ("filing_numb | er", "wells | .ActivitySubmission"); it has been
    repaired to match the identical pattern used by every other
    activity_submission field in this migration.
    """

    dependencies = [
        ('wells', '0107_auto_20200116_2328'),
    ]

    operations = [
        migrations.AlterField(
            model_name='activitysubmissionlinerperforation',
            name='activity_submission',
            field=models.ForeignKey(blank=True, db_column='filing_number', null=True, on_delete=django.db.models.deletion.PROTECT, related_name='linerperforation_set', to='wells.ActivitySubmission'),
        ),
        migrations.AlterField(
            model_name='casing',
            name='activity_submission',
            field=models.ForeignKey(blank=True, db_column='filing_number', null=True, on_delete=django.db.models.deletion.PROTECT, related_name='casing_set', to='wells.ActivitySubmission'),
        ),
        migrations.AlterField(
            model_name='casing',
            name='well',
            field=models.ForeignKey(blank=True, db_column='well_tag_number', null=True, on_delete=django.db.models.deletion.PROTECT, related_name='casing_set', to='wells.Well'),
        ),
        migrations.AlterField(
            model_name='decommissiondescription',
            name='activity_submission',
            field=models.ForeignKey(blank=True, db_column='filing_number', null=True, on_delete=django.db.models.deletion.PROTECT, related_name='decommission_description_set', to='wells.ActivitySubmission'),
        ),
        migrations.AlterField(
            model_name='decommissiondescription',
            name='well',
            field=models.ForeignKey(blank=True, db_column='well_tag_number', null=True, on_delete=django.db.models.deletion.PROTECT, related_name='decommission_description_set', to='wells.Well'),
        ),
        migrations.AlterField(
            model_name='fieldsprovided',
            name='activity_submission',
            field=models.OneToOneField(db_column='filing_number', on_delete=django.db.models.deletion.PROTECT, primary_key=True, related_name='fields_provided', serialize=False, to='wells.ActivitySubmission'),
        ),
        migrations.AlterField(
            model_name='hydraulicproperty',
            name='well',
            field=models.ForeignKey(db_column='well_tag_number', on_delete=django.db.models.deletion.PROTECT, to='wells.Well'),
        ),
        migrations.AlterField(
            model_name='linerperforation',
            name='well',
            field=models.ForeignKey(blank=True, db_column='well_tag_number', null=True, on_delete=django.db.models.deletion.PROTECT, related_name='linerperforation_set', to='wells.Well'),
        ),
        migrations.AlterField(
            model_name='lithologydescription',
            name='activity_submission',
            field=models.ForeignKey(blank=True, db_column='filing_number', null=True, on_delete=django.db.models.deletion.PROTECT, related_name='lithologydescription_set', to='wells.ActivitySubmission'),
        ),
        migrations.AlterField(
            model_name='lithologydescription',
            name='well',
            field=models.ForeignKey(blank=True, db_column='well_tag_number', null=True, on_delete=django.db.models.deletion.PROTECT, related_name='lithologydescription_set', to='wells.Well'),
        ),
        migrations.AlterField(
            model_name='screen',
            name='activity_submission',
            field=models.ForeignKey(blank=True, db_column='filing_number', null=True, on_delete=django.db.models.deletion.PROTECT, related_name='screen_set', to='wells.ActivitySubmission'),
        ),
        migrations.AlterField(
            model_name='screen',
            name='well',
            field=models.ForeignKey(blank=True, db_column='well_tag_number', null=True, on_delete=django.db.models.deletion.PROTECT, related_name='screen_set', to='wells.Well'),
        ),
    ]
|
Hironsan/natural-language-preprocessings | tests/test_padding.py | Python | mit | 3,501 | 0 | import unittest
from numpy.testing import assert_allclose
from preprocessings.padding import pad_sequences, pad_char_sequences
class TestPadding(unittest.TestCase):
    """Unit tests for pad_sequences / pad_char_sequences.

    FIX: the original text of this class was corrupted mid-token in two
    places (an ' | ' split inside an assert_allclose call and inside a
    pad_sequences call); the tokens have been repaired without changing
    any expected values.
    """

    def test_pad_sequences(self):
        """pad_sequences on flat integer sequences: padding side,
        truncating side, and custom fill value."""
        a = [[1], [1, 2], [1, 2, 3]]

        # test padding
        b = pad_sequences(a, maxlen=3, padding='pre')
        assert_allclose(b, [[0, 0, 1], [0, 1, 2], [1, 2, 3]])
        b = pad_sequences(a, maxlen=3, padding='post')
        assert_allclose(b, [[1, 0, 0], [1, 2, 0], [1, 2, 3]])

        # test truncating
        b = pad_sequences(a, maxlen=2, truncating='pre')
        assert_allclose(b, [[0, 1], [1, 2], [2, 3]])
        b = pad_sequences(a, maxlen=2, truncating='post')
        assert_allclose(b, [[0, 1], [1, 2], [1, 2]])

        # test value
        b = pad_sequences(a, maxlen=3, value=1)
        assert_allclose(b, [[1, 1, 1], [1, 1, 2], [1, 2, 3]])

    def test_pad_sequences_vector(self):
        """pad_sequences on sequences of fixed-size vectors."""
        a = [[[1, 1]],
             [[2, 1], [2, 2]],
             [[3, 1], [3, 2], [3, 3]]]

        # test padding
        b = pad_sequences(a, maxlen=3, padding='pre')
        assert_allclose(b, [[[0, 0], [0, 0], [1, 1]],
                            [[0, 0], [2, 1], [2, 2]],
                            [[3, 1], [3, 2], [3, 3]]])
        b = pad_sequences(a, maxlen=3, padding='post')
        assert_allclose(b, [[[1, 1], [0, 0], [0, 0]],
                            [[2, 1], [2, 2], [0, 0]],
                            [[3, 1], [3, 2], [3, 3]]])

        # test truncating
        b = pad_sequences(a, maxlen=2, truncating='pre')
        assert_allclose(b, [[[0, 0], [1, 1]],
                            [[2, 1], [2, 2]],
                            [[3, 2], [3, 3]]])
        b = pad_sequences(a, maxlen=2, truncating='post')
        assert_allclose(b, [[[0, 0], [1, 1]],
                            [[2, 1], [2, 2]],
                            [[3, 1], [3, 2]]])

        # test value
        b = pad_sequences(a, maxlen=3, value=1)
        assert_allclose(b, [[[1, 1], [1, 1], [1, 1]],
                            [[1, 1], [2, 1], [2, 2]],
                            [[3, 1], [3, 2], [3, 3]]])

    def test_pad_char_sequences(self):
        """pad_char_sequences pads both the word axis and the
        character axis of nested sequences."""
        a = [[[1]],
             [[2], [2, 2]],
             [[3], [3, 2], [3, 3, 3]]]

        # test padding
        b = pad_char_sequences(a, padding='pre')
        assert_allclose(b, [[[0, 0, 0], [0, 0, 0], [0, 0, 1]],
                            [[0, 0, 0], [0, 0, 2], [0, 2, 2]],
                            [[0, 0, 3], [0, 3, 2], [3, 3, 3]]])
        b = pad_char_sequences(a, padding='post')
        assert_allclose(b, [[[1, 0, 0], [0, 0, 0], [0, 0, 0]],
                            [[2, 0, 0], [2, 2, 0], [0, 0, 0]],
                            [[3, 0, 0], [3, 2, 0], [3, 3, 3]]])

        # test truncating
        b = pad_char_sequences(a, maxlen=2, padding='pre')
        assert_allclose(b, [[[0, 0], [0, 0], [0, 1]],
                            [[0, 0], [0, 2], [2, 2]],
                            [[0, 3], [3, 2], [3, 3]]])
        b = pad_char_sequences(a, maxlen=2, padding='post')
        assert_allclose(b, [[[1, 0], [0, 0], [0, 0]],
                            [[2, 0], [2, 2], [0, 0]],
                            [[3, 0], [3, 2], [3, 3]]])

        # test value
        b = pad_char_sequences(a, padding='pre', value=1)
        assert_allclose(b, [[[1, 1, 1], [1, 1, 1], [1, 1, 1]],
                            [[1, 1, 1], [1, 1, 2], [1, 2, 2]],
                            [[1, 1, 3], [1, 3, 2], [3, 3, 3]]])
|
backupManager/pyflag | src/plugins_old/NetworkForensics/ProtocolHandlers/POP.py | Python | gpl-2.0 | 9,518 | 0.015444 | """ This module implements features specific for POP Processing """
# Michael Cohen <scudette@users.sourceforge.net>
# Gavin Jackson <Gavz@users.sourceforge.net>
#
# ******************************************************
# Version: FLAG $Version: 0.87-pre1 Date: Thu Jun 12 00:48:38 EST 2008$
# ******************************************************
#
# * This program is free software; you can redistribute it and/or
# * modify it under the terms of the GNU General Public License
# * as published by the Free Software Foundation; either version 2
# * of the License, or (at your option) any later version.
# *
# * This program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; if not, write to the Free Software
# * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# ******************************************************
import pyflag.conf
config=pyflag.conf.ConfObject()
from pyflag.Scanner import *
import pyflag.Scanner as Scanner
import pyflag.Time as Time
import re, posixpath
from NetworkScanner import *
config.add_option("POP3_PORTS", default='[110,]',
help="A list of ports to be considered for POP transactions")
class POPException(Exception):
    """ Raised if line is an invalid pop command """
class POP:
    """ Class managing the pop connection information.

    Parses a reassembled POP3 conversation line by line from fd.  Command
    handlers are dispatched by method name; commands with no dedicated
    method fall back to the dispatcher table (which maps them to NOOP).
    Retrieved message bodies are recorded in self.files as
    (message_number, (stream_offset, length)) tuples.
    """
    def __init__(self,fd):
        # fd is a file-like object over the combined client/server stream.
        self.fd=fd
        # Fallback handlers for lines that are not commands (server status
        # lines) or commands with no interesting payload.
        self.dispatcher={
            "+OK" :self.NOOP,
            "-ERR" :self.NOOP,
            "DELE" :self.NOOP,
            "QUIT" :self.NOOP,
            }
        # Credentials observed via USER/PASS.
        self.username=''
        self.password=''
        # Offsets/lengths of message bodies seen via TOP/RETR.
        self.files=[]

    def read_multi_response(self):
        """ Reads the next few lines off fd and returns a combined response """
        result=''
        while 1:
            line = self.fd.readline()
            # A lone "." line terminates a multi-line POP3 response.
            if not line or line=='.\r\n':
                return result
            ## This cleans out escaped lines as mentioned in the RFC
            if line.startswith('.'): line=line[1:]
            result+=line

    def NOOP(self,args):
        """ A do nothing parser """

    def CAPA(self,args):
        ## We just ignore this
        self.read_multi_response()

    def USER(self,args):
        # Consume the server's status line and remember the username.
        response=self.fd.readline()
        self.username=args[0]

    def PASS(self,args):
        # Only record the password when the server accepted the login.
        response=self.fd.readline()
        if response.startswith("+OK"):
            self.password=args[0]
            pyflaglog.log(pyflaglog.DEBUG,"Login for %s successful with password %s" % (self.username,self.password))

    def STAT(self,args):
        """ We ignore STAT commands """
        response=self.fd.readline()

    def LIST(self,args):
        """ We ignore LIST commands """
        self.read_multi_response()

    def UIDL(self,args):
        # Unique-ID listing; consumed and ignored.
        self.read_multi_response()

    #GJ: We _really_ needed to handle this command
    def TOP(self,args):
        """ Record the offset/length of a partial message retrieval. """
        ## Read the first line to see if it has been successful:
        response=self.fd.readline()
        if response.startswith("+OK"):
            start = self.fd.tell()
            data = self.read_multi_response()
            length = len(data)
            pyflaglog.log(pyflaglog.DEBUG,"Message %s starts at %s in stream and is %s long" % (args[0],start,length))
            self.files.append((args[0],(start,length)))

    def RETR(self,args):
        """ Record the offset/length of a full message retrieval. """
        ## Read the first line to see if it has been successful:
        response=self.fd.readline()
        if response.startswith("+OK"):
            start = self.fd.tell()
            data = self.read_multi_response()
            length = len(data)
            pyflaglog.log(pyflaglog.DEBUG,"Message %s starts at %s in stream and is %s long" % (args[0],start,length))
            self.files.append((args[0],(start,length)))

    def parse(self):
        """ Consume one line of the conversation.

        Returns 0 at end of stream, otherwise the line just handled.
        Raises POPException for unknown commands or handler failures.
        """
        line = self.fd.readline().strip()
        if not line: return 0
        tmp = line.split(" ")
        command=tmp[0]
        args=tmp[1:]
        ## Dispatch the command handler:
        try:
            # First try a method of this class named after the command...
            self.__class__.__dict__[command](self,args)
        except KeyError,e:
            try:
                # ...then fall back to the dispatcher table.
                self.dispatcher[command](args)
            except KeyError:
                raise POPException("POP: Command %r not implemented." % (command))
        except Exception,e:
            raise POPException("POP: Unable to parse line: %s." % (line))
        return line
class EmailTables(FlagFramework.EventHandler):
    # Event handler that provisions the case-database tables used by the
    # email/POP scanners.
    def create(self, dbh, case):
        ## This table stores common usernames/passwords:
        dbh.execute(
            """ CREATE TABLE if not exists `passwords` (
            `inode_id` INT,
            `username` VARCHAR(255) NOT NULL,
            `password` VARCHAR(255) NOT NULL,
            `type` VARCHAR(255) NOT NULL
            ) """)
class POPScanner(StreamScannerFactory):
    """ Collect information about POP transactions.

    Replays a reassembled POP3 stream through the POP parser, creates a
    VFS node for every retrieved message, and stores any credentials seen.
    """
    default = True
    group = 'NetworkScanners'
    def process_stream(self, stream, factories):
        forward_stream, reverse_stream = self.stream_to_server(stream, "POP3")
        if not reverse_stream or not forward_stream: return
        combined_inode = "I%s|S%s/%s" % (stream.fd.name, forward_stream,reverse_stream)
        pyflaglog.log(pyflaglog.DEBUG,"Openning %s for POP3" % combined_inode)
        ## We open the file and scan it for emails:
        fd = self.fsfd.open(inode=combined_inode)
        inode_id = 0
        p=POP(fd)
        # Consume the stream command by command; parse() returns 0 at EOF.
        # Unparseable lines are logged and skipped rather than aborting.
        while 1:
            try:
                if not p.parse():
                    break
            except POPException,e:
                pyflaglog.log(pyflaglog.DEBUG,"%s" % e)
        # Each entry in p.files is (message_number, (offset, length)).
        for f in p.files:
            ## Add a new VFS node
            offset, length = f[1]
            new_inode="%s|o%s:%s" % (combined_inode,offset, length)
            ds_timestamp = Time.convert(stream.ts_sec, case=self.case, evidence_tz="UTC")
            date_str = ds_timestamp.split(" ")[0]
            path, inode, inode_id = self.fsfd.lookup(inode=combined_inode)
            path=posixpath.normpath(path+"/../../../../../")
            inode_id = self.fsfd.VFSCreate(None,new_inode,
                                           "%s/POP/%s/Message_%s" % (path, date_str,
                                                                     f[0]),
                                           mtime=stream.ts_sec,
                                           size = length
                                           )
            ## Scan the new file using the scanner train. If
            ## the user chose the RFC2822 scanner, we will be
            ## able to understand this:
            self.scan_as_file(new_inode, factories)
        ## If there is any authentication information in here,
        ## we save it for Ron:
        dbh = DB.DBO(self.case)
        if p.username and p.password:
            dbh.execute("insert into passwords set inode_id=%r,username=%r,password=%r,type='POP3'",(
                inode_id, p.username, p.password))
    class Scan(StreamTypeScan):
        # Only streams magic-typed as POP requests are handed to us.
        types = [ "protocol/x-pop-request" ]
import pyflag.Magic as Magic
class POPRequstStream(Magic.Magic):
    """ Detect POP Request stream """
    # NOTE(review): class name is missing the "e" in "Request"; kept as-is
    # since it may be referenced by name elsewhere in the project.
    type = "POP Request Stream"
    mime = "protocol/x-pop-request"
    default_score = 20
    # Each common POP command found near the start of the stream adds to
    # the score; a stream needs several hits to be classified as POP.
    regex_rules = [
        ## These are the most common pop commands - we look for at least 5 of them:
        ( "CAPA", (0,50)),
        ( "\nUSER ", (0,50)),
        ( "\nPASS ", (0,50)),
        ( "LIST\r\n", (0,50)),
        ( "UIDL\r\n", (0,50)),
        ( "RETR [0-9]+", (0,50)),
        ( "DELE [0-9]+", (0,50))
        ]
    # A sample session that must score at least 100 under the rules above.
    samples = [
        ( 100, \
          """CAPA
USER thadon
PASS password1
CAPA
LIST
UIDL
RETR 1
DELE 1
QUIT
""")]
class POPResponseStream(Magic.Magic):
""" Detect POP Response stream """
type = "POP Response Stream"
mime = "protocol/x-pop-response"
default_sco |
AlexBoogaard/Sick-Beard-Torrent-Edition | sickbeard/providers/bithdtv.py | Python | gpl-3.0 | 11,100 | 0.011622 | ###################################################################################################
# Author: Jodi Jones <venom@gen-x.co.nz>
# URL: https://github.com/VeNoMouS/Sick-Beard
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
###################################################################################################
import os
import re
import sys
import urllib
import generic
import datetime
import sickbeard
import exceptions
from lib import requests
from xml.sax.saxutils import escape
from sickbeard import db
from sickbeard import logger
from sickbeard import tvcache
from sickbeard.exceptions import ex
from sickbeard.common import Quality
from sickbeard.common import Overview
from sickbeard import show_name_helpers
class BitHDTVProvider(generic.TorrentProvider):
###################################################################################################
    def __init__(self):
        generic.TorrentProvider.__init__(self, "BitHDTV")
        # NOTE(review): BitHDTVCache is expected to be defined later in
        # this module -- confirm it exists past this chunk.
        self.cache = BitHDTVCache(self)
        self.name = "BitHDTV"
        # The HTTP session is created lazily on first login (see getURL).
        self.session = None
        self.supportsBacklog = True
        self.url = 'https://www.bit-hdtv.com/'
        logger.log("[" + self.name + "] initializing...")
###################################################################################################
    def isEnabled(self):
        # Provider is active only when enabled in the application settings.
        return sickbeard.BITHDTV
###################################################################################################
    def imageName(self):
        # Icon file shown for this provider in the UI.
        return 'bithdtv.png'
###################################################################################################
    def getQuality(self, item):
        # item is a (title, url) tuple; quality is inferred from the title.
        quality = Quality.nameQuality(item[0])
        return quality
###################################################################################################
    def _get_title_and_url(self, item):
        # Search results are already stored as (title, url) tuples.
        return item
###################################################################################################
def _get_airbydate_season_range(self, season):
if season == None:
return ()
year, month = map(int, season.split('-'))
min_date = datetime.date(year, month, 1)
if month == 12:
max_date = datetime.date(year, month, 31)
else:
max_date = datetime.date(year, month+1, 1) - datetime.timedelta(days=1)
return (min_date, max_date)
###################################################################################################
def _get_season_search_strings(self, show, season=None, scene=False):
search_string = []
if not show:
return []
myDB = db.DBConnection()
if show.air_by_date:
(min_date, max_date) = self._get_airbydate_season_range(season)
sqlResults = myDB.select("SELECT * FROM tv_episodes WHERE showid = ? AND airdate >= ? AND airdate <= ?", [show.tvdbid, min_date.toordinal(), max_date.toordinal()])
else:
sqlResults = myDB.select("SELECT * FROM tv_episodes WHERE showid = ? AND scene_season = ?", [show.tvdbid, season])
for sqlEp in sqlResults:
if show.getOverview(int(sqlEp["status"])) in (Overview.WANTED, Overview.QUAL):
if show.air_by_date:
for show_name in set(show_name_helpers.allPossibleShowNames(show)):
ep_string = show_name_helpers.sanitizeSceneName(show_name) +' '+ str(datetime.date.fromordinal(sqlEp["ai | rdate"])).replace('-', '.')
search_string.append(ep_string)
else:
for show_name in set(show_name_helpers.allPossibleShowNames(show)):
ep_string = show_name_helpers.sanitizeSceneName(show_name) +' '+ sickbeard.config.naming_ep_type[2] | % {'seasonnumber': season, 'episodenumber': int(sqlEp["episode"])}
search_string.append(ep_string)
return search_string
###################################################################################################
    def _get_episode_search_strings(self, ep_obj):
        # Build one search string per possible show name for one episode:
        # "<show> <yyyy.mm.dd>" for air-by-date shows, "<show> SxxExx"
        # (scene numbering) otherwise.
        search_string = []
        if not ep_obj:
            return []
        if ep_obj.show.air_by_date:
            for show_name in set(show_name_helpers.allPossibleShowNames(ep_obj.show)):
                ep_string = show_name_helpers.sanitizeSceneName(show_name) +' '+ str(ep_obj.airdate).replace('-', '.')
                search_string.append(ep_string)
        else:
            for show_name in set(show_name_helpers.allPossibleShowNames(ep_obj.show)):
                ep_string = show_name_helpers.sanitizeSceneName(show_name) +' '+ sickbeard.config.naming_ep_type[2] % {'seasonnumber': ep_obj.scene_season, 'episodenumber': ep_obj.scene_episode}
                search_string.append(ep_string)
        return search_string
###################################################################################################
    def _doSearch(self, search_params, show=None):
        # Query both the TV (cat=10) and TV Pack (cat=12) sections,
        # accumulating matches into self.search_results via parseResults().
        self.search_results = []
        logger.log("[" + self.name + "] Performing Search: {0}".format(search_params))
        search_params = search_params.replace(" ","+")
        logger.log("[" + self.name + "] Searching TV Section")
        self.parseResults(self.url + "torrents.php?search=" + urllib.quote(search_params) + "&cat=10")
        logger.log("[" + self.name + "] Searching TV Pack Section")
        self.parseResults(self.url + "torrents.php?search=" + urllib.quote(search_params) + "&cat=12")
        if len(self.search_results):
            logger.log("[" + self.name + "] parseResults() Some results found.")
        else:
            logger.log("[" + self.name + "] parseResults() No results found.")
        return self.search_results
###################################################################################################
    def parseResults(self, searchUrl):
        # Scrape (title, torrent-url) pairs out of the search results HTML
        # and append them to self.search_results.
        data = self.getURL(searchUrl)
        if data:
            logger.log("[" + self.name + "] parseResults() URL: " + searchUrl, logger.DEBUG)
            for torrent in re.compile("<td class=detail align=left><a title=\"(?P<title>.*?)\" href.*?<font class=small></font><a href=\"/(?P<url>.*?)\.torrent\"></a>",re.MULTILINE|re.DOTALL).finditer(data):
                # Dots in titles are turned into spaces for release matching.
                item = (torrent.group('title').replace('.',' ').decode('utf-8', 'ignore'), self.url + torrent.group('url') + ".torrent")
                self.search_results.append(item)
                logger.log("[" + self.name + "] parseResults() Title: " + torrent.group('title').decode('utf-8', 'ignore'), logger.DEBUG)
        else:
            logger.log("[" + self.name + "] parseResults() Error no data returned!!")
        return self.search_results
###################################################################################################
def getURL(self, url, headers=None):
response = None
if not self.session:
if not self._doLogin():
return response
if not headers:
headers = []
try:
response = self.session.get(url, verify=False)
except (requests.exceptions.ConnectionError, requests.exceptions.HTTPError), e:
logger.log("[" + self.name + "] getURL() Error loading " + self.name + " URL: " + ex(e), logger.ERROR)
|
deathglitch/metarigging | python/metarig/cogcomponent.py | Python | mit | 7,363 | 0.011544 | import pymel.core as pm
import component
import keyablecomponent
import metautil.nsutil as nsutil
import metautil.miscutil as miscutil
import metautil.rigutil as rigutil
import rigging
class CogComponent(keyablecomponent.KeyableComponent):
    """Centre-of-gravity (COG) rig component.

    Builds a duplicate joint chain driven by a single COG grip; the
    duplicate chain is parent-constrained back onto the original (bind)
    joints, so animating the grip moves the skeleton.
    """
    LATEST_VERSION = 1
    @staticmethod
    def create(metanode_parent, start_joint, end_joint, side, region, scale=10):
        """Create a COG component spanning start_joint..end_joint.

        `scale` sizes the generated grip shape. Returns the new
        CogComponent metanode.
        """
        # Build everything inside the parent's namespace, then restore it.
        orig_ns = pm.Namespace.getCurrent()
        new_ns = nsutil.get_namespace_object(metanode_parent)
        new_ns.setCurrent()
        cognode = keyablecomponent.KeyableComponent.create(metanode_parent, CogComponent.__name__, CogComponent.LATEST_VERSION, side, region, component_pivot = start_joint)
        chain_between = miscutil.get_nodes_between(start_joint, end_joint, criteria = lambda x: isinstance(x, pm.nodetypes.Joint))
        do_not_touch = cognode.get_do_not_touch()
        ctrls_group = cognode.get_ctrls_group()
        # Duplicate the bind chain and rig it with a single COG grip.
        cog_chain = rigging.create_duplicate_chain(start_joint, end_joint)
        cog_chain_result = rigging.rig_cog_chain(cog_chain[0], cog_chain[-1], scale)
        cog_grip = cog_chain_result['grips'][0]
        cog_zero = rigutil.get_zero_transform(cog_grip)
        pm.parent(cog_zero, ctrls_group)
        cog_chain[0].setParent(do_not_touch)
        # Wire the pieces onto the metanode for later lookup.
        cognode.connect_node_to_metanode(cog_grip, 'cog_grip')
        cognode.connect_ordered_nodes_to_metanode(cog_chain, 'cog_chain')
        cognode.connect_ordered_nodes_to_metanode(chain_between[:-1], 'bind_joints')
        cog_grip.lock_and_hide_attrs(['sx', 'sy', 'sz', 'v'])
        # Drive the original chain from the duplicate.
        pm.parentConstraint(cog_chain[0], chain_between[0])
        orig_ns.setCurrent()
        return cognode
    def get_grips(self):
        """Return this component's grips wrapped as rigging.Grip objects."""
        grips = self.cog_grip.listConnections()
        grips = map(lambda x: rigging.Grip(x), grips)
        return grips
    def get_cog_grip(self):
        """Return the raw COG grip transform."""
        return self.cog_grip.listConnections()[0]
    def get_bind_joints(self):
        """Return the bind joints driven by this component."""
        return self.bind_joints.listConnections()
    def select_grips(self):
        """Select every grip of this component in the scene."""
        pm.select(self.get_grips())
    def key_grips(self):
        """Set a keyframe on every grip at the current time."""
        grips = self.get_grips()
        pm.setKeyframe(grips)
    def to_default_pose(self):
        """Reset every grip's attributes to their default values."""
        grips = self.get_grips()
        for grip in grips:
            miscutil.reset_attrs(grip)
    def attach_to_component(self, attach_component, location, point = True, orient = True):
        """Constrain this component's ctrls group to another component.

        `location` names the attach point on `attach_component`; `point`
        and `orient` select which channels are constrained.
        """
        if not isinstance(attach_component, component.Component):
            raise StandardError("can't connect to metanode, {0}, it is not a component".format(attach_component))
        parent_node = attach_component._find_attach_point(location)
        grip_group = self.get_ctrls_group()
        attrs = grip_group.translate.children() + grip_group.rotate.children()
        # NOTE(review): only metautil submodules are imported at the top of
        # this file, so `metautil.unlock_and_show_attrs` and
        # `metautil.lock_and_hide_attr_objects` look like NameErrors at
        # runtime -- confirm which helper module provides these.
        metautil.unlock_and_show_attrs(attrs)
        if point:
            # Replace any existing translate constraint before adding ours.
            grip_cons = grip_group.tx.listConnections(d=0) + grip_group.ty.listConnections(d=0) + grip_group.tz.listConnections(d=0)
            for grip_con in grip_cons:
                if pm.objExists(grip_con):
                    pm.delete(grip_con)
            pm.parentConstraint(parent_node, grip_group, w=1, mo=1, skipRotate = ["x", "y", "z"])
            # Record the attachment on the metanode.
            attach_component.attached_components >> self.attach_point_component
            self.attach_point_location.set(location)
        if orient:
            # Replace any existing rotate constraint before adding ours.
            grip_cons = grip_group.rx.listConnections(d=0) + grip_group.ry.listConnections(d=0) + grip_group.rz.listConnections(d=0)
            for grip_con in grip_cons:
                if pm.objExists(grip_con):
                    pm.delete(grip_con)
            pm.parentConstraint(parent_node, grip_group, w=1, mo=1, skipTranslate = ["x", "y", "z"])
            attach_component.attached_components >> self.attach_orient_component
            self.attach_orient_location.set(location)
        metautil.lock_and_hide_attr_objects(attrs)
        return
    def attach_to_joint(self, attach_joint, point = True, orient = True):
        """Constrain this component's ctrls group directly to a joint."""
        grip_group = self.get_ctrls_group()
        attrs = grip_group.translate.children() + grip_group.rotate.children()
        # NOTE(review): see attach_to_component about the `metautil` name.
        metautil.unlock_and_show_attrs(attrs)
        if point:
            # Replace any existing translate constraint before adding ours.
            grip_cons = grip_group.tx.listConnections(d=0) + grip_group.ty.listConnections(d=0) + grip_group.tz.listConnections(d=0)
            for grip_con in grip_cons:
                if pm.objExists(grip_con):
                    pm.delete(grip_con)
            pm.parentConstraint(attach_joint, grip_group, w=1, mo=1, skipRotate = ["x", "y", "z"])
            self.attach_point_location.set(attach_joint.name())
        if orient:
            # Replace any existing rotate constraint before adding ours.
            grip_cons = grip_group.rx.listConnections(d=0) + grip_group.ry.listConnections(d=0) + grip_group.rz.listConnections(d=0)
            for grip_con in grip_cons:
                if pm.objExists(grip_con):
                    pm.delete(grip_con)
            pm.parentConstraint(attach_joint, grip_group, w=1, mo=1, skipTranslate = ["x", "y", "z"])
            self.attach_orient_location.set(attach_joint.name())
        metautil.lock_and_hide_attr_objects(attrs)
        return
    def attach_to_skeleton(self, namespace = None):
        '''Attaches grips to a baked skeleton in the specified namespace.

        Returns [grips, constraints] for use with bake_and_detach().
        '''
        cog_grip = map(lambda x: rigging.Grip(x), self.cog_grip.listConnections())
        bind_joints = self.bind_joints.listConnections()
        if namespace is None:
            namespace = miscutil.get_namespace(bind_joints[0])
        bind_joints = map(lambda x: x.swapNamespace(namespace), bind_joints)
        const = miscutil.parent_constraint_safe(bind_joints[0], cog_grip, 0)
        constraints = [const]
        # Fixed: previously returned the undefined name `cog_grips`.
        return [cog_grip, constraints]
    def bake_and_detach(self, objects, constraints):
        '''Bakes grips and detaches from baked skeleton.'''
        start_time = miscutil.get_start_time()
        end_time = miscutil.get_end_time()
        miscutil.bake(objects = objects, time = (start_time, end_time))
        pm.delete(constraints)
        return
    def bake_to_skeleton(self):
        """Bake the component's animation down onto its bind joints."""
        bind_joints = self.bind_joints.listConnections()
        start_time = miscutil.get_start_time()
        end_time = miscutil.get_end_time()
        # Fixed: previously baked the undefined name `objects`.
        miscutil.bake(objects = bind_joints, time = (start_time, end_time))
        return
    def _find_attach_point(self, location):
        # The COG component exposes a single attach point: its last grip.
        return self.get_grips()[-1]
    def remove(self, bake = False):
        '''remove everything about this rig implementation'''
        if bake:
            self.bake_to_skeleton()
        grip_group = self.get_ctrls_group()
        dnt_group = self.get_do_not_touch()
        pm.delete([self, dnt_group, grip_group])
        return
daniel-j/lutris | lutris/util/yaml.py | Python | gpl-3.0 | 853 | 0 | """Utility functions for YAML handling"""
# pylint: disable=no-member
import yaml
from lutris.util.log import logger
from lutris.util.system import path_exists
def read_yaml_from_file(filename):
    """Parse the YAML document at `filename`, returning {} on any failure."""
    if not path_exists(filename):
        return {}
    with open(filename, "r") as yaml_file:
        try:
            # An empty document parses to None; normalize that to a dict.
            return yaml.safe_load(yaml_file) or {}
        except (yaml.scanner.ScannerError, yaml.parser.ParserError):
            logger.error("error parsing file %s", filename)
            return {}
def write_yaml_to_file(filepath, config):
    """Serialize `config` as block-style YAML into `filepath`."""
    if not filepath:
        raise ValueError("Missing filepath")
    with open(filepath, "w") as filehandler:
        filehandler.write(yaml.dump(config, default_flow_style=False))
irgmedeiros/folhainvest | tests/context.py | Python | bsd-2-clause | 107 | 0.018692 | # -*- coding: utf-8 -*-
import sys
import os
sys.path.insert(0, os.path.abspath('.. | '))
import fol | hainvest |
yanweifu/reweighted-ws | learning/models/tests/test_nade.py | Python | agpl-3.0 | 797 | 0.005019 | import unittest
import numpy as np
import theano
import theano.tensor as T
from test_rws import RWSLayerTest, RWSTopLayerTest
# Unit Under Test
from learning.models.nade import NADE, | NADETop
#-----------------------------------------------------------------------------
class TestNADETop(RWSTopLayerTest, unittest.TestCase):
    # Runs the shared RWS top-layer test suite against a small NADETop.
    def setUp(self):
        # Number of samples drawn per test case.
        self.n_samples = 10
        self.layer = NADETop(
            n_X=8,
            n_hid=8,
        )
        self.layer.setup()
cla | ss TestNADE(RWSLayerTest, unittest.TestCase):
def setUp(self):
self.n_samples = 10
self.layer = NADE(
n_X=16,
n_Y=8,
n_hid=8,
)
self.layer.setup()
|
jucimarjr/IPC_2017-1 | lista08/lista08_lista01_questao05.py | Python | apache-2.0 | 1,357 | 0.006752 | #----------------------------------------------- | -----------------------------------------------------------------------
# Introdução a Programação de Computadores - IPC
# Universidade do Estado do Amazonas - UEA
# Prof. Jucimar Jr
# Antonio Diego Furtado da Silva 1715310004
# João Victor de Cordeiro 1515140036
# Matheus de Oliveira Marques 1515310514
# Reinaldo da Silva Varas 1715310054
# Silas Castro de Mendonça 1715310066
#
# Lista01 Questão: 5)Faça um programa com uma função chamada somaImposto.
# A função possui dois parâmetros formais: taxaImposto, que é a quantia de imposto sobre vendas expressa em porcentagem
# e custo, que é o custo de um item antes do imposto.
# A função “altera” o valor de custo para incluir o imposto sobre vendas.
#----------------------------------------------------------------------------------------------------------------------
# Read the pre-tax cost and the sales-tax rate (as a percentage) from the user.
custo = float(input('Informe o custo do produto: '))
taxaImposto = float(input('Informe o valor da taxa de imposto: '))
def somaImposto(taxaImposto, custo):
    """Return `custo` increased by `taxaImposto` percent of sales tax."""
    imposto = custo * taxaImposto / 100.0
    return custo + imposto
print("Preço sem Impostos: %.2f" %custo)
custo = somaImposto(taxaImposto, custo)
print("O preço com impostos é: %.2f" %custo)
| |
eddiejessup/clustrous | clustrous/cluster.py | Python | bsd-3-clause | 5,950 | 0 | import numpy as np
from scipy.cluster import hierarchy as hc
from clustrous._periodic_cluster import get_cluster_list
def cluster(r, r_max):
    """Partition points into clusters by single-linkage Euclidean distance.

    Two points whose separation is below the cut-off belong to the same
    cluster (single-linkage criterion).

    Parameters
    ----------
    r: np.ndarray[ndim=2, shape=(n, d)]
        The points to cluster; `n` points in `d` dimensions.
    r_max: float
        How distant points can be while still placed in the same cluster.

    Returns
    -------
    labels: np.ndarray[ndim=1, shape=(n,), dtype=np.int]
        Integer cluster label per point; equal labels mean same cluster.
    """
    # Squared-Euclidean metric is cheaper, so the cut-off is squared too.
    links = hc.linkage(r, metric='sqeuclidean', method='single')
    return hc.fcluster(links, criterion='distance', t=r_max * r_max)
def cluster_periodic(r, r_max, L):
    """Single-linkage clustering of points in a periodic box.

    Points closer than `r_max` under periodic boundary conditions share a
    cluster (single-linkage criterion).

    Parameters
    ----------
    r: np.ndarray[ndim=2, shape=(n, d)]
        The points to cluster; `n` points in `d` dimensions.
    r_max: float
        How distant points can be while still placed in the same cluster.
    L: float
        The size of the system.

    Returns
    -------
    labels: np.ndarray[ndim=1, shape=(n,), dtype=np.int]
        Integer cluster label per point; equal labels mean same cluster.
    """
    # The extension returns a circular linked list (a closed loop is one
    # cluster) using Fortran's 1-based indexing; shift to 0-based before
    # decoding the loops into labels.
    links = get_cluster_list(r, r_max, L) - 1
    return _get_labels(links)
def n_clusters(labels):
    """Return how many distinct clusters appear in `labels`."""
    distinct = {label for label in labels}
    return len(distinct)
def cluster_sizes(labels):
    """Get the number of points in each cluster, sorted by label.

    Parameters
    ----------
    labels: np.ndarray[ndim=1, dtype=int]
        Cluster labels starting at either 0 (e.g. from `cluster_periodic`)
        or 1 (e.g. from `cluster`).

    Returns
    -------
    counts: np.ndarray[ndim=1, dtype=int]
        Points per cluster, ordered by ascending label.

    Raises
    ------
    ValueError
        If the smallest label is neither 0 nor 1.
    """
    offset = labels.min()
    if offset not in (0, 1):
        # Anything else indicates a caller bug; previously this raised a
        # bare Exception with no message.
        raise ValueError('labels must start at 0 or 1, got %d' % offset)
    return np.bincount(labels - offset)
def biggest_cluster_fraction(clust_sizes):
    """Calculate the fraction of points that lie in the biggest cluster.

    This measure roughly indicates how concentrated points are into a few
    clusters, but it is a poor one: very different distributions can give
    identical results. For `n` total points, all of these score the same:

    - [n - 10, 10]
    - [n - 10, 5, 5]
    - [n - 10, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]

    Parameters
    ----------
    clust_sizes: list[int]
        The number of points in each cluster.

    Returns
    -------
    m: float
        Cluster measure.
    """
    sizes = np.array(clust_sizes)
    total = float(sizes.sum())
    return sizes.max() / total
def _clumpiness(n_cluster, n_tot):
frac = n_cluster / n_tot
clumpiness = (n_cluster - 1.0) / (n_tot - 1.0)
return frac * clumpiness
def clumpiness(clust_sizes):
    """Calculate how 'clumpy' a set of clustered points are.

    The measure indicates the degree to which points belong to a few clusters.
    This is calculated by finding the clumpiness of a point in each cluster,
    which is its contribution to the overall population. This is then
    weighted by the number of points with that clumpiness value, and the sum
    taken of this over all clusters.

    This is chosen to give intuitively sensible orderings to distributions of
    points. For example, these are examples of its values for some populations:

    - [6]: 1
    - [5, 1]: 0.67
    - [4, 2]: 0.47
    - [4, 1, 1]: 0.4
    - [3, 3]: 0.4
    - [2, 1, 1, 1, 1, 1]: 0.05
    - [1, 1, 1, 1, 1, 1]: 0

    Parameters
    ----------
    clust_sizes: list[int]
        The number of points in each cluster.

    Returns
    -------
    k: float
        Clumpiness measure.
    """
    # `_clumpiness` is applied element-wise over the array of cluster
    # sizes; summing gives the population-weighted total.
    return np.sum(_clumpiness(clust_sizes, float(clust_sizes.sum())))
def _get_labels(linked_list):
"""Convert clusters represented as a linked list into a labels array.
For example, `[1, 0, 2, 3]` represents a system where the firs | t two samples
are in a single cluster, because the first points to the second, then the
second points to the first. The last two point to themselves, and are thus
in a cluster with a single sample.
This function converts such a list into an array of integers, whose entries
begin at zero and increase consecutively. Each integer identifies a sample
as belonging to a particular cluster, labelled by that integer.
Parameters
----------
linked_list: list[int]
List of integers whose minimum must be zero,
and maximum must be `len(linked_list)`.
Returns
-------
labels: np.ndarray[dtype=int, shape=(len(linked_list),)]
Cluster labels, starting at zero.
"""
# `-1` indicates a sample that has not been visited yet.
labels = np.full([len(linked_list)], -1, dtype=np.int)
# Initial label is zero.
label = 0
# Each unvisited index represents a new cluster.
for i_base in range(len(linked_list)):
if labels[i_base] == -1:
i = i_base
# Iterate through the linked samples setting their label.
while True:
labels[i] = label
i_next = linked_list[i]
# When we arrive back at the start,
# we are finished with that cluster.
if i_next == i_base:
break
i = i_next
label += 1
return labels
|
vitan/blaze | blaze/json.py | Python | bsd-3-clause | 566 | 0.0053 | from __future__ import absolute_import, division, print_function
import json
from toolz import map, partial
import gzip
from .resource import reso | urce
__all__ = 'resource',
@resource.register('.*\.json')
def resource_json(uri, open=open):
    """Load a JSON resource from `uri`.

    First tries to parse the file as a single JSON document. If that fails
    (e.g. the file is newline-delimited JSON), falls back to lazily parsing
    one JSON value per line.
    """
    f = open(uri)
    try:
        data = json.load(f)
        f.close()
        return data
    except ValueError:
        # Fix: close the first handle (previously leaked) before
        # re-opening. The second handle is intentionally left open
        # because `map` consumes it lazily.
        f.close()
        f = open(uri)
        data = map(json.loads, f)
        return data
@resource.register('.*\.json.gz')
def resource_json_gzip(uri):
    # Same as resource_json, but reading through gzip in text mode.
    return resource_json(uri, open=partial(gzip.open, mode='rt'))
|
somewun/Calcwylator | Calcwylator.py | Python | gpl-3.0 | 4,221 | 0.007818 | #Calcwylator using Python 3 and tkinter
print("""Calcwylator Version 0.1.0 Copyright (C) 2016 Somewun
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.""")
from tkinter import *
from math import *
from tkinter.messa | gebox import *
#menu functions
def About():
    # "Help" menu entry: show basic usage info in a message box.
    showinfo("Help", "This is a simple calculator using Python 3 and Tkinter. \nThis is going to be the help information.")
def license():
    # "License" menu entry: display the GPLv3 notice verbatim.
    # NOTE(review): this shadows the builtin `license`; harmless here but
    # worth renaming if the builtin is ever needed.
    showinfo("License",
    """Calcwylator, a simple lightweight calculator. Version 0.1.0.
Copyright (C) 2016 Somewun
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program.  If not, see <http://www.gnu.org/licenses/>.""")
#Maths functions
def evaluate(event=None):
    """Evaluate the expression in the entry box and display the answer.

    Accepts an optional `event` argument so the same callback works both
    as a Button command (called with no arguments) and as the <Return>
    key binding, which passes a Tk Event object; previously the binding
    raised TypeError because the function took no parameters.

    NOTE(review): this feeds raw user input to eval(); acceptable for a
    local toy calculator, but never expose it to untrusted input.
    """
    # Evaluate once (the original evaluated the expression twice).
    result = eval(entry.get())
    res.configure(text="Answer: " + str(result))
    print(float(result))
#Entry key functions
def _make_inserter(char):
    """Build a button callback that appends `char` to the entry field."""
    def inserter():
        entry.insert(END, char)
    return inserter

# Digit, operator and bracket callbacks. These replace eighteen identical
# one-line functions; the names are unchanged because the Button
# definitions below reference them directly.
zero = _make_inserter("0")
one = _make_inserter("1")
two = _make_inserter("2")
three = _make_inserter("3")
four = _make_inserter("4")
five = _make_inserter("5")
six = _make_inserter("6")
seven = _make_inserter("7")
eight = _make_inserter("8")
nine = _make_inserter("9")
point = _make_inserter(".")
times = _make_inserter("*")
divide = _make_inserter("/")
plus = _make_inserter("+")
minus = _make_inserter("-")
openbrack = _make_inserter("(")
closebrack = _make_inserter(")")

def clear():
    """Wipe the whole expression from the entry field."""
    entry.delete(0, END)
# --- Main window and menu bar -------------------------------------------
w = Tk()
w.title("Calcwylator")
w.minsize(300,200)
menu = Menu(w)
w.config(menu=menu)
filemenu = Menu(menu)
menu.add_cascade(label="File", menu=filemenu)
filemenu.add_command(label="Exit", command=w.quit)
helpmenu = Menu(menu)
menu.add_cascade(label="About", menu=helpmenu)
helpmenu.add_command(label="Help", command=About)
helpmenu.add_command(label="License", command=license)
# --- Expression entry and result label ----------------------------------
Label(w, text="Your Expression:").grid(row=0,column=0)
entry = Entry(w)
# NOTE(review): <Return> passes a Tk Event to the callback -- confirm the
# bound function accepts (and ignores) that argument.
entry.bind("<Return>", evaluate)
entry.grid(row=0, column=1)
res = Label(w)
res.grid(row=0, column=2)
# --- Keypad: digits, operators and brackets on a 4x5 grid ---------------
Button(text='7', command=seven).grid(row=1, column=0, ipadx=20)
Button(text='8', command=eight).grid(row=1, column=1, ipadx=20)
Button(text='9', command=nine).grid(row=1, column=2, ipadx=20)
Button(text='4', command=four).grid(row=2, column=0, ipadx=20)
Button(text='5', command=five).grid(row=2, column=1, ipadx=20)
Button(text='6', command=six).grid(row=2, column=2, ipadx=20)
Button(text='1', command=one).grid(row=3, column=0, ipadx=20)
Button(text='2', command=two).grid(row=3, column=1, ipadx=20)
Button(text='3', command=three).grid(row=3, column=2, ipadx=20)
Button(text='0', command=zero).grid(row=4, column=1, ipadx=20)
Button(text='.', command=point).grid(row=4, column=0, ipadx=21)
Button(text='=', command=evaluate).grid(row=4, column=2, ipadx=19)
Button(text='+', command=plus).grid(row=1, column=3, ipadx=18)
Button(text='-', command=minus).grid(row=2, column=3, ipadx=20)
Button(text='*', command=times).grid(row=3, column=3, ipadx=20)
Button(text='/', command=divide).grid(row=4, column=3, ipadx=20)
Button(text='(', command=openbrack).grid(row=1, column=4, ipadx=20)
Button(text=')', command=closebrack).grid(row=2, column=4, ipadx=20)
#Button(text='ans', command=ans).grid(row=3, column=4, ipadx=20)
Button(text='C', command=clear).grid(row=4, column=4, ipadx=20)
# Hand control to Tk's event loop.
w.mainloop()
artemrizhov/pymorphy | example.py | Python | mit | 1,319 | 0.00453 | #coding: utf-8
import re
import pymorphy
import pymorphy.utils
text = u'''
Сяпала Калуша по напушке и увазила бутявку. И волит:
— Калушата, | калушаточки! Бутявка!
Калушата присяпали и бутявку стрямкали. И подудонились.
А Калуша волит:
— Оее, оее! Бутявка-то некузявая!
Калушата бутявку вычучили.
Бутявка вздребезнулась, сопритюкнулась и усяпала с напушки.
А Калуша волит:
— Бутявок не трямкают. Бутявки дюбые и зюмо-зюмо некузявые. От бутявок дудонятся.
А бутявка волит за напушкой:
— Калушата подудонились! Калушата подудонились! Зюмо некузявые! Пуськи бятые!
'''
r = re.compile('[\W | +-]',re.U)
words = r.split(text.upper())
# тут нужно прописать путь до папки со словарями
morph = pymorphy.get_morph('dicts/converted/ru')
for word in words:
if word:
print word
info = morph.get_graminfo(word)
for form in info:
pymorphy.utils.pprint(form)
|
mschwager/CTFd | CTFd/admin/challenges.py | Python | apache-2.0 | 7,890 | 0.002155 | from flask import current_app as app, render_template, request, redirect, jsonify, url_for, Blueprint
from CTFd.utils import admins_only, is_admin, unix_time, get_config, \
set_config, sendmail, rmdir, create_image, delete_image, run_image, container_status, container_ports, \
container_stop, container_start, get_themes, cache, upload_file
from CTFd.models import db, Teams, Solves, Awards, Containers, Challenges, WrongKeys, Keys, Tags, Files, Tracking, Pages, Config, DatabaseError
from CTFd.plugins.keys import get_key_class, KEY_CLASSES
from CTFd.plugins.challenges import get_chal_class, CHALLENGE_CLASSES
import os
admin_challenges = Blueprint('admin_challenges', __name__)
@admin_challenges.route('/admin/chal_types', methods=['GET'])
@admins_only
def admin_chal_types():
    """Return a JSON map of challenge-type id to human-readable name."""
    data = {
        class_id: CHALLENGE_CLASSES[class_id].name
        for class_id in CHALLENGE_CLASSES
    }
    return jsonify(data)
@admin_challenges.route('/admin/chals', methods=['POST', 'GET'])
@admins_only
def admin_chals():
    """Admin challenge listing.

    GET renders the admin challenges page. POST returns JSON describing
    every challenge plus the fraction of scoring teams that solved it.
    """
    if request.method == 'POST':
        chals = Challenges.query.add_columns('id', 'name', 'value', 'description', 'category', 'hidden', 'max_attempts').order_by(Challenges.value).all()

        # Fix: `filter(not Teams.banned)` applied Python's `not` to the
        # Column object instead of generating SQL; compare against False
        # so SQLAlchemy emits the intended `banned = false` predicate.
        teams_with_points = db.session.query(Solves.teamid).join(Teams).filter(
            Teams.banned == False).group_by(Solves.teamid).count()

        json_data = {'game': []}
        for x in chals:
            solve_count = Solves.query.join(Teams, Solves.teamid == Teams.id).filter(
                Solves.chalid == x[1], Teams.banned == False).count()
            # Guard against division by zero when nobody has scored yet.
            if teams_with_points > 0:
                percentage = (float(solve_count) / float(teams_with_points))
            else:
                percentage = 0.0
            json_data['game'].append({
                'id': x.id,
                'name': x.name,
                'value': x.value,
                'description': x.description,
                'category': x.category,
                'hidden': x.hidden,
                'max_attempts': x.max_attempts,
                'percentage_solved': percentage
            })

        db.session.close()
        return jsonify(json_data)
    else:
        return render_template('admin/chals.html')
@admin_challenges.route('/admin/tags/<int:chalid>', methods=['GET', 'POST'])
@admins_only
def admin_tags(chalid):
if request.method == 'GET':
tags = Tags.query.filter_by(chal=chalid).all()
json_data = {'tags': []}
for x in tags:
json_data['tags'].append({'id': x.id, 'chal': x.chal, 'tag': x.tag})
return jsonify(json_data)
elif request.method == 'POST':
newtags = request.form.getlist('tags[]')
for x in newtags:
tag = Tags(chalid, x)
db.session.add(tag)
db.session.commit()
db.session.close()
return '1'
@admin_challenges.route('/admin/tags/<int:tagid>/delete', methods=['POST'])
@admins_only
def admin_delete_tags(tagid):
if request.method == 'POST':
tag = Tags.query.filter_by(id=tagid).first_or_404()
db.session.delete(tag)
db.session.commit()
db.session.close()
return '1'
@admin_challenges.route('/admin/files/<int:chalid>', methods=['GET', 'POST'])
@admins_only
def admin_files(chalid):
if request.method == 'GET':
files = Files.query.filter_by(chal=chalid).all()
json_data = {'files': []}
for x in files:
json_data['files'].append({'id': x.id, 'file': x.location})
return jsonify(json_data)
if request.method == 'POST':
if request.form['method'] == "delete":
f = Files.query.filter_by(id=request.form['file']).first_or_404()
upload_folder = os.path.join(app.root_path, app.config['UPLOAD_FOLDER'])
if os.path.exists(os.path.join(upload_folder, f.location)): # Some kind of os.path.isfile issue on Windows...
os.unlink(os.path.join(upload_folder, f.location))
db.session.delete(f)
db.session.commit()
db.session.close()
return '1'
elif request.form['method'] == "upload":
files = request.files.getlist('files[]')
for f in files:
upload_file(file=f, chalid=chalid)
db.session.commit()
db.session.close()
return '1'
@admin_challenges.route('/admin/chal/<int:chalid>/<prop>', methods=['GET'])
@admins_only
def admin_get_values(chalid, prop):
challenge = Challenges.query.filter_by(id=chalid).first_or_404()
if prop == 'keys':
chal_keys = Keys.query.filter_by(chal=challenge.id).all()
json_data = {'keys': []}
for x in chal_keys:
json_data['keys'].append({'id': x.id, 'key': x.flag, 'type': x.key_type, 'type_name': get_key_class(x.key_type).name})
return jsonify(json_data)
elif prop == 'tags':
tags = Tags.query.filter_by(chal=chalid).a | ll()
json_data = {'tags': []}
for x in tags:
json_data['tags'].append({'id': x.id, 'chal': x.chal, 'tag': x.tag})
return jsonify(json_data)
@admin_challenges.route('/admin/chal/new', methods=['GET', 'POST'])
@admins_only
def admin_create_chal():
if request.method == 'POST':
files = request.files.getlist('files[]') |
# Create challenge
chal = Challenges(request.form['name'], request.form['desc'], request.form['value'], request.form['category'], int(request.form['chaltype']))
if 'hidden' in request.form:
chal.hidden = True
else:
chal.hidden = False
max_attempts = request.form.get('max_attempts')
if max_attempts and max_attempts.isdigit():
chal.max_attempts = int(max_attempts)
db.session.add(chal)
db.session.flush()
flag = Keys(chal.id, request.form['key'], int(request.form['key_type[0]']))
if request.form.get('keydata'):
flag.data = request.form.get('keydata')
db.session.add(flag)
db.session.commit()
for f in files:
upload_file(file=f, chalid=chal.id)
db.session.commit()
db.session.close()
return redirect(url_for('admin_challenges.admin_chals'))
else:
return render_template('admin/chals/create.html')
@admin_challenges.route('/admin/chal/delete', methods=['POST'])
@admins_only
def admin_delete_chal():
challenge = Challenges.query.filter_by(id=request.form['id']).first_or_404()
WrongKeys.query.filter_by(chalid=challenge.id).delete()
Solves.query.filter_by(chalid=challenge.id).delete()
Keys.query.filter_by(chal=challenge.id).delete()
files = Files.query.filter_by(chal=challenge.id).all()
Files.query.filter_by(chal=challenge.id).delete()
for file in files:
upload_folder = app.config['UPLOAD_FOLDER']
folder = os.path.dirname(os.path.join(os.path.normpath(app.root_path), upload_folder, file.location))
rmdir(folder)
Tags.query.filter_by(chal=challenge.id).delete()
Challenges.query.filter_by(id=challenge.id).delete()
db.session.commit()
db.session.close()
return '1'
@admin_challenges.route('/admin/chal/update', methods=['POST'])
@admins_only
def admin_update_chal():
challenge = Challenges.query.filter_by(id=request.form['id']).first_or_404()
challenge.name = request.form['name']
challenge.description = request.form['desc']
challenge.value = int(request.form.get('value', 0)) if request.form.get('value', 0) else 0
challenge.max_attempts = int(request.form.get('max_attempts', 0)) if request.form.get('max_attempts', 0) else 0
challenge.category = request.form['category']
challenge.hidden = 'hidden' in request.form
db.session.add(challenge)
db.session.commit()
db.session.close()
return redirect(url_for('admin_challenges.admin_chals'))
|
antoinecarme/pyaf | tests/artificial/transf_RelativeDifference/trend_Lag1Trend/cycle_0/ar_/test_artificial_32_RelativeDifference_Lag1Trend_0__100.py | Python | bsd-3-clause | 273 | 0.084249 | import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 32 , FREQ = | 'D', seed = 0, t | rendtype = "Lag1Trend", cycle_length = 0, transform = "RelativeDifference", sigma = 0.0, exog_count = 100, ar_order = 0); |
ahmedaljazzar/edx-platform | lms/djangoapps/courseware/module_render.py | Python | agpl-3.0 | 49,790 | 0.002752 | """
Module rendering
"""
import hashlib
import json
import logging
from collections import OrderedDict
from functools import partial
from completion.models import BlockCompletion
from completion import waffle as completion_waffle
from django.conf import settings
from django.contrib.auth.models import User
from django.core.cache import cache
from django.template.context_processors import csrf
from django.core.exceptions import PermissionDenied
from django.urls import reverse
from django.http import Http404, HttpResponse, HttpResponseForbidden
from django.views.decorators.clickjacking import xframe_options_exempt
from django.views.decorators.csrf import csrf_exempt
from edx_django_utils.monitoring import set_custom_metrics_for_course_key, set_monitoring_transaction_name
from edx_proctoring.services import ProctoringService
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey, UsageKey
from requests.auth import HTTPBasicAuth
from six import text_type
from xblock.core import XBlock
from xblock.django.request import django_to_webob_request, webob_to_django_response
from xblock.exceptions import NoSuchHandlerError, NoSuchViewError
from xblock.reference.plugins import FSService
from xblock.runtime import KvsFieldData
import static_replace
from capa.xqueue_interface import XQueueInterface
from courseware.access import get_user_role, h | as_access
from courseware.entrance_exams import user_can_skip_entrance_exam, user_has_passed_entrance_exam
from courseware.masquerade import (
MasqueradingKeyValueStore,
filter_displayed_blocks,
is_masquerading_as_specific_student,
setup_masquerade
)
from courseware.model_data import DjangoKeyValueStore, FieldDataCache
from edxmako.shortcuts import render_to_string
from eventtracking import tracker
from lms.djangoapps.grades.signals.signals | import SCORE_PUBLISHED
from lms.djangoapps.lms_xblock.field_data import LmsFieldData
from lms.djangoapps.lms_xblock.models import XBlockAsidesConfig
from lms.djangoapps.lms_xblock.runtime import LmsModuleSystem
from lms.djangoapps.verify_student.services import XBlockVerificationService
from openedx.core.djangoapps.bookmarks.services import BookmarksService
from openedx.core.djangoapps.crawlers.models import CrawlersConfig
from openedx.core.djangoapps.credit.services import CreditService
from openedx.core.djangoapps.util.user_utils import SystemUser
from openedx.core.lib.gating.services import GatingService
from openedx.core.lib.license import wrap_with_license
from openedx.core.lib.url_utils import quote_slashes, unquote_slashes
from openedx.core.lib.xblock_utils import request_token as xblock_request_token
from openedx.core.lib.xblock_utils import (
add_staff_markup,
replace_course_urls,
replace_jump_to_id_urls,
replace_static_urls,
wrap_xblock
)
from student.models import anonymous_id_for_user, user_by_anonymous_id
from student.roles import CourseBetaTesterRole
from track import contexts
from util import milestones_helpers
from util.json_request import JsonResponse
from django.utils.text import slugify
from xmodule.util.sandboxing import can_execute_unsafe_code, get_python_lib_zip
from xblock_django.user_service import DjangoXBlockUserService
from xmodule.contentstore.django import contentstore
from xmodule.error_module import ErrorDescriptor, NonStaffErrorDescriptor
from xmodule.exceptions import NotFoundError, ProcessingError
from xmodule.lti_module import LTIModule
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.exceptions import ItemNotFoundError
from xmodule.x_module import XModuleDescriptor
from .field_overrides import OverrideFieldData
log = logging.getLogger(__name__)
if settings.XQUEUE_INTERFACE.get('basic_auth') is not None:
REQUESTS_AUTH = HTTPBasicAuth(*settings.XQUEUE_INTERFACE['basic_auth'])
else:
REQUESTS_AUTH = None
XQUEUE_INTERFACE = XQueueInterface(
settings.XQUEUE_INTERFACE['url'],
settings.XQUEUE_INTERFACE['django_auth'],
REQUESTS_AUTH,
)
# TODO: course_id and course_key are used interchangeably in this file, which is wrong.
# Some brave person should make the variable names consistently someday, but the code's
# coupled enough that it's kind of tricky--you've been warned!
class LmsModuleRenderError(Exception):
"""
An exception class for exceptions thrown by module_render that don't fit well elsewhere
"""
pass
def make_track_function(request):
'''
Make a tracking function that logs what happened.
For use in ModuleSystem.
'''
import track.views
def function(event_type, event):
return track.views.server_track(request, event_type, event, page='x_module')
return function
def toc_for_course(user, request, course, active_chapter, active_section, field_data_cache):
'''
Create a table of contents from the module store
Return format:
{ 'chapters': [
{'display_name': name, 'url_name': url_name, 'sections': SECTIONS, 'active': bool},
],
'previous_of_active_section': {..},
'next_of_active_section': {..}
}
where SECTIONS is a list
[ {'display_name': name, 'url_name': url_name,
'format': format, 'due': due, 'active' : bool, 'graded': bool}, ...]
where previous_of_active_section and next_of_active_section have information on the
next/previous sections of the active section.
active is set for the section and chapter corresponding to the passed
parameters, which are expected to be url_names of the chapter+section.
Everything else comes from the xml, or defaults to "".
chapters with name 'hidden' are skipped.
NOTE: assumes that if we got this far, user has access to course. Returns
None if this is not the case.
field_data_cache must include data from the course module and 2 levels of its descendants
'''
with modulestore().bulk_operations(course.id):
course_module = get_module_for_descriptor(
user, request, course, field_data_cache, course.id, course=course
)
if course_module is None:
return None, None, None
toc_chapters = list()
chapters = course_module.get_display_items()
# Check for content which needs to be completed
# before the rest of the content is made available
required_content = milestones_helpers.get_required_content(course.id, user)
# The user may not actually have to complete the entrance exam, if one is required
if user_can_skip_entrance_exam(user, course):
required_content = [content for content in required_content if not content == course.entrance_exam_id]
previous_of_active_section, next_of_active_section = None, None
last_processed_section, last_processed_chapter = None, None
found_active_section = False
for chapter in chapters:
# Only show required content, if there is required content
# chapter.hide_from_toc is read-only (bool)
display_id = slugify(chapter.display_name_with_default_escaped)
local_hide_from_toc = False
if required_content:
if unicode(chapter.location) not in required_content:
local_hide_from_toc = True
# Skip the current chapter if a hide flag is tripped
if chapter.hide_from_toc or local_hide_from_toc:
continue
sections = list()
for section in chapter.get_display_items():
# skip the section if it is hidden from the user
if section.hide_from_toc:
continue
is_section_active = (chapter.url_name == active_chapter and section.url_name == active_section)
if is_section_active:
found_active_section = True
section_context = {
'display_name': section.display_name_with_default_escaped,
'url_name': section.url_name,
'format': section.format if section.format is not None else '',
'due': section.due,
'active': is_se |
chaen/DIRAC | Core/Utilities/Graphs/__init__.py | Python | gpl-3.0 | 4,843 | 0.050795 | """ DIRAC Graphs package provides tools for creation of various plots to provide
graphical representation of the DIRAC Monitoring and Accounting data
The DIRAC Graphs package is derived from the GraphTool plotting package of the
CMS/Phedex Project by ... <to be added>
"""
from __future__ import print_function
__RCSID__ = "$Id$"
# Make sure the the Agg backend is used despite arbitrary configuration
import matplotlib
matplotlib.use( 'agg' )
import DIRAC
from DIRAC.Core.Utilities.Graphs.Graph import Graph
from DIRAC.Core.Utilities.Graphs.GraphUtilities import evalPrefs
common_prefs = {
'background_color':'white',
'figure_padding':12,
'plot_grid':'1:1',
'plot_padding':0,
'frame':'On',
'font' : 'Lucida Grande',
'font_family' : 'sans-serif',
'dpi':100,
'legend':True,
'legend_position':'bottom',
'legend_max_rows':99,
'legend_max_columns':4,
'square_axis':False,
'scale_data': None,
'scale_ticks': None
}
graph_large_prefs = {
'width':1000,
'height':700,
'text_size':8,
'subtitle_size':10,
'subtitle_padding':5,
'title_size':15,
'title_padding':5,
'text_padding':5,
'figure_padding':15,
'plot_title_size':12,
'legend_width':980,
'legend_height':150,
'legend_padding':20,
'limit_labels':15,
'graph_time_stamp':True
}
graph_normal_prefs = {
'width':800,
'height':600,
'text_size':8,
'subtitle_size':10,
'subtitle_padding':5,
'title_size':15,
'title_padding':10,
'text_padding':5,
'figure_padding':12,
'plot_title_size':12,
'legend_width':780,
'legend_height':120,
'legend_padding':20,
'limit_labels':15,
'graph_time_stamp':True,
'label_text_size' : 14
}
graph_small_prefs = {
'width':450,
'height':330,
'text_size':10,
'subtitle_size':5,
'subtitle_padding':4,
'title_size':10,
'title_padding':6,
'text_padding':3,
'figure_padding':10,
'plot_title_size':8,
'legend_width':430,
'legend_height':50,
'legend_padding':10,
'limit_labels':15,
'graph_time_stamp':True
}
graph_thumbnail_prefs = {
'width':100,
'height':80,
'text_size':6,
'subtitle_size':0,
'subtitle_padding':0,
'title_size':8,
'title_padding':2,
'text_padding':1,
'figure_padding':2,
'plot_title':'NoTitle',
'legend':False,
'plot_axis_grid':False,
'plot_axis':False,
'plot_axis_labels':False,
'graph_time_stamp':False,
'tight_bars':True
}
def graph( data, fileName, *args, **kw ):
prefs = evalPrefs( *args, **kw )
graph_size = prefs.get('graph_size', 'normal')
if graph_size == "normal":
defaults = graph_normal_prefs
elif graph_size == "small":
defaults = graph_small_prefs
elif graph_size == "thumbnail":
defaults = graph_thumbnail_prefs
elif graph_size == "large":
| defaults = graph_large_prefs
graph = | Graph()
graph.makeGraph( data, common_prefs, defaults, prefs )
graph.writeGraph( fileName, 'PNG' )
return DIRAC.S_OK( {'plot':fileName} )
def __checkKW( kw ):
if 'watermark' not in kw:
kw[ 'watermark' ] = "%s/DIRAC/Core/Utilities/Graphs/Dwatermark.png" % DIRAC.rootPath
return kw
def barGraph( data, fileName, *args, **kw ):
kw = __checkKW( kw )
graph( data, fileName, plot_type = 'BarGraph', statistics_line = True, *args, **kw )
def lineGraph( data, fileName, *args, **kw ):
kw = __checkKW( kw )
graph( data, fileName, plot_type = 'LineGraph', statistics_line = True, *args, **kw )
def curveGraph( data, fileName, *args, **kw ):
kw = __checkKW( kw )
graph( data, fileName, plot_type = 'CurveGraph', statistics_line = False, *args, **kw )
def cumulativeGraph( data, fileName, *args, **kw ):
kw = __checkKW( kw )
graph( data, fileName, plot_type = 'LineGraph', cumulate_data = True, *args, **kw )
def pieGraph( data, fileName, *args, **kw ):
kw = __checkKW( kw )
prefs = {'xticks':False, 'yticks':False, 'legend_position':'right'}
graph( data, fileName, prefs, plot_type = 'PieGraph', *args, **kw )
def qualityGraph( data, fileName, *args, **kw ):
kw = __checkKW( kw )
prefs = {'plot_axis_grid':False}
graph( data, fileName, prefs, plot_type = 'QualityMapGraph', *args, **kw )
def textGraph( text, fileName, *args, **kw ):
kw = __checkKW( kw )
prefs = {'text_image':text}
graph( {}, fileName, prefs, *args, **kw )
def histogram( data, fileName, bins, *args, **kw ):
try:
from pylab import hist
except:
print("No pylab module available")
return
kw = __checkKW( kw )
values, vbins, _patches = hist( data, bins )
histo = dict( zip( vbins, values ) )
span = ( max( data ) - min( data ) ) / float( bins ) * 0.95
kw = __checkKW( kw )
graph( histo, fileName, plot_type = 'BarGraph', span = span, statistics_line = True, *args, **kw )
|
metacloud/molecule | setup.py | Python | mit | 12,584 | 0 | #! /usr/bin/env python
# Copyright (c) 2019 Red Hat, Inc.
# Copyright (c) 2015-2018 Cisco Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
"""Molecule distribution package setuptools installer."""
import setuptools
HAS_DIST_INFO_CMD = False
try:
import setuptools.command.dist_info
HAS_DIST_INFO_CMD = True
except ImportError:
"""Setuptools version is too old."""
ALL_STRING_TYPES = tuple(map(type, ('', b'', u'')))
MIN_NATIVE_SETUPTOOLS_VERSION = 34, 4, 0
"""Minimal setuptools having good read_configuration implementation."""
RUNTIME_SETUPTOOLS_VERSION = tuple(map(int, setuptools.__version__.split('.')))
"""Setuptools imported now."""
READ_CONFIG_SHIM_NEEDED = (
RUNTIME_SETUPTOOLS_VERSION < MIN_NATIVE_SETUPTOOLS_VERSION
)
def str_if_nested_or_str(s):
"""Turn input into a native string if possible."""
if isinstance(s, ALL_STRING_TYPES):
return str(s)
if isinstance(s, (list, tuple)):
return type(s)(map(str_if_nested_or_str, s))
if isinstance(s, (dict, )):
return stringify_dict_contents(s)
return s
def stringify_dict_contents(dct):
"""Turn dict keys and values into native strings."""
return {
str_if_nested_or_str(k): str_if_nested_or_str(v)
for k, v in dct.items()
}
if not READ_CONFIG_SHIM_NEEDED:
from setuptools.config import read_configuration, ConfigOptionsHandler
import setuptools.config
import setuptools.dist
# Set default value for 'use_scm_version'
setattr(setuptools.dist.Distribution, 'use_scm_version', False)
# Attach bool parser to 'use_scm_version' option
class ShimConfigOptionsHandler(ConfigOptionsHandler):
"""Extension class for ConfigOptionsHandler."""
@property
def parsers(self):
"""Return an option mapping with default data type parsers."""
_orig_parsers = super(ShimConfigOptionsHandler, self).parsers
return dict(use_scm_version=self._parse_bool, **_orig_parsers)
def parse_section_packages__find(self, section_options):
find_kwargs = super(
ShimConfigOptionsHandler, self
).parse_section_packages__find(section_options)
return stringify_dict_contents(find_kwargs)
setuptools.config.ConfigOptionsHandler = ShimConfigOptionsHandler
else:
"""This is a shim for setuptools<required."""
import functools
import io
import json
import sys
import warnings
try:
import setuptools.config
def filter_out_unknown_section(i):
def chi(self, *args, **kwargs):
i(self, *args, **kwargs)
self.sections = {
s: v for s, v in self.sections.items()
if s != 'packages.find'
}
return chi
setuptools.config.ConfigHandler.__init__ = filter_out_unknown_section(
setuptools.config.ConfigHandler.__init__,
)
except ImportError:
pass
def ignore_unknown_options(s):
@functools.wraps(s)
def sw(**attrs):
try:
ignore_warning_regex = (
r"Unknown distribution option: "
r"'(license_file|project_urls|python_requires)'"
)
warnings.filterwarnings(
'ignore',
message=ignore_warning_regex,
category=UserWarning,
module='distutils.dist',
)
return s(**attrs)
finally:
warnings.resetwarnings()
return sw
def parse_predicates(python_requires):
import itertools
import operator
sorted_operators_map = tuple(sorted(
{
'>': operator.gt,
'<': operator.lt,
'>=': operator.ge,
'<=': operator.le,
'==': operator.eq,
'!=': operator.ne,
'': operator.eq,
}.items(),
key=lambda i: len(i[0]),
reverse=True,
))
def is_decimal(s):
return type(u'')(s).isdecimal()
conditions = map(str.strip, python_requires.split(','))
for c in conditions:
for op_sign, op_func in sorted_operators_map:
if not c.startswith(op_sign):
continue
raw_ver = itertools.takewhile(
is_decimal,
c[len(op_sign):].strip().split('.'),
)
ver = tuple(map(int, raw_ver))
yield op_func, ver
break
def validate_required_python_or_fail(python_requires=None):
if python_requires is None:
return
python_version = sys.version_info
preds = parse_predicates(python_requires)
for op, v in preds:
py_ver_slug = python_version[:max(len(v), 3)]
condition_matches = op(py_ver_slug, v)
if not condition_matches:
raise RuntimeError(
"requires Python '{}' but the running Python is {}".
format(
python_requires,
'.'.join(map(str, python_version[:3])),
| )
)
def verify_required_python_runtime(s):
@functools.wraps(s)
def sw(**attrs):
try:
validate_required_python_or_fail(attrs.get('python_requires'))
except Runtim | eError as re:
sys.exit('{} {!s}'.format(attrs['name'], re))
return s(**attrs)
return sw
setuptools.setup = ignore_unknown_options(setuptools.setup)
setuptools.setup = verify_required_python_runtime(setuptools.setup)
try:
from configparser import ConfigParser, NoSectionError
except ImportError:
from ConfigParser import ConfigParser, NoSectionError
ConfigParser.read_file = ConfigParser.readfp
def maybe_read_files(d):
"""Read files if the string starts with `file:` marker."""
FILE_FUNC_MARKER = 'file:'
d = d.strip()
if not d.startswith(FILE_FUNC_MARKER):
return d
descs = []
for fname in map(str.strip, str(d[len(FILE_FUNC_MARKER):]).split(',')):
with io.open(fname, encoding='utf-8') as f:
descs.append(f.read())
return ''.join(descs)
def cfg_val_to_list(v):
"""Turn config val to list and filter out empty lines."""
return list(filter(bool, map(str.strip, str(v).strip().splitlines())))
def cfg_val_to_dict(v):
"""Turn config val to dict and filter out empty lines."""
return dict(
map(lambda l: list(map(str.strip, l.split('=', 1))),
filter(bool, map(str.strip, str(v).strip().splitlines())))
)
def cfg_val_to_primitive(v):
"""Parse primitive config val to appropriate data type."""
return json.loads(v.strip().lower())
def read_configuration(filepath):
"""Read metadata and optio |
jeremiah-c-leary/vhdl-style-guide | vsg/rules/package_body/rule_501.py | Python | gpl-3.0 | 563 | 0 |
from vsg.rules import token_case
from vsg import token
lTokens = []
lTokens.append(token.package_body.body_keyword)
class rule_501(token_case):
'''
This rule checks the **body** keyword has proper case.
|configuring_uppercase_and_lowercase_rules_link|
**Violation**
| .. code-block:: vhdl
packa | ge BODY FIFO_PKG is
**Fix**
.. code-block:: vhdl
package body FIFO_PKG is
'''
def __init__(self):
token_case.__init__(self, 'package_body', '501', lTokens)
self.groups.append('case::keyword')
|
c4fcm/CivilServant | tests/test_stylesheet_experiment_controller.py | Python | mit | 19,514 | 0.014861 | import pytest
import os, yaml
## SET UP THE DATABASE ENGINE
TEST_DIR = os.path.dirname(os.path.realpath(__file__))
BASE_DIR = os.path.join(TEST_DIR, "../")
ENV = os.environ['CS_ENV'] = "test"
from mock import Mock, patch
import unittest.mock
import simplejson as json
import sqlalchemy
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy import and_, or_
import glob, datetime, time, pytz, math
from app.controllers.stylesheet_experiment_controller import *
from utils.common import *
from dateutil import parser
import praw, csv, random, string
from collections import Counter
### LOAD THE CLASSES TO TEST
from app.models import *
import app.cs_logger
db_session = DbEngine(os.path.join(TEST_DIR, "../", "config") + "/{env}.json".format(env=ENV)).new_session()
log = app.cs_logger.get_logger(ENV, BASE_DIR)
def clear_all_tables():
db_session.query(FrontPage).delete()
db_session.query(SubredditPage).delete()
db_session.query(Subreddit).delete()
db_session.query(Post).delete()
db_session.query(User).delete()
db_session.query(Comment).delete()
db_session.query(Experiment).delete()
db_session.query(ExperimentThing).delete()
db_session.query(ExperimentAction).delete()
db_session.query(ExperimentThingSnapshot).delete()
db_session.query(EventHook).delete()
db_session.commit()
def setup_function(function):
clear_all_tables()
def teardown_function(function):
clear_all_tables()
@patch('praw.Reddit', autospec=True)
def test_initialize_experiment(mock_reddit):
r = mock_reddit.return_value
patch('praw.')
experiment_name = "stylesheet_experiment_test"
with open(os.path.join(BASE_DIR,"config", "experiments", experiment_name + ".yml"), "r") as f:
experiment_config = yaml.full_load(f)['test']
assert len(db_session.query(Experiment).all()) == 0
controller = StylesheetExperimentController(experiment_name, db_session, r, log)
assert len(db_session.query(Experiment).all()) == 1
experiment = controller.experiment
assert experiment.name == experiment_name
assert(experiment.controller == experiment_config['controller'])
settings = json.loads(experiment.settings_json)
for k in ['username', 'subreddit', 'subreddit_id', 'start_time', 'end_time', 'controller']:
assert settings[k] == experiment_config[k]
for condition_name in experiment_config['conditions']:
with open(os.path.join(BASE_DIR,"config", "experiments", experiment_config['conditions'][condition_name]['randomizations']), "r") as f:
conditions = []
for row in csv.DictReader(f):
conditions.append(row)
with open(os.path.join(BASE_DIR,"config", "experiments", experiment_config['conditions'][condition_name]['randomizations']), "r") as f:
nonconditions = []
for row in csv.DictReader(f):
nonconditions.append(row)
assert len(settings['conditions'][condition_name]['randomizations']) == len(conditions)
assert settings['conditions'][condition_name]['next_randomization'] == 0
@patch('praw.Reddit', autospec=True)
def test_determine_intervention_eligible(mock_reddit):
r = mock_reddit.return_value
patch('praw.')
experiment_name = "stylesheet_experiment_test"
with open(os.path.join(BASE_DIR,"config", "experiments", experiment_name + ".yml"), "r") as f:
experiment_config = yaml.full_load(f)['test']
assert len(db_session.query(Experiment).all()) == 0
controller = StylesheetExperimentController(experiment_name, db_session, r, log)
## in the case with no interventions, confirm eligibility
assert controller.determine_intervention_eligible() == True
## now create an action and confirm ineligibility outside the interval
experiment_action = ExperimentAction(
experiment_id = controller.experiment.id,
praw_key_id = "TEST",
action = "Intervention:{0}.{1}".format("TEST","TEST"),
action_object_type = ThingType.STYLESHEET.value,
action_object_id = None,
metadata_json = json.dumps({"arm":"TEST", "condition":"TEST"})
)
db_session.add(experiment_action)
db_session.commit()
assert controller.determine_intervention_eligible() == False
## now change the action and confirm eligibility within the interval
experiment_action.created_at = experiment_action.created_at - datetime.timedelta(seconds=controller.experiment_settings['intervention_interval_seconds'])
db_session.commit()
assert controller.determine_intervention_eligible() == True
## now change the end date of the experiment and confirm ineligibility
controller.experiment_settings['end_time'] = str((datetime.datetime.utcnow() - datetime.timedelta(days=1)).replace(tzinfo=pytz.utc))
#controller.experiment.settings = json.dumps(controller.experiment_settings)
#db_session.commit()
assert controller.determine_intervention_eligible() == False
@patch('praw.Reddit', autospec=True)
def test_select_condition(mock_reddit):
r = mock_reddit.return_value
patch('praw.')
experiment_name = "stylesheet_experiment_test"
with open(os.path.join(BASE_DIR,"config", "experiments", experiment_name + ".yml"), "r") as f:
experiment_config = yaml.full_load(f)['test']
controller = StylesheetExperimentController(experiment_name, db_session, r, log)
assert controller.select_condition(current_time = parser.parse("07/21/2017 00:00:00")) == "special"
assert controller.select_condition(current_time = parser.parse("07/20/2017 00:00:00")) == "normal"
@patch('praw.Reddit', autospec=True)
def test_set_stylesheet(mock_reddit):
r = mock_reddit.return_value
with open(os.path.join(BASE_DIR,"tests", "fixture_data", "stylesheet_0" + ".json"), "r") as f:
stylesheet = json.loads(f.read())
r.get_ | stylesheet.return_value = stylesheet
r.set_stylesheet.return_value = {"errors":[]}
patch('praw.')
experiment_name = "stylesheet_experiment_test"
with open(os.path.join(BASE_ | DIR,"config", "experiments", experiment_name + ".yml"), "r") as f:
experiment_config = yaml.full_load(f)['test']
controller = StylesheetExperimentController(experiment_name, db_session, r, log)
for condition in ['special', 'normal']:
for arm in ["arm_0", "arm_1"]:
assert (controller.experiment_settings['conditions'][condition]['arms'][arm] in stylesheet['stylesheet'].split("\n"))!=True
for condition in ['special', 'normal']:
for arm in ["arm_0", "arm_1"]:
line_length = len(stylesheet['stylesheet'].split("\n"))
result_lines = controller.set_stylesheet(condition, arm).split("\n")
assert controller.experiment_settings['conditions'][condition]['arms'][arm] in result_lines
assert len(result_lines) == line_length + 3
def setup_comment_monitoring(r, yesterday_posts, today_posts):
####################
## SET UP EXPERIMENT
subreddit_posts = []
with open(os.path.join(BASE_DIR,"tests", "fixture_data", "subreddit_posts_0" + ".json"), "r") as f:
subreddit_posts = [z['data'] for z in json.loads(f.read())['data']['children']]
experiment_name = "stylesheet_experiment_test"
with open(os.path.join(BASE_DIR,"config", "experiments", experiment_name + ".yml"), "r") as f:
experiment_config = yaml.full_load(f)['test']
controller = StylesheetExperimentController(experiment_name, db_session, r, log)
today = datetime.datetime.utcnow()
## add posts created yesterday
for i in list(range(0,yesterday_posts)):
post_fixture = subreddit_posts[i]
post = Post(id = post_fixture['id'],
created_at = today - datetime.timedelta(days=1),
subreddit_id = controller.subreddit_id,
post_data = json.dumps(post_fixture))
db_session.add(post)
db_session.commit()
assert db_session.query(Post).count() == yesterday_posts
## add posts created today
today_post_list = []
for i in list(range(yesterday_posts,yesterday_pos |
a-parhom/edx-platform | pavelib/paver_tests/test_paver_pytest_cmds.py | Python | agpl-3.0 | 7,433 | 0.00444 | """
Tests for the pytest paver commands themselves.
Run just this test with: paver test_lib -t pavelib/paver_tests/test_paver_pytest_cmds.py
"""
import unittest
import os
import ddt
from pavelib.utils.test.suites import SystemTestSuite, LibTestSuite
from pavelib.utils.envs import Env
XDIST_TESTING_IP_ADDRESS_LIST = '0.0.0.1,0.0.0.2,0.0.0.3'
@ddt.ddt
class TestPaverPytestCmd(unittest.TestCase):
    """
    Test Paver pytest commands
    """

    def _expected_command(self, root, test_id, pytestSubclass, run_under_coverage=True,
                          processes=0, xdist_ip_addresses=None):
        """
        Returns the command that is expected to be run for the given test spec
        and store.
        """
        report_dir = Env.REPORT_DIR / root
        shard = os.environ.get('SHARD')
        if shard:
            # '/' binds before '+', so this appends a 'shard_<N>' directory.
            report_dir = report_dir / 'shard_' + shard
        expected_statement = [
            "python",
            "-Wd",
            "-m",
            "pytest"
        ]
        if pytestSubclass == "SystemTestSuite":
            expected_statement.append("--ds={}".format('{}.envs.{}'.format(root, Env.TEST_SETTINGS)))
        expected_statement.append("--junitxml={}".format(report_dir / "nosetests.xml"))
        if xdist_ip_addresses:
            expected_statement.append('--dist=loadscope')
            # One --tx worker spec is emitted per remote ip address.
            for ip in xdist_ip_addresses.split(','):
                if processes <= 0:
                    processes = 1
                if pytestSubclass == "SystemTestSuite":
                    django_env_var_cmd = "export DJANGO_SETTINGS_MODULE={}.envs.test".format(root)
                elif pytestSubclass == "LibTestSuite":
                    if 'pavelib/paver_tests' in test_id:
                        django_env_var_cmd = "export DJANGO_SETTINGS_MODULE={}.envs.test".format(root)
                    else:
                        django_env_var_cmd = "export DJANGO_SETTINGS_MODULE='openedx.tests.settings'"
                xdist_string = '--tx {}*ssh="ubuntu@{} -o StrictHostKeyChecking=no"' \
                               '//python="source /edx/app/edxapp/edxapp_env; {}; python"' \
                               '//chdir="/edx/app/edxapp/edx-platform"' \
                               .format(processes, ip, django_env_var_cmd)
                expected_statement.append(xdist_string)
            for rsync_dir in Env.rsync_dirs():
                expected_statement.append('--rsyncdir {}'.format(rsync_dir))
        else:
            if processes == -1:
                expected_statement.append('-n auto')
                expected_statement.append('--dist=loadscope')
            elif processes != 0:
                expected_statement.append('-n {}'.format(processes))
                expected_statement.append('--dist=loadscope')
        expected_statement.extend([
            "-p no:randomly",
            test_id
        ])
        if run_under_coverage:
            expected_statement.append('--cov')
            expected_statement.append('--cov-report=')
        return expected_statement

    @ddt.data('lms', 'cms')
    def test_SystemTestSuite_suites(self, system):
        test_id = 'tests'
        suite = SystemTestSuite(system, test_id=test_id)
        assert suite.cmd == self._expected_command(system, test_id, "SystemTestSuite")

    @ddt.data('lms', 'cms')
    def test_SystemTestSuite_auto_processes(self, system):
        test_id = 'tests'
        suite = SystemTestSuite(system, test_id=test_id, processes=-1)
        assert suite.cmd == self._expected_command(system, test_id, "SystemTestSuite", processes=-1)

    @ddt.data('lms', 'cms')
    def test_SystemTestSuite_multi_processes(self, system):
        test_id = 'tests'
        suite = SystemTestSuite(system, test_id=test_id, processes=3)
        assert suite.cmd == self._expected_command(system, test_id, "SystemTestSuite", processes=3)

    @ddt.data('lms', 'cms')
    def test_SystemTestSuite_with_xdist(self, system):
        test_id = 'tests'
        suite = SystemTestSuite(system, test_id=test_id, xdist_ip_addresses=XDIST_TESTING_IP_ADDRESS_LIST)
        assert suite.cmd == self._expected_command(system, test_id, "SystemTestSuite",
                                                   xdist_ip_addresses=XDIST_TESTING_IP_ADDRESS_LIST)

    @ddt.data('lms', 'cms')
    def test_SystemTestSuite_with_xdist_multi_processes(self, system):
        test_id = 'tests'
        suite = SystemTestSuite(system, test_id=test_id, processes=2, xdist_ip_addresses=XDIST_TESTING_IP_ADDRESS_LIST)
        assert suite.cmd == self._expected_command(system, test_id, "SystemTestSuite", processes=2,
                                                   xdist_ip_addresses=XDIST_TESTING_IP_ADDRESS_LIST)

    @ddt.data('lms', 'cms')
    def test_SystemTestSuite_with_xdist_negative_processes(self, system):
        test_id = 'tests'
        suite = SystemTestSuite(system, test_id=test_id, processes=-1, xdist_ip_addresses=XDIST_TESTING_IP_ADDRESS_LIST)
        assert suite.cmd == self._expected_command(system, test_id, "SystemTestSuite", processes=-1,
                                                   xdist_ip_addresses=XDIST_TESTING_IP_ADDRESS_LIST)

    @ddt.data('common/lib/xmodule', 'pavelib/paver_tests')
    def test_LibTestSuite_suites(self, system):
        test_id = 'tests'
        suite = LibTestSuite(system, test_id=test_id)
        assert suite.cmd == self._expected_command(system, test_id, "LibTestSuite")

    @ddt.data('common/lib/xmodule', 'pavelib/paver_tests')
    def test_LibTestSuite_auto_processes(self, system):
        test_id = 'tests'
        suite = LibTestSuite(system, test_id=test_id, processes=-1)
        assert suite.cmd == self._expected_command(system, test_id, "LibTestSuite", processes=-1)

    @ddt.data('common/lib/xmodule', 'pavelib/paver_tests')
    def test_LibTestSuite_multi_processes(self, system):
        test_id = 'tests'
        suite = LibTestSuite(system, test_id=test_id, processes=3)
        assert suite.cmd == self._expected_command(system, test_id, "LibTestSuite", processes=3)

    @ddt.data('common/lib/xmodule', 'pavelib/paver_tests')
    def test_LibTestSuite_with_xdist(self, system):
        test_id = 'tests'
        suite = LibTestSuite(system, test_id=test_id, xdist_ip_addresses=XDIST_TESTING_IP_ADDRESS_LIST)
        assert suite.cmd == self._expected_command(system, test_id, "LibTestSuite",
                                                   xdist_ip_addresses=XDIST_TESTING_IP_ADDRESS_LIST)

    @ddt.data('common/lib/xmodule', 'pavelib/paver_tests')
    def test_LibTestSuite_with_xdist_multi_processes(self, system):
        test_id = 'tests'
        suite = LibTestSuite(system, test_id=test_id, processes=2, xdist_ip_addresses=XDIST_TESTING_IP_ADDRESS_LIST)
        assert suite.cmd == self._expected_command(system, test_id, "LibTestSuite", processes=2,
                                                   xdist_ip_addresses=XDIST_TESTING_IP_ADDRESS_LIST)

    @ddt.data('common/lib/xmodule', 'pavelib/paver_tests')
    def test_LibTestSuite_with_xdist_negative_processes(self, system):
        test_id = 'tests'
        suite = LibTestSuite(system, test_id=test_id, processes=-1, xdist_ip_addresses=XDIST_TESTING_IP_ADDRESS_LIST)
        assert suite.cmd == self._expected_command(system, test_id, "LibTestSuite", processes=-1,
                                                   xdist_ip_addresses=XDIST_TESTING_IP_ADDRESS_LIST)
|
apache/bloodhound | bloodhound_multiproduct/tests/resource.py | Python | apache-2.0 | 14,341 | 0.000768 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os.path
import shutil
import tempfile
import unittest
from StringIO import StringIO
from datetime import datetime
import trac.ticket.api
import trac.ticket.report
import trac.ticket.roadmap
import trac.wiki.api
from trac import resource
from trac.attachment import Attachment
from trac.ticket.model import Ticket
from trac.util.datefmt import utc
from trac.wiki.model import WikiPage
from multiproduct.api import MultiProductSystem
from multiproduct.env import ProductEnvironment
from tests.env import MultiproductTestCase
class ProductResourceTestCase(MultiproductTestCase):
    """Shared fixture for product-scoped resource tests.

    Builds a global environment plus two product environments (the
    default product and one named u'xü'), and loads the default test
    data into the global and u'xü' environments.
    """
    def setUp(self):
        # Boot the global (multi-product) test environment first; it becomes
        # the parent of both product environments created below.
        self._mp_setup()
        self.global_env = self.env
        self._load_product_from_data(self.global_env, u'xü')
        self.env = ProductEnvironment(self.global_env, self.default_product)
        self.env1 = ProductEnvironment(self.global_env, u'xü')
        self._load_default_data(self.global_env)
        self._load_default_data(self.env1)
        # Enable product system component in product context
        self.env.enable_component(MultiProductSystem)
    def tearDown(self):
        # Wipe all test data; drop references so nothing leaks between tests.
        self.global_env.reset_db()
        self.global_env = self.env = None
class ProductAttachmentResourceTestCase(ProductResourceTestCase):
    """Resource-API checks for ticket attachments in global vs product scope."""

    def setUp(self):
        ProductResourceTestCase.setUp(self)
        # Attachments live on disk, so point the environment at a throwaway
        # directory that is recreated for every test run.
        self.global_env.path = os.path.join(tempfile.gettempdir(),
                                            'trac-tempenv')
        if os.path.exists(self.global_env.path):
            shutil.rmtree(self.global_env.path)
        os.mkdir(self.global_env.path)
        # Same attachment id in both scopes, with distinct descriptions so the
        # tests can tell which environment a lookup resolved to.
        attachment = Attachment(self.global_env, 'ticket', 1)
        attachment.description = 'Global Bar'
        attachment.insert('foo.txt', StringIO(''), 0)
        attachment = Attachment(self.env1, 'ticket', 1)
        attachment.description = 'Product Bar'
        attachment.insert('foo.txt', StringIO(''), 0)
        self.resource = resource.Resource('ticket',
                                          1).child('attachment', 'foo.txt')

    def tearDown(self):
        shutil.rmtree(self.global_env.path)
        ProductResourceTestCase.tearDown(self)

    def test_global_neighborhood_attachments(self):
        target = resource.Neighborhood('global', None).child(self.resource)
        self.assertEquals("[global:] Attachment 'foo.txt' in [global:] Ticket #1",
                          resource.get_resource_description(self.env, target))
        self.assertEquals("[global:] Attachment 'foo.txt' in [global:] Ticket #1",
                          resource.get_resource_name(self.env, target))
        self.assertEquals("[global:] foo.txt ([global:] Ticket #1)",
                          resource.get_resource_shortname(self.env, target))
        self.assertEquals('Global Bar',
                          resource.get_resource_summary(self.env, target))
        self.assertEquals('http://example.org/trac.cgi/attachment/ticket/1/foo.txt',
                          resource.get_resource_url(self.env,
                                                    target, self.env.href))

    def test_product_neighborhood_attachments(self):
        target = resource.Neighborhood('product', u'xü').child(self.resource)
        self.assertEquals(u"[product:xü] Attachment 'foo.txt' in [product:xü] Ticket #1",
                          resource.get_resource_description(self.env, target))
        self.assertEquals(u"[product:xü] Attachment 'foo.txt' in [product:xü] Ticket #1",
                          resource.get_resource_name(self.env, target))
        self.assertEquals(u"[product:xü] foo.txt ([product:xü] Ticket #1)",
                          resource.get_resource_shortname(self.env, target))
        self.assertEquals('Product Bar',
                          resource.get_resource_summary(self.env, target))
        self.assertEquals('http://example.org/trac.cgi/products/x%C3%BC/attachment/ticket/1/foo.txt',
                          resource.get_resource_url(self.env,
                                                    target, self.env.href))
class ProductMilestoneResourceTestCase(ProductResourceTestCase):
    """Resource-API checks for a milestone in global vs product scope."""

    resource = resource.Resource('milestone', 'milestone1')

    def test_global_neighborhood_milestone(self):
        target = resource.Neighborhood('global', None).child(self.resource)
        label = "[global:] Milestone milestone1"
        self.assertEquals(label,
                          resource.get_resource_description(self.env, target))
        self.assertEquals(label,
                          resource.get_resource_name(self.env, target))
        self.assertEquals("milestone1",
                          resource.get_resource_shortname(self.env, target))
        self.assertEquals(label,
                          resource.get_resource_summary(self.env, target))
        self.assertEquals('http://example.org/trac.cgi/milestone/milestone1',
                          resource.get_resource_url(self.env, target,
                                                    self.env.href))

    def test_product_neighborhood_milestone(self):
        target = resource.Neighborhood('product', u'xü').child(self.resource)
        label = u"[product:xü] Milestone milestone1"
        self.assertEquals(label,
                          resource.get_resource_description(self.env, target))
        self.assertEquals(label,
                          resource.get_resource_name(self.env, target))
        self.assertEquals(u"milestone1",
                          resource.get_resource_shortname(self.env, target))
        self.assertEquals(label,
                          resource.get_resource_summary(self.env, target))
        self.assertEquals('http://example.org/trac.cgi/products/x%C3%BC/milestone/milestone1',
                          resource.get_resource_url(self.env, target,
                                                    self.env.href))
# FIXME: No resource manager for reports in core ?
class ProductReportResourceTestCase(ProductResourceTestCase):
resource = resource.Resource('report', 1)
def test_global_neighborhood_report(self):
target = resource.Neighborhood('global', None).child(self.resource)
self.assertEquals("[global:] report:1",
resource.get_resource_description(self.env, target))
self.assertEquals("[global:] report:1",
resource.get_resource_name(self.env, target))
self.assertEquals("[global:] report:1",
resource.get_resource_shortname(self.env, target))
self.assertEquals('[global:] report:1 at version None',
resource.get_resource_summary(self.env, target))
self.assertEquals('http://example.org/trac.cgi/report/1',
resource.get_resource_url(self.env,
target, self.env.href))
def test_product_neighborhood_report(self):
target = resource.Neighborhood('product', u'xü').child(self.resource)
self.assertEquals(u"[product:xü] report:1",
resource.get_resource_description(self.env, target))
self.assertEquals(u"[product:xü] report:1",
resource.get_resource_name(self.env, target))
self.assertEquals(u"[pr |
Passw/gn_GFW | build/android/gyp/proguard.py | Python | gpl-3.0 | 3,458 | 0.011857 | #!/usr/bin/env python
#
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import optparse
import os
import sys
from util import build_utils
from util import proguard_util
# Proguard optimization passes that are disabled by default; each is known
# to miscompile code unless --enable-dangerous-optimizations is passed.
_DANGEROUS_OPTIMIZATIONS = [
    "class/unboxing/enum",
    # See crbug.com/625992
    "code/allocation/variable",
    # See crbug.com/625994
    "field/propagation/value",
    "method/propagation/parameter",
    "method/propagation/returnvalue",
]
def _ParseOptions(args):
  """Parse the command line into an options object.

  GN-list flags that may be passed repeatedly (--classpath,
  --proguard-configs) are flattened into plain python lists.
  """
  parser = optparse.OptionParser()
  build_utils.AddDepfileOption(parser)
  parser.add_option('--proguard-path',
                    help='Path to the proguard executable.')
  parser.add_option('--input-paths',
                    help='Paths to the .jar files proguard should run on.')
  parser.add_option('--output-path', help='Path to the generated .jar file.')
  parser.add_option('--proguard-configs', action='append',
                    help='Paths to proguard configuration files.')
  parser.add_option('--proguard-config-exclusions',
                    default='',
                    help='GN list of paths to proguard configuration files '
                         'included by --proguard-configs, but that should '
                         'not actually be included.')
  parser.add_option('--mapping', help='Path to proguard mapping to apply.')
  parser.add_option('--is-test', action='store_true',
                    help='If true, extra proguard options for instrumentation tests will be '
                         'added.')
  parser.add_option('--classpath', action='append',
                    help='Classpath for proguard.')
  parser.add_option('--stamp', help='Path to touch on success.')
  parser.add_option('--enable-dangerous-optimizations', action='store_true',
                    help='Enable optimizations which are known to have issues.')
  parser.add_option('--verbose', '-v', action='store_true',
                    help='Print all proguard output')
  options, _ = parser.parse_args(args)

  options.classpath = [
      entry for arg in options.classpath
      for entry in build_utils.ParseGnList(arg)]
  options.proguard_configs = [
      config for arg in options.proguard_configs
      for config in build_utils.ParseGnList(arg)]
  options.proguard_config_exclusions = (
      build_utils.ParseGnList(options.proguard_config_exclusions))
  options.input_paths = build_utils.ParseGnList(options.input_paths)
  return options
def main(args):
  """Assemble the proguard command from |args| and run it when outputs are stale."""
  args = build_utils.ExpandFileArgs(args)
  options = _ParseOptions(args)

  cmd = proguard_util.ProguardCmdBuilder(options.proguard_path)
  cmd.injars(options.input_paths)
  cmd.configs(options.proguard_configs)
  cmd.config_exclusions(options.proguard_config_exclusions)
  cmd.outjar(options.output_path)
  if options.mapping:
    cmd.mapping(options.mapping)
  # De-duplicate classpath entries before handing them to proguard.
  cmd.libraryjars(list(set(options.classpath)))
  cmd.verbose(options.verbose)
  if not options.enable_dangerous_optimizations:
    cmd.disable_optimizations(_DANGEROUS_OPTIMIZATIONS)

  build_utils.CallAndWriteDepfileIfStale(
      cmd.CheckOutput,
      options,
      input_paths=cmd.GetInputs(),
      input_strings=cmd.build(),
      output_paths=cmd.GetOutputs(),
      depfile_deps=cmd.GetDepfileDeps())


if __name__ == '__main__':
  sys.exit(main(sys.argv[1:]))
|
wireservice/csvkit | csvkit/utilities/csvsort.py | Python | mit | 2,282 | 0.003067 | #!/usr/bin/env python
import agate
from csvkit.cli import CSVKitUtility, parse_column_identifiers
class CSVSort(CSVKitUtility):
    description = 'Sort CSV files. Like the Unix "sort" command, but for tabular data.'

    def add_arguments(self):
        """Register csvsort-specific command-line arguments."""
        self.argparser.add_argument(
            '-n', '--names', dest='names_only', action='store_true',
            help='Display column names and indices from the input CSV and exit.')
        self.argparser.add_argument(
            '-c', '--columns', dest='columns',
            help='A comma-separated list of column indices, names or ranges to sort by, e.g. "1,id,3-5". '
                 'Defaults to all columns.')
        self.argparser.add_argument(
            '-r', '--reverse', dest='reverse', action='store_true',
            help='Sort in descending order.')
        self.argparser.add_argument(
            '-y', '--snifflimit', dest='sniff_limit', type=int, default=1024,
            help='Limit CSV dialect sniffing to the specified number of bytes. '
                 'Specify "0" to disable sniffing entirely, or "-1" to sniff the entire file.')
        self.argparser.add_argument(
            '-I', '--no-inference', dest='no_inference', action='store_true',
            help='Disable type inference when parsing the input.')

    def main(self):
        """Read the input CSV, sort by the requested columns and write it out."""
        if self.args.names_only:
            self.print_column_names()
            return
        if self.additional_input_expected():
            self.argparser.error('You must provide an input file or piped data.')
        # -1 means "no limit": sniff the whole file.
        sniff_limit = self.args.sniff_limit if self.args.sniff_limit != -1 else None
        table = agate.Table.from_csv(
            self.input_file,
            skip_lines=self.args.skip_lines,
            sniff_limit=sniff_limit,
            column_types=self.get_column_types(),
            **self.reader_kwargs
        )
        column_ids = parse_column_identifiers(
            self.args.columns,
            table.column_names,
            self.get_column_offset()
        )
        table = table.order_by(column_ids, reverse=self.args.reverse)
        table.to_csv(self.output_file, **self.writer_kwargs)
def launch_new_instance():
    """Console-script entry point: run the csvsort utility."""
    CSVSort().run()


if __name__ == '__main__':
    launch_new_instance()
|
h2oai/h2o-dev | h2o-py/h2o/h2o.py | Python | apache-2.0 | 72,871 | 0.005201 | # -*- encoding: utf-8 -*-
"""
h2o -- module for using H2O services.
:copyright: (c) 2016 H2O.ai
:license: Apache License Version 2.0 (see LICENSE for details)
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import os
import warnings
import webbrowser
import types
from h2o.backend import H2OConnection
from h2o.backend import H2OConnectionConf
from h2o.backend import H2OLocalServer
from h2o.exceptions import H2OConnectionError, H2OValueError
from h2o.utils.config import H2OConfigReader
from h2o.utils.shared_utils import check_frame_id, deprecated, gen_header, py_tmp_key, quoted, urlopen
from h2o.utils.typechecks import assert_is_type, assert_satisfies, BoundInt, BoundNumeric, I, is_type, numeric, U
from .estimators.deeplearning import H2OAutoEncoderEstimator
from .estimators.deeplearning import H2ODeepLearningEstimator
from .estimators.deepwater import H2ODeepWaterEstimator
from .estimators.estimator_base import H2OEstimator
from .estimators.xgboost import H2OXGBoostEstimator
from .estimators.gbm import H2OGradientBoostingEstimator
from .estimators.glm import H2OGeneralizedLinearEstimator
from .estimators.glrm import H2OGeneralizedLowRankEstimator
from .estimators.kmeans import H2OKMeansEstimator
from .estimators.naive_bayes import H2ONaiveBayesEstimator
from .estimators.pca import H2OPrincipalComponentAnalysisEstimator
from .estimators.random_forest import H2ORandomForestEstimator
from .estimators.stackedensemble import H2OStackedEnsembleEstimator
from .estimators.word2vec import H2OWord2vecEstimator
from .estimators.isolation_forest import H2OIsolationForestEstimator
from .expr import ExprNode
from .frame import H2OFrame
from .grid.grid_search import H2OGridSearch
from .job import H2OJob
from .model.model_base import ModelBase
from .transforms.decomposition import H2OSVD
from .utils.debugging import * # NOQA
from .utils.compatibility import * # NOQA
from .utils.compatibility import PY3
logging.basicConfig()
# An IPython deprecation warning is triggered after h2o.init(). Remove this once the deprecation has been resolved
warnings.filterwarnings('ignore', category=DeprecationWarning, module='.*/IPython/.*')
h2oconn = None # type: H2OConnection
def connect(server=None, url=None, ip=None, port=None, https=None, verify_ssl_certificates=None, auth=None,
            proxy=None, cookies=None, verbose=True, config=None):
    """
    Connect to an existing H2O server, remote or local.

    Either pass a ``server`` (an :class:`H2OLocalServer` instance), or describe the
    target with ``url`` or ``ip`` + ``port`` + ``https``. When ``config`` is supplied,
    its connection parameters are used and the other arguments are ignored.

    :param server: An H2OLocalServer instance to connect to (optional).
    :param url: Full URL of the server to connect to (can be used instead of `ip` + `port` + `https`).
    :param ip: The ip address (or host name) of the server where H2O is running.
    :param port: Port number that H2O service is listening to.
    :param https: Set to True to connect via https:// instead of http://.
    :param verify_ssl_certificates: When using https, setting this to False will disable SSL certificates verification.
    :param auth: Either a (username, password) pair for basic authentication, an instance of h2o.auth.SpnegoAuth
        or one of the requests.auth authenticator objects.
    :param proxy: Proxy server address.
    :param cookies: Cookie (or list of) to add to request
    :param verbose: Set to False to disable printing connection status messages.
    :param config: Connection configuration mapping; the parameters may be nested under a "connect_params" key.
    :returns: the new :class:`H2OConnection` object.
    """
    global h2oconn
    if config:
        # A config either wraps its parameters under "connect_params" or is
        # the parameter mapping itself.
        params = config["connect_params"] if "connect_params" in config else config
        h2oconn = _connect_with_conf(params)
    else:
        h2oconn = H2OConnection.open(server=server, url=url, ip=ip, port=port, https=https,
                                     auth=auth, verify_ssl_certificates=verify_ssl_certificates,
                                     proxy=proxy, cookies=cookies, verbose=verbose)
        if verbose:
            h2oconn.cluster.show_status()
    return h2oconn
def api(endpoint, data=None, json=None, filename=None, save_to=None):
    """
    Perform a REST API request to a previously connected server.

    Mostly intended for internal use, but occasionally useful for direct access
    to the backend H2O server. Takes the same parameters as
    :meth:`H2OConnection.request <h2o.backend.H2OConnection.request>`, and the
    type checks are performed there as well.
    """
    _check_connection()
    request_args = dict(data=data, json=json, filename=filename, save_to=save_to)
    return h2oconn.request(endpoint, **request_args)
def connection():
    """Return the current :class:`H2OConnection` handler (``None`` until :func:`connect` succeeds)."""
    return h2oconn
def version_check():
    """Used to verify that h2o-python module and the H2O server are compatible with each other."""
    from .__init__ import __version__ as ver_pkg
    ci = h2oconn.cluster
    if not ci:
        raise H2OConnectionError("Connection not initialized. Did you run h2o.connect()?")
    ver_h2o = ci.version
    # "SUBST_PROJECT_VERSION" is the placeholder left when the real version
    # was never substituted into the package (a development build).
    if ver_pkg == "SUBST_PROJECT_VERSION": ver_pkg = "UNKNOWN"
    if str(ver_h2o) != str(ver_pkg):
        branch_name_h2o = ci.branch_name
        build_number_h2o = ci.build_number
        # Tailor the error message to what is known about the server build.
        if build_number_h2o is None or build_number_h2o == "unknown":
            raise H2OConnectionError(
                "Version mismatch. H2O is version {0}, but the h2o-python package is version {1}. "
                "Upgrade H2O and h2o-Python to latest stable version - "
                "http://h2o-release.s3.amazonaws.com/h2o/latest_stable.html"
                "".format(ver_h2o, ver_pkg))
        elif build_number_h2o == "99999":
            raise H2OConnectionError(
                "Version mismatch. H2O is version {0}, but the h2o-python package is version {1}. "
                "This is a developer build, please contact your developer."
                "".format(ver_h2o, ver_pkg))
        else:
            raise H2OConnectionError(
                "Version mismatch. H2O is version {0}, but the h2o-python package is version {1}. "
                "Install the matching h2o-Python version from - "
                "http://h2o-release.s3.amazonaws.com/h2o/{2}/{3}/index.html."
                "".format(ver_h2o, ver_pkg, branch_name_h2o, build_number_h2o))
    # Check age of the install
    if ci.build_too_old:
        print("Warning: Your H2O cluster version is too old ({})! Please download and install the latest "
              "version from http://h2o.ai/download/".format(ci.build_age))
def init(url=None, ip=None, port=None, name=None, https=None, insecure=None, username=None, password=None,
cookies=None, proxy=None, start_h2o=True, nthreads=-1, ice_root=None, log_dir=None, log_level=None,
enable_assertions=True, max_mem_size=None, min_mem_size=None, strict_version_check=None, ignore_config=False,
ext | ra_classpath=None, jvm_custom_args=None, bind_to_localhost=True, **kwargs):
"""
Attempt to connect to a local server, or if not successful start a new server and connect to it.
:param url: Full URL of the server to connect | to (can be used instead of `ip` + `port` + `https`).
:param ip: The ip address (or host name) of the server where H2O is running.
:param port: Port number that H2O service is listening to.
:param name: cloud name. If None while connecting to an existing cluster it will not check the cloud name.
If set then will connect only if the target cloud name matches. If no instance is found and decides to start a local
one then this will be used as the cloud name or a random one will be generated if set to None.
:param https: Set to True to connect via https:// instead of http://.
:param insecure: When using https, setting this to True will disable SSL certificates verification.
:param username: Username and
:param password: Pas |
Crystalnix/serverauditor-sshconfig | termius/porting/providers/securecrt/parser.py | Python | bsd-3-clause | 3,259 | 0 | # -*- coding: utf-8 -*-
"""Module with SecureCRT parser."""
from os.path import expanduser
class SecureCRTConfigParser(object):
    """SecureCRT xml parser.

    Walks an exported SecureCRT configuration tree and adapts its
    "Sessions" entries into Termius host dictionaries.
    """

    # Session names that hold SecureCRT metadata rather than real hosts.
    meta_sessions = ['Default']

    def __init__(self, xml):
        """Construct parser instance.

        :param xml: root element of the SecureCRT configuration xml.
        """
        self.xml = xml
        self.tree = {}

    def parse_hosts(self):
        """Parse SecureCRT Sessions into a nested dict of hosts and groups."""
        sessions = list(self.get_element_by_name(list(self.xml), 'Sessions'))
        self.parse_sessions(sessions, self.tree)
        return self.tree

    def parse_sessions(self, sessions, parent_node):
        """Recursively parse SecureCRT sessions into ``parent_node``."""
        for session in sessions:
            if session.get('name') not in self.meta_sessions:
                if not self.is_session_group(session):
                    host = self.make_host(session)
                    if not host:
                        # Sessions without a usable hostname are skipped.
                        continue
                    parent_node[host['label']] = host
                else:
                    # Folders become nested dicts tagged with '__group'.
                    parent_node[session.get('name')] = {'__group': True}
                    self.parse_sessions(
                        list(session),
                        parent_node[session.get('name')]
                    )

    def is_session_group(self, session):
        """Check node element type: no Hostname child means it is a folder."""
        return self.get_element_by_name(list(session), 'Hostname') is None

    def parse_identity(self):
        """Parse SecureCRT SSH2 key paths.

        Returns a ``(private_key_path, public_key_path)`` tuple, or None
        when no identity file is configured.
        """
        identity = self.get_element_by_name(list(self.xml), 'SSH2')
        if identity is None:
            return None
        identity_filename = self.get_element_by_name(
            list(identity),
            'Identity Filename V2'
        )
        if not self.check_attribute(identity_filename):
            return None
        path = identity_filename.text.split('/')
        # The last path segment looks like "<pub_key_name>::<suffix>"; the
        # private key shares the same name without the file extension.
        public_key_name = path[-1].split('::')[0]
        private_key_name = public_key_name.split('.')[0]
        if path[0].startswith('$'):
            # Leading "$..." variable: replace it with the home directory.
            path.pop(0)
            path.insert(0, expanduser("~"))
        path[-1] = public_key_name
        public_key_path = '/'.join(path)
        path[-1] = private_key_name
        private_key_path = '/'.join(path)
        return private_key_path, public_key_path

    def make_host(self, session):
        """Adapt SecureCRT Session to Termius host."""
        session_attrs = list(session)
        hostname = self.get_element_by_name(session_attrs, 'Hostname')
        port = self.get_element_by_name(session_attrs, '[SSH2] Port')
        username = self.get_element_by_name(session_attrs, 'Username')
        if not self.check_attribute(hostname):
            return None
        return {
            'label': session.get('name'),
            'hostname': hostname.text,
            # Fall back to the standard ssh port when none is configured.
            'port': port.text if self.check_attribute(port) else '22',
            'username': username.text
            if self.check_attribute(username) else None
        }

    def check_attribute(self, attr):
        """Return a truthy value when the element exists and has text."""
        return attr is not None and attr.text

    def get_element_by_name(self, elements, name):
        """Return the first element whose 'name' attribute equals ``name``."""
        for element in elements:
            if element.get('name') == name:
                return element
        return None
|
CapitalD/taplist | migrations/versions/34223fdff008_.py | Python | mit | 807 | 0.002478 | """empty message
Revision ID: 34223fdff008
Revises: b4bcea5528b6
Create Date: 2017-08-22 10:19:27.959749
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '34223fdff008'
down_revision = 'b4bcea5528b6'
branch_labels = None
depends_on = None
def upgrade():
    """Add nullable default_brewery / default_location columns to person."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('person', sa.Column('default_brewery', sa.Integer(), nullable=True))
    op.add_column('person', sa.Column('default_location', sa.Integer(), nullable=True))
    # ### end Alembic commands ###
def downgrade():
    """Drop the columns added by upgrade(), in reverse order of creation."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('person', 'default_location')
    op.drop_column('person', 'default_brewery')
    # ### end Alembic commands ###
|
fragglet/midisnd | midi.py | Python | isc | 4,962 | 0.018541 | #!/usr/bin/env python
#
# Copyright (c) 2015, Simon Howard
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR
# IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
# General MIDI instrument names. Indices 0-127 are the melodic programs
# (GM programs 1-128); the remaining 47 entries are the GM percussion keys.
INSTRUMENTS = [
    "Acoustic Grand Piano", "Bright Acoustic Piano",
    "Electric Grand Piano", "Honky-tonk Piano", "Electric Piano 1",
    "Electric Piano 2", "Harpsichord", "Clavi", "Celesta", "Glockenspiel",
    "Music Box", "Vibraphone", "Marimba", "Xylophone", "Tubular Bells",
    "Dulcimer", "Drawbar Organ", "Percussive Organ", "Rock Organ",
    "Church Organ", "Reed Organ", "Accordion", "Harmonica",
    "Tango Accordion", "Acoustic Guitar (nylon)",
    "Acoustic Guitar (steel)", "Electric Guitar (jazz)",
    "Electric Guitar (clean)", "Electric Guitar (muted)",
    "Overdriven Guitar", "Distortion Guitar", "Guitar harmonics",
    "Acoustic Bass", "Electric Bass (finger)", "Electric Bass (pick)",
    "Fretless Bass", "Slap Bass 1", "Slap Bass 2", "Synth Bass 1",
    "Synth Bass 2", "Violin", "Viola", "Cello", "Contrabass",
    "Tremolo Strings", "Pizzicato Strings", "Orchestral Harp", "Timpani",
    "String Ensemble 1", "String Ensemble 2", "SynthStrings 1",
    "SynthStrings 2", "Choir Aahs", "Voice Oohs", "Synth Voice",
    "Orchestra Hit", "Trumpet", "Trombone", "Tuba", "Muted Trumpet",
    "French Horn", "Brass Section", "SynthBrass 1", "SynthBrass 2",
    "Soprano Sax", "Alto Sax", "Tenor Sax", "Baritone Sax", "Oboe",
    "English Horn", "Bassoon", "Clarinet", "Piccolo", "Flute", "Recorder",
    "Pan Flute", "Blown Bottle", "Shakuhachi", "Whistle", "Ocarina",
    "Lead 1 (square)", "Lead 2 (sawtooth)", "Lead 3 (calliope)",
    "Lead 4 (chiff)", "Lead 5 (charang)", "Lead 6 (voice)",
    "Lead 7 (fifths)", "Lead 8 (bass + lead)", "Pad 1 (new age)",
    "Pad 2 (warm)", "Pad 3 (polysynth)", "Pad 4 (choir)", "Pad 5 (bowed)",
    "Pad 6 (metallic)", "Pad 7 (halo)", "Pad 8 (sweep)", "FX 1 (rain)",
    "FX 2 (soundtrack)", "FX 3 (crystal)", "FX 4 (atmosphere)",
    "FX 5 (brightness)", "FX 6 (goblins)", "FX 7 (echoes)",
    "FX 8 (sci-fi)", "Sitar", "Banjo", "Shamisen", "Koto", "Kalimba",
    "Bag pipe", "Fiddle", "Shanai", "Tinkle Bell", "Agogo", "Steel Drums",
    "Woodblock", "Taiko Drum", "Melodic Tom", "Synth Drum",
    "Reverse Cymbal", "Guitar Fret Noise", "Breath Noise", "Seashore",
    "Bird Tweet", "Telephone Ring", "Helicopter", "Applause", "Gunshot",

    # Percussion instruments:
    "Acoustic Bass Drum", "Bass Drum 1", "Side Stick", "Acoustic Snare",
    "Hand Clap", "Electric Snare", "Low Floor Tom", "Closed Hi Hat",
    "High Floor Tom", "Pedal Hi-Hat", "Low Tom", "Open Hi-Hat",
    "Low-Mid Tom", "Hi-Mid Tom", "Crash Cymbal 1", "High Tom",
    "Ride Cymbal 1", "Chinese Cymbal", "Ride Bell", "Tambourine",
    "Splash Cymbal", "Cowbell", "Crash Cymbal 2", "Vibraslap",
    "Ride Cymbal 2", "Hi Bongo", "Low Bongo", "Mute Hi Conga",
    "Open Hi Conga", "Low Conga", "High Timbale", "Low Timbale",
    "High Agogo", "Low Agogo", "Cabasa", "Maracas", "Short Whistle",
    "Long Whistle", "Short Guiro", "Long Guiro", "Claves", "Hi Wood Block",
    "Low Wood Block", "Mute Cuica", "Open Cuica", "Mute Triangle",
    "Open Triangle",
]
# Constants for MIDI notes.
#
# For example:
# F# in Octave 3: O3.Fs
# C# in Octave -2: On2.Cs
# D-flat in Octave 1: O1.Db
# D in Octave 0: O0.D
# E in Octave 2: O2.E
class Octave:
    """MIDI note numbers for the twelve notes of one octave.

    Each instance exposes one attribute per note name (sharps spelled
    with a trailing ``s``, flats with ``b``); enharmonic pairs such as
    Cs/Db share the same value.  ``base`` is the MIDI number of this
    octave's C.
    """

    def __init__(self, base):
        # Semitone offset of every note name relative to C.
        semitones = {
            "C": 0, "Cs": 1, "Db": 1, "D": 2, "Ds": 3, "Eb": 3,
            "E": 4, "F": 5, "Fs": 6, "Gb": 6, "G": 7, "Gs": 8,
            "Ab": 8, "A": 9, "As": 10, "Bb": 10, "B": 11,
        }
        for note_name, offset in semitones.items():
            setattr(self, note_name, base + offset)
# One Octave instance per MIDI octave.  The "On" prefix means a negative
# octave number; MIDI note 0 is C in octave -5 and middle C (60) is O0.C.
On5 = Octave(0)    # Octave -5
On4 = Octave(12)   # Octave -4
On3 = Octave(24)   # Octave -3
On2 = Octave(36)   # Octave -2
On1 = Octave(48)   # Octave -1
O0 = Octave(60)    # Octave 0
O1 = Octave(72)    # Octave 1
O2 = Octave(84)    # Octave 2
O3 = Octave(96)    # Octave 3
O4 = Octave(108)   # Octave 4
O5 = Octave(120)   # Octave 5
# Given a MIDI note number, return a note definition in terms of the
# constants above.
def def_for_note(note):
    """Return the symbolic constant (e.g. ``"O0.C"``) for a MIDI note number."""
    octave_names = ("On5", "On4", "On3", "On2", "On1",
                    "O0", "O1", "O2", "O3", "O4", "O5")
    note_names = ("C", "Cs", "D", "Ds", "E", "F", "Fs",
                  "G", "Gs", "A", "As", "B")
    octave, pitch = divmod(note, 12)
    return "{0}.{1}".format(octave_names[octave], note_names[pitch])
|
Tancata/phylo | ALE/gene_originations_at_node.py | Python | mit | 1,475 | 0.006102 | #from ALE reconciliations, print a list of gene fams originating at a node with P > 0.5.
# From ALE reconciliations, print the gene families originating at a given
# species-tree node, and collect representative sequences for families whose
# origination probability exceeds a threshold.
#
# Usage:
#   python gene_originations_at_node.py reconciliation_file_dir node_number protein_outputfile
import re, os, sys

# Command-line arguments: directory of *uml_rec files, target node id,
# and the FASTA file to build for the predicted ancestral proteome.
directory = sys.argv[1]
node = int(sys.argv[2])
predicted_proteome_output_file = sys.argv[3]
# Families with origination probability above this go into the proteome.
ancestral_threshold = 0.5
total_num_gene_originations = 0

# Start from a clean output file so repeated runs do not append twice.
if os.path.exists(predicted_proteome_output_file):
    os.unlink(predicted_proteome_output_file)

# (renamed loop variable: the original shadowed the `file` builtin)
to_parse = [fname for fname in os.listdir(directory) if fname.endswith("uml_rec")]
print("GeneFam\tCopies")
for fname in to_parse:
    name_fields = re.split("faa", fname)
    fam_name = name_fields[0] + "faa"
    representative_name = name_fields[0] + "_representative.fa"
    inh = open(directory + fname)
    for line in inh:
        fields = re.split("\t", line.rstrip())
        if len(fields) > 1:
            if fields[0] == "S_internal_branch" and int(fields[1]) == node:
                print(fam_name + "\t" + str(fields[-2]))
                total_num_gene_originations += float(fields[-2])
                # If above the threshold for including in the node protein
                # content reconstruction, append the representative sequence.
                # NOTE(review): shell command assembled by concatenation --
                # assumes trusted file names; `{ cat; echo; }` guarantees a
                # trailing newline between concatenated FASTA records.
                if float(fields[-2]) > ancestral_threshold:
                    os.system("cat medoid/" + representative_name + " | { cat; echo; } >> " + predicted_proteome_output_file)
    inh.close()
print("Total gene originations at node: " + str(total_num_gene_originations))
|
hellhovnd/dentexchange | dentexchange/apps/employer/urls.py | Python | bsd-3-clause | 3,483 | 0.012346 | # -*- coding:utf-8 -*-
from django.conf.urls import patterns, url
from django.core.urlresolvers import reverse_lazy
from libs.decorators import login_required_for, EMPLOYER, EMPLOYEE
from membership.decorators import enforce_membership
from .decorators import enforce_business
from . import views
def _employer_view(view_class):
    """Wrap a class-based view in the common employer stack: employer
    login required, a registered business required, and an active
    membership required."""
    return login_required_for(EMPLOYER)(
        enforce_business(
            enforce_membership(
                view_class.as_view())))


urlpatterns = patterns(
    '',
    url(r'^$', _employer_view(views.DashboardView), name='dashboard'),
    # Business registration only needs an employer login: it is the step
    # that creates the business the other views enforce.
    url(r'^business/$',
        login_required_for(EMPLOYER)(views.BusinessFormView.as_view()),
        name='business'),
    url(r'^first_practice/$', _employer_view(views.FirstPraxisFormView),
        name='first_praxis'),
    url(r'^practice_profile/$', _employer_view(views.PraxisProfileView),
        name='praxis_profile'),
    url(r'^practice_profile/new/$', _employer_view(views.AddNewPraxisFormView),
        name='add_new_praxis'),
    url(r'^practice_profile/edit/(?P<pk>\d+)/$',
        _employer_view(views.EditPraxisFormView),
        name='edit_praxis'),
    url(r'^practice_profile/details/(?P<pk>\d+)/$',
        _employer_view(views.PraxisDetailView),
        name='view_praxis'),
    url(r'^praxis/delete/$', _employer_view(views.DeletePraxisView),
        name='delete_praxis'),
    url(r'^practice_profile/practice/(?P<pk>\d+)/new_posting/$',
        _employer_view(views.AddNewPostingFormView),
        name='add_new_posting'),
    url(r'^practice_profile/posting/edit/(?P<pk>\d+)/$',
        _employer_view(views.EditPostingFormView),
        name='edit_posting'),
    url(r'^practice_profile/posting/delete/$',
        _employer_view(views.DeletePostingView),
        name='delete_posting'),
    url(r'^practice_profile/posting/details/(?P<pk>\d+)/$',
        _employer_view(views.PostingDetailView),
        name='view_posting'),
    url(r'^practice_profile/practice/(?P<pk>\d+)/postings/$',
        _employer_view(views.JobPostingListView),
        name='job_posting_list'),
    url(r'^practice_profile/posting/(?P<pk>\d+)/(?P<post_status>\w+)/$',
        _employer_view(views.PostJobPostingView),
        name='post_job_posting'),
    # Public posting detail is readable by employers and employees alike;
    # it only requires a login plus an active membership.
    url(r'^posting/view/$',
        login_required_for((EMPLOYER, EMPLOYEE))(
            enforce_membership(
                views.PublicPostingDetailFromListView.as_view())),
        name='view_public_posting_from_list'),
)
|
mwrlabs/veripy | contrib/rfc3736/builder.py | Python | gpl-3.0 | 1,149 | 0.007833 | from scapy.all import *
from scapy.layers import dhcp6
from time import time
def duid(ll_addr):
    """Build a DHCPv6 DUID-LLT (link-layer address + time) for *ll_addr*."""
    return DUID_LLT(lladdr=ll_addr, timeval=time())


def ias(requested, iface, T1=None, T2=None):
    """Answer each requested IA (identity association) with an address
    assignment for *iface*, optionally overriding the T1/T2 timers."""
    # List comprehension instead of map(): returns a list on Python 3 too.
    return [__build_ia(r, iface, T1, T2) for r in requested]


def options(requested):
    """Build a DHCPv6 option instance for every requested option code."""
    return [__build_option_by_code(code) for code in requested]


def __build_ia(request, iface, T1=None, T2=None):
    """Copy the client's IA, apply optional T1/T2 overrides, and attach the
    interface's global address with 300s preferred/valid lifetimes."""
    # Conditional expression instead of the `and/or` idiom: the original
    # returned the wrong value whenever request.T1/T2 was 0 (falsy).
    ia = request.__class__(iaid=request.iaid,
                           T1=(request.T1 if T1 is None else T1),
                           T2=(request.T2 if T2 is None else T2))
    ia.ianaopts.append(DHCP6OptIAAddress(addr=str(iface.global_ip()),
                                         preflft=300, validlft=300))
    return ia


def __build_option_by_code(code):
    """Instantiate the DHCPv6 option class for *code* and fill in the
    canned values used by the test suite."""
    opt = __option_klass_by_code(code)()
    if isinstance(opt, DHCP6OptClientFQDN):
        opt.fqdn = 'testhost.local.'
    elif isinstance(opt, DHCP6OptDNSDomains):
        pass  # no payload required
    elif isinstance(opt, DHCP6OptDNSServers):
        opt.dnsservers.append('2001:500:88:200::10')
    elif isinstance(opt, DHCP6OptSNTPServers):
        opt.sntpservers.append('2001:500:88:200::10')
    return opt


def __option_klass_by_code(code):
    """Look up the scapy DHCPv6 option class registered for *code*."""
    return getattr(dhcp6, dhcp6.dhcp6opts_by_code[code])
|
crobinso/virt-manager | virtinst/domain/launch_security.py | Python | gpl-2.0 | 1,325 | 0.001509 | from ..xmlbuilder import XMLBuilder, XMLProperty
class DomainLaunchSecurity(XMLBuilder):
    """
    Class for generating the <launchSecurity> XML element
    (e.g. AMD SEV guest memory encryption settings).
    """
    XML_NAME = "launchSecurity"
    _XML_PROP_ORDER = ["type", "cbitpos", "reducedPhysBits", "policy",
                       "session", "dhCert"]

    type = XMLProperty("./@type")
    cbitpos = XMLProperty("./cbitpos", is_int=True)
    reducedPhysBits = XMLProperty("./reducedPhysBits", is_int=True)
    policy = XMLProperty("./policy")
    session = XMLProperty("./session")
    dhCert = XMLProperty("./dhCert")
    kernelHashes = XMLProperty("./@kernelHashes", is_yesno=True)

    def _set_defaults_sev(self, guest):
        """Validate the guest config for SEV and default the policy."""
        # SEV is only available on Q35 machines booted via UEFI firmware.
        if not guest.os.is_q35() or not guest.is_uefi():
            raise RuntimeError(_("SEV launch security requires a Q35 UEFI machine"))

        # 'policy' is a mandatory 4-byte argument for the SEV firmware,
        # if missing, let's use 0x03 which, according to the table at
        # https://libvirt.org/formatdomain.html#launchSecurity:
        # (bit 0) - disables the debugging mode
        # (bit 1) - disables encryption key sharing across multiple guests
        if self.policy is None:
            self.policy = "0x03"

    def set_defaults(self, guest):
        """Apply per-type defaults; currently only SEV needs any."""
        if self.type == "sev":
            return self._set_defaults_sev(guest)
|
Joacchim/Comix | src/archive.py | Python | gpl-2.0 | 20,550 | 0.001265 | # coding=utf-8
"""archive.py - Archive handling (extract/create) for Comix."""
from __future__ import absolute_import
import cStringIO
import os
import re
import sys
import tarfile
import threading
import zipfile
import gtk
try:
from py7zlib import Archive7z
except ImportError:
Archive7z = None # ignore it.
from src import mobiunpack
from src import process
from src.image import get_supported_format_extensions_preg
# Archive type codes as returned by archive_mime_type().
ZIP, RAR, TAR, GZIP, BZIP2, SEVENZIP, MOBI = range(7)
# Paths to external extractor executables; located lazily on first use
# (None = not looked up yet).
_rar_exec = None
_7z_exec = None
class Extractor(object):
"""Extractor is a threaded class for extracting different archive formats.
The Extractor can be loaded with paths to archives (currently ZIP, tar,
or RAR archives) and a path to a destination directory. Once an archive
has been set it is possible to filter out the files to be extracted and
set the order in which they should be extracted. The extraction can
then be started in a new thread in which files are extracted one by one,
and a signal is sent on a condition after each extraction, so that it is
possible for other threads to wait on specific files to be ready.
Note: Support for gzip/bzip2 compressed tar archives is limited, see
set_files() for more info.
"""
    def __init__(self):
        # False until setup() has successfully prepared an archive;
        # guards against using the extractor before it is configured.
        self._setupped = False
def setup(self, src, dst):
"""Setup the extractor with archive <src> and destination dir <dst>.
Return a threading.Condition related to the is_ready() method, or
None if the format of <src> isn't supported.
"""
self._src = src
self._dst = dst
self._type = archive_mime_type(src)
self._files = []
self._extracted = {}
self._stop = False
self._extract_thread = None
self._condition = threading.Condition()
if self._type == ZIP:
self._zfile = zipfile.ZipFile(src, 'r')
self._files = self._zfile.namelist()
elif self._type in (TAR, GZIP, BZIP2):
self._tfile = tarfile.open(src, 'r')
self._files = self._tfile.getnames()
elif self._type == RAR:
global _rar_exec
if _rar_exec is None:
_rar_exec = _get_rar_exec()
if _rar_exec is None:
print('! Could not find RAR file extractor.')
dialog = gtk.MessageDialog(None, 0, gtk.MESSAGE_WARNING,
gtk.BUTTONS_CLOSE,
_("Could not find RAR file extractor!"))
dialog.format_secondary_markup(_("You need either the <i>rar</i> or the"
| " <i>unrar</i> program installed in order "
"to read RAR (.cbr) files."))
dialog.run()
dialog.destroy()
return None
proc = process.Process([_rar_exec, 'vb', '-p-', '--', src])
fd = proc.spawn()
self._files = [name.rstrip(os.linesep) for name in fd.readlines()]
fd.close()
proc.wait()
elif self._type == SEVENZIP:
global _7z_exec, Archive7z
if not Archive7z: # lib import failed
print(': pylzma is not installed... will try 7z tool...')
if _7z_exec is None:
_7z_exec = _get_7z_exec()
else:
try:
self._szfile = Archive7z(open(src, 'rb'), '-')
self._files = self._szfile.getnames()
except:
Archive7z = None
# pylzma can fail on new 7z
if _7z_exec is None:
_7z_exec = _get_7z_exec()
if _7z_exec is None:
print('! Could not find 7Z file extractor.')
elif not Archive7z:
proc = process.Process([_7z_exec, 'l', '-bd', '-slt', '-p-', src])
fd = proc.spawn()
self._files = self._process_7z_names(fd)
fd.close()
proc.wait()
if not _7z_exec and not Archive7z:
dialog = gtk.MessageDialog(None, 0, gtk.MESSAGE_WARNING,
gtk.BUTTONS_CLOSE,
_("Could not find 7Z file extractor!"))
dialog.format_secondary_markup(_("You need either the <i>pylzma</i> "
"or the <i>p7zip</i> program installed "
"in order to read 7Z (.cb7) files."))
dialog.run()
dialog.destroy()
return None
elif self._type == MOBI:
self._mobifile = None
try:
self._mobifile = mobiunpack.MobiFile(src)
self._files = self._mobifile.getnames()
except mobiunpack.unpackException as e:
print('! Failed to unpack MobiPocket: {}'.format(e))
return None
else:
print('! Non-supported archive format: {}'.format(src))
return None
self._setupped = True
return self._condition
def _process_7z_names(self, fd):
START = "----------"
names = []
started = False
item = {}
while True:
try:
line = fd.readline()
except:
break
if line:
line = line.rstrip(os.linesep)
try:
# For non-ascii files names
line = line.decode("utf-8")
except:
pass
if line.startswith(START):
started = True
item = {}
continue
if started:
if line == "":
if item["Attributes"].find("D") == -1:
names.append(item["Path"])
item = {}
else:
key = line.split("=")[0].strip()
value = "=".join(line.split("=")[1:]).strip()
item[key] = value
else:
break
return names
def get_files(self):
"""Return a list of names of all the files the extractor is currently
set for extracting. After a call to setup() this is by default all
files found in the archive. The paths in the list are relative to
the archive root and are not absolute for the files once extracted.
"""
return self._files[:]
def set_files(self, files, extracted=False):
"""Set the files that the extractor should extract from the archive in
the order of extraction. Normally one would get the list of all files
in the archive using get_files(), then filter and/or permute this
list before sending it back using set_files().
The second parameter, extracted allows a trick for the subarchive
managing : setting files as extracted, in order to avoid any blocking
wait on files not present in the original archive.
Note: Random access on gzip or bzip2 compressed tar archives is
no good idea. These formats are supported *only* for backwards
compability. They are fine formats for some purposes, but should
not be used for scanned comic books. So, we cheat and ignore the
ordering applied with this method on such archives.
"""
if extracted:
self._files = files
for filename in files:
self._extracted[filename] = True
return
if self._type in (GZIP, BZIP2):
self._files = [x for x in self._files if x in files]
else:
self._files = files
def is_ready(self, name):
"""Return True if the file <name> in the extractor's file list
(as set by set_files()) is fully extracted.
"""
retu | |
beagles/neutron_hacking | neutron/plugins/oneconvergence/plugin.py | Python | apache-2.0 | 11,208 | 0.000446 | # Copyright 2014 OneConvergence, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Kedar Kulkarni, One Convergence, Inc.
"""Implementation of OneConvergence Neutron Plugin."""
from oslo.config import cfg
from oslo import messaging
from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api
from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api
from neutron.common import constants as q_const
from neutron.common import exceptions as nexception
from neutron.common import rpc
from neutron.common import topics
from neutron.db import agents_db
from neutron.db import agentschedulers_db
from neutron.db import db_base_plugin_v2
from neutron.db import dhcp_rpc_base
from neutron.db import external_net_db
from neutron.db import extraroute_db
from neutron.db import l3_agentschedulers_db
from neutron.db import l3_gwmode_db
from neutron.db import l3_rpc_base
from neutron.db import portbindings_base
from neutron.db import quota_db # noqa
from neutron.extensions import portbindings
from neutron.openstack.common import excutils
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants as svc_constants
import neutron.plugins.oneconvergence.lib.config # noqa
import neutron.plugins.oneconvergence.lib.exception as nvsdexception
from neutron.plugins.oneconvergence.lib import nvsdlib as nvsd_lib
LOG = logging.getLogger(__name__)
# IP version number used to reject IPv6 subnets (NVSD has no IPv6 support).
IPv6 = 6
class NVSDRpcCallbacks(dhcp_rpc_base.DhcpRpcCallbackMixin,
                       l3_rpc_base.L3RpcCallbackMixin):
    """Agent callback."""
    # RPC API version advertised to DHCP/L3 agents.
    target = messaging.Target(version='1.1')
class OneConvergencePluginV2(db_base_plugin_v2.NeutronDbPluginV2,
                             extraroute_db.ExtraRoute_db_mixin,
                             l3_agentschedulers_db.L3AgentSchedulerDbMixin,
                             agentschedulers_db.DhcpAgentSchedulerDbMixin,
                             external_net_db.External_net_db_mixin,
                             l3_gwmode_db.L3_NAT_db_mixin,
                             portbindings_base.PortBindingBaseMixin):
    """L2 Virtual Network Plugin.
    OneConvergencePluginV2 is a Neutron plugin that provides L2 Virtual Network
    functionality.
    """
    # Bulk/pagination/sorting are handled natively by the DB base plugin.
    __native_bulk_support = True
    __native_pagination_support = True
    __native_sorting_support = True
    # Neutron API extensions implemented via the mixins listed above.
    supported_extension_aliases = ['agent',
                                   'binding',
                                   'dhcp_agent_scheduler',
                                   'ext-gw-mode',
                                   'external-net',
                                   'extraroute',
                                   'l3_agent_scheduler',
                                   'quotas',
                                   'router',
                                   ]
def __init__(self):
super(OneConvergencePluginV2, self).__init__()
self.oneconvergence_init()
self.base_binding_dict = {
portbindings.VIF_TYPE: portbindings.VIF_TYPE_OVS}
portbindings_base.register_port_dict_function()
self.setup_rpc()
self.network_scheduler = importutils.import_object(
cfg.CONF.network_scheduler_driver)
self.router_scheduler = importutils.import_object(
cfg.CONF.router_scheduler_driver)
def oneconvergence_init(self):
"""Initialize the connections and set the log levels for the plugin."""
self.nvsdlib = nvsd_lib.NVSDApi()
self.nvsdlib.set_connection()
def setup_rpc(self):
# RPC support
self.agent_notifiers[q_const.AGENT_TYPE_DHCP] = (
dhcp_rpc_agent_api.DhcpAgentNotifyAPI()
)
self.agent_notifiers[q_const.AGENT_TYPE_L3] = (
l3_rpc_agent_api.L3AgentNotifyAPI()
)
self.callbacks = [NVSDRpcCallbacks(), agents_db.AgentExtRpcCallback()]
self.rpc_servers = []
self.service_topics = {svc_constants.CORE: topics.PLUGIN,
svc_constants.L3_ROUTER_NAT: topics.L3PLUGIN}
for svc_topic in self.service_topics.values():
target = messaging.Target(topic=svc_topic, server=cfg.CONF.host)
rpc_server = rpc.get_server(target, self.callbacks)
rpc_server.start()
self.rpc_servers.append(rpc_server)
def create_network(self, context, network):
net = self.nvsdlib.create_network(network['network'])
network['network']['id'] = net['id']
try:
neutron_net = super(OneConvergencePluginV2,
self).create_network(context, network)
#following call checks whether the network is external or not and
#if it is external then adds this network to externalnetworks
#table of neutron db
self._process_l3_create(context, neutron_net, network['network'])
except nvsdexception.NVSDAPIException:
with excutils.save_and_reraise_exception():
self.nvsdlib.delete_network(net)
return neutron_net
def update_network(self, context, net_id, network):
with context.session.begin(subtransactions=True):
neutron_net = super(OneConvergencePluginV2,
self).update_network(context, net_id, network)
self.nvsdlib.update_network(neutron_net, network['network'])
# updates neutron database e.g. externalnetworks table.
self._process_l3_update(context, neutron_net, network['network'])
return neutron_net
def delete_network(self, context, net_id):
with context.session.begin(subtransactions=True):
network = self._get_network(context, net_id)
#get all the subnets under the network to delete them
subnets = self._get_subnets_by_network(context, net_id)
super(OneConvergencePluginV2, self).delete_network(context,
| net_id)
self.nvsdlib.delete_network(network, subnets)
def creat | e_subnet(self, context, subnet):
if subnet['subnet']['ip_version'] == IPv6:
raise nexception.InvalidInput(
error_message="NVSDPlugin doesn't support IPv6.")
neutron_subnet = super(OneConvergencePluginV2,
self).create_subnet(context, subnet)
try:
self.nvsdlib.create_subnet(neutron_subnet)
except nvsdexception.NVSDAPIException:
with excutils.save_and_reraise_exception():
#Log the message and delete the subnet from the neutron
super(OneConvergencePluginV2,
self).delete_subnet(context, neutron_subnet['id'])
LOG.error(_("Failed to create subnet, "
"deleting it from neutron"))
return neutron_subnet
def delete_subnet(self, context, subnet_id):
neutron_subnet = self._get_subnet(context, subnet_id)
with context.session.begin(subtransactions=True):
super(OneConvergencePluginV2, self).delete_subnet(context,
subnet_id)
self.nvsdlib.delete_subnet(neutron_subnet)
def update_subnet(self, context, subnet_id, subnet):
with context.session.begin(subtransactions=True):
neutron_subnet = super(OneConvergencePluginV2,
self).update_subnet(context, subnet_id,
|
Iconik/eve-suite | src/model/static/crt/relationship.py | Python | gpl-3.0 | 1,456 | 0.00206 | from model.flyweight import Flyweight
| from model.static.database import database
class Relationship(Flyweight):
    """Static crtRelationships row: links a parent certificate (or type)
    to a child certificate, with lazily-created related objects."""

    def __init__(self, relationship_id):
        # Flyweight: constructing again with the same id returns the
        # already-initialized instance, so bail out early.
        if "_inited" in self.__dict__:
            return
        self._inited = None

        self.relationship_id = relationship_id

        # NOTE(review): query built by string formatting; the id comes from
        # static game data, but a parameterized query would be safer.
        cursor = database.get_cursor(
            "select * from crtRelationships where relationshipID={};".format(
                self.relationship_id))
        row = cursor.fetchone()
        self.parent_id = row["parentID"]
        self.parent_type_id = row["parentTypeID"]
        self.parent_level = row["parentLevel"]
        self.child_id = row["childID"]
        cursor.close()

        # Related objects are resolved lazily (imports deferred to avoid
        # circular imports and needless lookups).
        self._parent = None
        self._parent_type = None
        self._child = None

    def get_parent(self):
        """Return the parent Certificate, creating it on first access."""
        if self._parent is None:
            from model.static.crt.certificate import Certificate
            self._parent = Certificate(self.parent_id)
        return self._parent

    def get_parent_type(self):
        """Return the parent Type, creating it on first access."""
        if self._parent_type is None:
            from model.static.inv.type import Type
            self._parent_type = Type(self.parent_type_id)
        return self._parent_type

    def get_child(self):
        """Return the child Certificate, creating it on first access."""
        if self._child is None:
            from model.static.crt.certificate import Certificate
            self._child = Certificate(self.child_id)
        return self._child
|
he7d3r/ores | ores/score_request.py | Python | mit | 3,803 | 0.000526 | import json
class ScoreRequest:
    """A single scoring request: a set of revisions to score with a set of
    models within one wiki context, plus per-request options."""

    def __init__(self, context_name, rev_ids, model_names, precache=False,
                 include_features=False, injection_caches=None,
                 model_info=None, ip=None):
        """
        Construct a ScoreRequest from parameters.

        :Parameters:
            context_name : str
                The name of the content for the query -- usually a wikidb name
            rev_ids : `iterable` ( `int` )
                A set of revision IDs to score
            model_names : `iterable` ( `str` )
                A set of model_names to use in scoring
            precache : bool
                If true, mark the request as a "precache" request
            include_features : bool
                If true, include feature values in the response
            injection_caches : dict
                A mapping of injection_cache to `rev_id` to use for injecting
                cached data when extracting features/scoring.
            model_info : `list` ( `str` )
                A list of model information fields to include in the response
            ip : str
                The requesting client's IP address, if known.
        """
        self.context_name = context_name
        self.rev_ids = set(rev_ids)
        self.model_names = set(model_names)
        self.precache = precache
        self.include_features = include_features
        self.injection_caches = injection_caches or {}
        self.model_info = model_info
        self.ip = ip

    def __str__(self):
        return self.format()

    def format(self, rev_id=None, model_name=None):
        """
        Format a request or a sub-part of a request based on a rev_id and/or
        model_name.  This is useful for logging.
        """
        rev_ids = rev_id if rev_id is not None else set(self.rev_ids)
        model_names = model_name if model_name is not None else set(self.model_names)
        common = [self.context_name, rev_ids, model_names]
        optional = []
        if self.precache:
            optional.append("precache")
        if self.include_features:
            optional.append("features")
        if self.injection_caches:
            optional.append("injection_caches={0}".format(self.injection_caches))
        if self.model_info:
            optional.append("model_info=" + json.dumps(self.model_info))
        if self.ip:
            optional.append("ip={0}".format(self.ip))
        return "{0}({1})".format(":".join(repr(v) for v in common),
                                 ", ".join(optional))

    def __repr__(self):
        return "{0}({1})".format(
            self.__class__.__name__,
            ", ".join(repr(v) for v in [
                self.context_name,
                self.rev_ids,
                self.model_names,
                "precache={0!r}".format(self.precache),
                "include_features={0!r}".format(self.include_features),
                "injection_caches={0!r}".format(self.injection_caches),
                "ip={0!r}".format(self.ip),
                "model_info={0!r}".format(self.model_info)]))

    def to_json(self):
        """Serialize to a JSON-compatible dict (sets become lists)."""
        return {
            'context': self.context_name,
            'rev_ids': list(self.rev_ids),
            'model_names': list(self.model_names),
            'precache': self.precache,
            'include_features': self.include_features,
            'injection_caches': self.injection_caches,
            'ip': self.ip,
            'model_info': self.model_info
        }

    @classmethod
    def from_json(cls, data):
        """Rebuild a ScoreRequest from a dict produced by to_json()."""
        return cls(
            data['context'],
            set(data['rev_ids']),
            set(data['model_names']),
            precache=data['precache'],
            include_features=data['include_features'],
            injection_caches=data['injection_caches'],
            model_info=data['model_info'],
            ip=data['ip'])
|
kenrick95/airmozilla | airmozilla/roku/views.py | Python | bsd-3-clause | 5,212 | 0 | from django.contrib.sites.models import RequestSite
from django.shortcuts import render
from django.conf import settings
from django.db.models import Q
from django.core.urlresolvers import reverse
from airmozilla.main.models import Channel, Event
from airmozilla.main.views import is_contributor
from airmozilla.base.utils import (
paginate
)
from airmozilla.main.context_processors import get_featured_events
def categories_feed(request):
    # Roku top-level feed: channels with visible content plus currently
    # live events, restricted by the requesting user's privacy level.
    context = {}
    privacy_filter = {}
    privacy_exclude = {}
    if request.user.is_active:
        if is_contributor(request.user):
            # feed_privacy = 'contributors'
            privacy_exclude = {'privacy': Event.PRIVACY_COMPANY}
        # else:
        #     feed_privacy = 'company'
    else:
        privacy_filter = {'privacy': Event.PRIVACY_PUBLIC}
        # feed_privacy = 'public'
    events = Event.objects.scheduled().approved()
    live_events = Event.objects.live().approved()
    if privacy_filter:
        events = events.filter(**privacy_filter)
        live_events = live_events.filter(**privacy_filter)
    elif privacy_exclude:
        events = events.exclude(**privacy_exclude)
        live_events = live_events.exclude(**privacy_exclude)
    channels = get_channels(events)
    context['channels'] = channels
    context['live_events'] = live_events
    # Roku needs fully-qualified URLs, so build them from the request.
    prefix = request.is_secure() and 'https' or 'http'
    root_url = '%s://%s' % (prefix, RequestSite(request).domain)
    def abs_url_maker(viewname, *args, **kwargs):
        # Absolute-URL reverse() helper handed to the template.
        return root_url + reverse(viewname, args=args, kwargs=kwargs)
    context['abs_url'] = abs_url_maker
    context['get_media_info'] = get_media_info
    response = render(request, 'roku/categories.xml', context)
    response['Content-Type'] = 'text/xml'
    return response
def get_channels(events, parent=None):
    """Return the channels under *parent* (top-level if None) that contain
    any of *events* or have at least one subchannel, sorted by name with
    the default channel first."""
    channels = []
    channels_qs = Channel.objects.all()
    if parent is None:
        channels_qs = channels_qs.filter(parent__isnull=True)
    else:
        channels_qs = channels_qs.filter(parent=parent)
    for channel in channels_qs:
        event_count = events.filter(channels=channel).count()
        subchannel_count = Channel.objects.filter(parent=channel).count()
        if event_count or subchannel_count:
            # channel.subchannels = get_channels(events, parent=channel)
            channels.append(channel)

    # Sort case-insensitively by name but always float the default channel
    # to the front.  (The old cmp-based comparator only special-cased its
    # *first* argument, making the order ill-defined, and cmp comparators
    # don't exist on Python 3; a key function fixes both.)
    channels.sort(key=lambda c: (c.slug != settings.DEFAULT_CHANNEL_SLUG,
                                 c.name.lower()))
    return channels
def get_media_info(event):
    """Return a dict with the playable 'url' and 'format' for an event,
    or None when the event's template isn't Roku-playable."""
    template = event.template
    template_name = template.name.lower() if template else ''
    if template and 'vid.ly' in template_name:
        tag = event.template_environment['tag']
        # NOTE that it's deliberately set to the HTTP URL. Not HTTPS :(
        return {
            'url': 'http://vid.ly/%s?content=video&format=mp4' % tag,
            'format': 'mp4'
        }
    elif template and 'hls' in template_name:
        try:
            file = event.template_environment['file']
            wowzaapp = event.template_environment.get('wowzaapp') or 'Edgecast'
            return {
                # it's important to use HTTP here :(
                'url': (
                    'http://wowza1.cdn.mozilla.net/%s/ngrp:%s_all'
                    '/playlist.m3u8' % (wowzaapp, file)
                ),
                'format': 'hls',
            }
        except KeyError:
            pass
    return None
def event_feed(request, id):
    """Return a feed containing exactly one event, looked up by id."""
    context = {
        'events': Event.objects.filter(id=id),
        'get_media_info': get_media_info,
    }
    response = render(request, 'roku/channel.xml', context)
    response['Content-Type'] = 'text/xml'
    return response
def channel_feed(request, slug):
    """Feed of archived events for a channel (including the children of a
    parent channel), restricted by the user's privacy level."""
    # this slug might be the slug of a parent
    channels = Channel.objects.filter(
        Q(slug=slug) |
        Q(parent__slug=slug)
    )
    events = Event.objects.archived().approved()
    events = events.filter(channels__in=channels)
    privacy_filter = {}
    privacy_exclude = {}
    if request.user.is_active:
        if is_contributor(request.user):
            privacy_exclude = {'privacy': Event.PRIVACY_COMPANY}
    else:
        privacy_filter = {'privacy': Event.PRIVACY_PUBLIC}
    if privacy_filter:
        events = events.filter(**privacy_filter)
    elif privacy_exclude:
        events = events.exclude(**privacy_exclude)
    events = events.order_by('-start_time')
    # Cap the feed at the 100 most recent events (first page only).
    paged = paginate(events, 1, 100)
    return render_channel_events(paged, request)
def trending_feed(request):
    """Feed of trending/featured events across all channels."""
    featured = get_featured_events(
        None,  # no channel restriction
        request.user,
        length=settings.TRENDING_ROKU_COUNT,
    )
    return render_channel_events(featured, request)
def render_channel_events(events, request):
    """Render a sequence of events as Roku channel XML."""
    context = {'events': events, 'get_media_info': get_media_info}
    response = render(request, 'roku/channel.xml', context)
    response['Content-Type'] = 'text/xml'
    return response
|
desihub/desisurvey | py/desisurvey/tiles.py | Python | bsd-3-clause | 19,547 | 0.000409 | """Manage static information associated with tiles, programs and passes.
Each tile has an assigned program name. The program names
(DARK, BRIGHT) are predefined in terms of conditions on the
ephemerides, but not all programs need to be present in a tiles file.
Pass numbers are arbitrary integers and do not need to be consecutive or dense.
To ensure consistent and efficient usage of static tile info, all code
should use::
tiles = desisurvey.tiles.get_tiles()
To use a non-standard tiles file, change the configuration before the
first call to ``get_tiles()`` with::
config = desisurvey.config.Configuration()
config.tiles_file.set_value(name)
The :class:`Tiles` class returned by :func:`get_tiles` is a wrapper around
the FITS table contained in a tiles file, that adds some precomputed derived
attributes for consistency and efficiency.
"""
from __future__ import print_function, division
import os
import numpy as np
import astropy.units as u
from astropy.time import Time
from astropy.table import Table
import desimodel.io
import desiutil.log
import desisurvey.config
import desisurvey.utils
import desisurvey.etc
class Tiles(object):
    """Manage static info associated with the tiles file.

    Parameters
    ----------
    tiles_file : str or None
        Name of the tiles file to use or None for the default specified
        in our configuration.
    """
    def __init__(self, tiles_file=None):
        config = desisurvey.config.Configuration()
        # Survey-level options: collapse GRAY into DARK, and optionally
        # allow BRIGHT tiles to be observed in dark conditions.
        self.nogray = config.tiles_nogray()
        bright_allowed_in_dark = getattr(
            config, 'bright_allowed_in_dark', None)
        if bright_allowed_in_dark is not None:
            self.bright_allowed_in_dark = bright_allowed_in_dark()
        else:
            self.bright_allowed_in_dark = False
        # Read the specified tiles file.
        self.tiles_file = tiles_file or config.tiles_file()
        self.tiles_file = find_tile_file(self.tiles_file)
        tiles = self.read_tiles_table()
        # Copy tile arrays.
        self.tileID = tiles['TILEID'].data.copy()
        self.tileRA = tiles['RA'].data.copy()
        self.tileDEC = tiles['DEC'].data.copy()
        self.tileprogram = np.array([p.strip() for p in tiles['PROGRAM']])
        self.tilepass = tiles['PASS'].data.copy()
        # Optional columns: design hour angle and priority boost factor.
        self.designha = None
        if 'DESIGNHA' in tiles.dtype.names:
            self.designha = tiles['DESIGNHA'].data.copy()
        self.priority_boostfac = np.ones(len(tiles), dtype='f4')
        if 'PRIORITY_BOOSTFAC' in tiles.dtype.names:
            self.priority_boostfac = tiles['PRIORITY_BOOSTFAC']
        self.tileobsconditions = self.get_conditions()
        if self.nogray:
            mgray = self.tileobsconditions == 'GRAY'
            self.tileobsconditions[mgray] = 'DARK'
        self.in_desi = tiles['IN_DESI'].data.copy() != 0
        # Count tiles.
        self.ntiles = len(self.tileID)
        # Can remove this when tile_index no longer uses searchsorted.
        if not np.all(np.diff(self.tileID) > 0):
            raise RuntimeError('Tile IDs are not increasing.')
        self.programs = [x for x in np.unique(tiles['PROGRAM'].data)]
        self.program_index = {pname: pidx
                              for pidx, pname in enumerate(self.programs)}
        # Build tile masks for each program. A program with no tiles will have an empty mask.
        self.program_mask = {}
        for p in self.programs:
            self.program_mask[p] = (self.tileprogram == p) & self.in_desi
        # Calculate and save dust exposure factors.
        self.dust_factor = desisurvey.etc.dust_exposure_factor(tiles['EBV_MED'].data)
        # Precompute coefficients to calculate tile observing airmass:
        # cos(Z) = A + B * cos(HA) for each tile.
        latitude = np.radians(config.location.latitude())
        tile_dec_rad = np.radians(self.tileDEC)
        self.tile_coef_A = np.sin(tile_dec_rad) * np.sin(latitude)
        self.tile_coef_B = np.cos(tile_dec_rad) * np.cos(latitude)
        # Placeholders for overlap attributes that are expensive to calculate
        # so we use lazy evaluation the first time they are accessed.
        self._overlapping = None
        self._neighbors = None
        self._fiberassign_delay = None
        # Calculate the maximum |HA| in degrees allowed for each tile to stay
        # above the survey minimum altitude
        cosZ_min = np.cos(90 * u.deg - config.min_altitude())
        cosHA_min = (
            (cosZ_min - np.sin(self.tileDEC * u.deg) * np.sin(latitude)) /
            (np.cos(self.tileDEC * u.deg) * np.cos(latitude))).value
        cosHA_min = np.clip(cosHA_min, -1, 1)
        self.max_abs_ha = np.degrees(np.arccos(cosHA_min))
        # Circumpolar or never-visible tiles produce non-finite limits.
        m = ~np.isfinite(self.max_abs_ha) | (self.max_abs_ha < 3.75)
        self.max_abs_ha[m] = 7.5  # always give at least a half hour window.
CONDITIONS = ['DARK', 'GRAY', 'BRIGHT']
CONDITION_INDEX = {cond: i for i, cond in enumerate(CONDITIONS)}
def airmass(self, hour_angle, mask=None):
"""Calculate tile airmass given hour angle.
Parameters
----------
hour_angle : array
Array of hour angles in degrees to use. If mask is None, then should have length
``self.ntiles``. Otherwise, should have a value per non-zero entry in the mask.
mask : array or None
Boolean mask of which tiles to perform the calculation for.
Returns
-------
array
Array of airmasses corresponding to each input hour angle.
"""
hour_angle = np.deg2rad(hour_angle)
if mask is None:
mask = slice(None)
cosZ = self.tile_coef_A[mask] + self.tile_coef_B[mask] * np.cos(hour_angle)
return desisurvey.utils.cos_zenith_to_airmass(cosZ)
def airmass_at_mjd(self, mjd, mask=None):
"""Calculate tile airmass at given MJD.
Parameters
----------
mjd : array
| Array of MJD to use. If mask is None, then should have length
``self.ntiles``. Otherwise, should have a value per non-zero entry
in the mask.
mask : array or None
Boolean mask of which tiles to perform the calculation for.
Returns
-------
array
Array of airmasses corresponding to each input hour angle.
" | ""
mjd = np.atleast_1d(mjd)
if len(mjd) == 0:
return np.zeros(0, dtype='f8')
tt = Time(mjd, format='mjd', location=desisurvey.utils.get_location())
lst = tt.sidereal_time('apparent').to(u.deg).value
ha = lst - self.tileRA[mask]
return self.airmass(ha, mask=mask)
def airmass_second_derivative(self, HA, mask=None):
"""Calculate second derivative of airmass with HA.
Useful for determining how close to design airmass we have to get
for different tiles. When this is large, we really need to observe
things right at their design angles. When it's small, we have more
flexibility.
"""
x = self.airmass(HA, mask=mask)
if mask is not None:
b = self.tile_coef_B[mask]
else:
b = self.tile_coef_B
d2rad = b*x**2 * (2*b*x*np.sin(np.radians(HA))**2 +
np.cos(np.radians(HA)))
return d2rad * (np.pi/180)**2
def index(self, tileID, return_mask=False):
"""Map tile ID to array index.
Parameters
----------
tileID : int or array
Tile ID value(s) to convert.
mask : bool
if mask=True, an additional mask array is returned, indicating which
IDs were present in the tile array. Otherwise, an exception is
raised if tiles were not found.
Returns
-------
int or array
Index into internal per-tile arrays corresponding to each input tile ID.
"""
scalar = np.isscalar(tileID)
tileID = np.atleast_1d(tileID)
if np.any(tileID < 0):
raise ValueError('tileIDs must positive!')
idx = np.searchsorted(self.tileID, tileID)
idx = np.clip(idx, 0, len(self.tileID)-1)
bad = self.ti |
belokop/indico_bare | indico/modules/rb/models/aspects_test.py | Python | gpl-3.0 | 1,299 | 0.00154 | # This file is part of Indico.
# Copyright (C) 2002 - 2016 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distribu | ted in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# a | long with Indico; if not, see <http://www.gnu.org/licenses/>.
from indico.modules.rb.models.aspects import Aspect
pytest_plugins = 'indico.modules.rb.testing.fixtures'
def test_default_on_startup(dummy_location, db):
    """``Aspect.default_on_startup`` reflects whether it is the location's default."""
    aspect = Aspect(name=u'Test', center_latitude='', center_longitude='',
                    zoom_level=0, top_left_latitude=0, top_left_longitude=0,
                    bottom_right_latitude=0, bottom_right_longitude=0)
    dummy_location.aspects.append(aspect)
    db.session.flush()
    # Not the default aspect yet.
    assert not aspect.default_on_startup
    dummy_location.default_aspect = aspect
    db.session.flush()
    # Becomes the default once assigned on the location.
    assert aspect.default_on_startup
|
badloop/SickRage | lib/tvdb_api/tvdb_cache.py | Python | gpl-3.0 | 7,781 | 0.003984 | #!/usr/bin/env python2
#encoding:utf-8
#author:dbr/Ben
#project:tvdb_api
#repository:http://github.com/dbr/tvdb_api
#license:unlicense (http://unlicense.org/)
"""
urllib2 caching handler
Modified from http://code.activestate.com/recipes/491261/
"""
from __future__ import with_statement
__author__ = "dbr/Ben"
__version__ = "1.9"
import os
import time
import errno
import httplib
import urllib2
import StringIO
from hashlib import md5
from threading import RLock
cache_lock = RLock()
def locked_function(origfunc):
    """Decorator to execute function under lock.

    Uses functools.wraps so the wrapped function keeps the original's
    __name__ and __doc__ (the plain wrapper hid them), and a ``with``
    block instead of manual acquire/release.
    """
    from functools import wraps

    @wraps(origfunc)
    def wrapped(*args, **kwargs):
        # cache_lock is the module-level RLock guarding the cache files.
        with cache_lock:
            return origfunc(*args, **kwargs)
    return wrapped
def calculate_cache_path(cache_location, url):
    """Return the ``(headers_path, body_path)`` cache file names for *url*.

    The URL is hashed with MD5 so arbitrary URLs map to safe file names
    under *cache_location*.
    """
    digest = md5(url).hexdigest()
    return (
        os.path.join(cache_location, digest + ".headers"),
        os.path.join(cache_location, digest + ".body"),
    )
def check_cache_time(path, max_age):
    """Checks if a file has been created/modified in the last *max_age* seconds.

    False means the file is too old (or doesn't exist), True means it is
    up-to-date and valid.
    """
    if not os.path.isfile(path):
        return False
    # Fresh iff the mtime falls within the trailing max_age window.
    return os.stat(path).st_mtime >= time.time() - max_age
@locked_function
def exists_in_cache(cache_location, url, max_age):
    """Return True if both header AND body cache files exist and are fresh."""
    hpath, bpath = calculate_cache_path(cache_location, url)
    if not (os.path.exists(hpath) and os.path.exists(bpath)):
        # At least one cache file is missing.
        return False
    return check_cache_time(hpath, max_age) and check_cache_time(bpath, max_age)
@locked_function
def store_in_cache(cache_location, url, response):
    """Tries to store *response* in the cache.

    Returns True when writing failed (IOError) and False on success --
    callers feed the result straight into the ``set_cache_header`` flag.
    ``with`` blocks guarantee the files are closed even if a write fails
    (the original leaked the handle on exception).
    """
    hpath, bpath = calculate_cache_path(cache_location, url)
    try:
        with open(hpath, "wb") as outf:
            outf.write(str(response.info()))
        with open(bpath, "wb") as outf:
            outf.write(response.read())
    except IOError:
        return True
    return False
@locked_function
def delete_from_cache(cache_location, url):
    """Deletes a cached response; True on IOError, False on success."""
    try:
        # Remove both the .headers and .body files, if present.
        for path in calculate_cache_path(cache_location, url):
            if os.path.exists(path):
                os.remove(path)
    except IOError:
        return True
    else:
        return False
class CacheHandler(urllib2.BaseHandler):
    """Stores responses in a persistent on-disk cache.

    If a subsequent GET request is made for the same URL, the stored
    response is returned, saving time, resources and bandwidth.
    NOTE: Python 2 only (urllib2, old-style except syntax).
    """
    @locked_function
    def __init__(self, cache_location, max_age = 21600):
        """Create the handler.

        cache_location: directory used for cache files (created if missing).
        max_age: cache lifetime in seconds (default 21600 = six hours).
        """
        self.max_age = max_age
        self.cache_location = cache_location
        if not os.path.exists(self.cache_location):
            try:
                os.mkdir(self.cache_location)
            except OSError, e:
                if e.errno == errno.EEXIST and os.path.isdir(self.cache_location):
                    # File exists, and it's a directory,
                    # another process beat us to creating this dir, that's OK.
                    pass
                else:
                    # Our target dir is already a file, or different error,
                    # relay the error!
                    raise
    def default_open(self, request):
        """Handles GET requests; if the response is cached it returns it."""
        if request.get_method() != "GET":
            return None # let the next handler try to handle the request

        if exists_in_cache(
            self.cache_location, request.get_full_url(), self.max_age
        ):
            # Serve from disk and mark the response as locally cached.
            return CachedResponse(
                self.cache_location,
                request.get_full_url(),
                set_cache_header = True
            )
        else:
            return None
    def http_response(self, request, response):
        """Gets a HTTP response; if it was a GET request and the status code
        starts with 2 (200 OK etc) it caches it and returns a CachedResponse.
        """
        if (request.get_method() == "GET"
            and str(response.code).startswith("2")
        ):
            if 'x-local-cache' not in response.info():
                # Response is not cached
                set_cache_header = store_in_cache(
                    self.cache_location,
                    request.get_full_url(),
                    response
                )
            else:
                set_cache_header = True
            # Re-wrap so downstream consumers always see a CachedResponse.
            return CachedResponse(
                self.cache_location,
                request.get_full_url(),
                set_cache_header = set_cache_header
            )
        else:
            return response
class CachedResponse(StringIO.StringIO):
    """An urllib2.response-like object for cached responses.

    To determine if a response is cached or coming directly from
    the network, check the x-local-cache header rather than the object type.
    NOTE: Python 2 only (StringIO inheritance, ``file()`` builtin).
    """
    @locked_function
    def __init__(self, cache_location, url, set_cache_header=True):
        self.cache_location = cache_location
        hpath, bpath = calculate_cache_path(cache_location, url)
        # Load the cached body into the underlying StringIO buffer.
        StringIO.StringIO.__init__(self, file(bpath, "rb").read())
        self.url = url
        self.code = 200
        self.msg = "OK"
        headerbuf = file(hpath, "rb").read()
        if set_cache_header:
            # Marker header consumers use to detect a locally-cached hit.
            headerbuf += "x-local-cache: %s\r\n" % (bpath)
        self.headers = httplib.HTTPMessage(StringIO.StringIO(headerbuf))
    def info(self):
        """Returns the response headers (httplib.HTTPMessage)."""
        return self.headers
    def geturl(self):
        """Returns the original URL."""
        return self.url
    @locked_function
    def recache(self):
        # Re-fetch the URL, overwrite the cache files and re-init from disk.
        # NOTE(review): store_in_cache's return value is ignored here --
        # __init__ is always called with set_cache_header=True; confirm intended.
        new_request = urllib2.urlopen(self.url)
        set_cache_header = store_in_cache(
            self.cache_location,
            new_request.url,
            new_request
        )
        CachedResponse.__init__(self, self.cache_location, self.url, True)
    @locked_function
    def delete_cache(self):
        # Remove this URL's header/body files from the cache directory.
        delete_from_cache(
            self.cache_location,
            self.url
        )
if __name__ == "__main__":
    def main():
        """Quick test/example of CacheHandler (hits the network; Python 2 only)."""
        opener = urllib2.build_opener(CacheHandler("/tmp/"))
        response = opener.open("http://google.com")
        print response.headers
        print "Response:", response.read()
        response.recache()
        print response.headers
        print "After recache:", response.read()
        # Test usage in threads: 50 threads fetch the same URL concurrently
        # and assert the cached payload stays consistent under the lock.
        from threading import Thread
        class CacheThreadTest(Thread):
            lastdata = None
            def run(self):
                req = opener.open("http://google.com")
                newdata = req.read()
                if self.lastdata is None:
                    self.lastdata = newdata
                assert self.lastdata == newdata, "Data was not consistent, uhoh"
                req.recache()
        threads = [CacheThreadTest() for x in range(50)]
        print "Starting threads"
        [t.start() for t in threads]
        print "..done"
        print "Joining threads"
        [t.join() for t in threads]
        print "..done"
    main()
|
codeadict/kushillu | kushillu/views.py | Python | mit | 8,944 | 0.001789 | import datetime
import uuid
import pytz
import os
import time
from django.contrib.sites.shortcuts import get_current_site
from django.core.servers.basehttp import FileWrapper
from django.views.decorators.csrf import csrf_exempt
from django.core.urlresolvers import reverse, resolve
from django.db.models import FieldDoesNotExist
from django.http import HttpResponse
from django.shortcuts import redirect
from django.views.decorators.http import require_POST
from django.views.generic.base import TemplateView
from django.views.generic import DetailView
from django.views.generic.list import ListView
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.contrib.humanize.templatetags.humanize import naturaltime
from . import cid
from .models import BuildInfo, Project
from settings import MAX_CONCURRENT_BUILDS
def cid_context(request):
    """Main context processor; exposes the menu and admin flag to templates."""
    user = request.user
    main_menu = []
    if user.is_authenticated():
        # Staff and regular users currently both get an empty menu.
        main_menu = []
    # TODO: add github url here
    return {
        'request': request,
        'main_menu': main_menu,
        'admin_access': user.is_staff,
        'messages': [],
    }
class PageWithAjax(TemplateView):
    """Shell page that embeds another view's rendered content and its URL.

    The wrapped view (named by the ``ajax_url`` URL kwarg) is resolved and
    rendered server-side for the initial page load; the client then polls
    the same URL via ajax.
    """
    template_name = "ajax_page.jinja"
    ajax_url = None  # URL pattern name of the wrapped view, set in dispatch()
    def dispatch(self, request, *args, **kwargs):
        # Pop so the remaining kwargs can be forwarded to the inner view.
        self.ajax_url = self.kwargs.pop('ajax_url')
        return super(PageWithAjax, self).dispatch(request, *args, **kwargs)
    def get_context_data(self, **kwargs):
        context = super(PageWithAjax, self).get_context_data(**kwargs)
        ajax_url = reverse(self.ajax_url, args=self.args, kwargs=self.kwargs)
        context['ajax_url'] = ajax_url
        # Call the inner view directly and inline its rendered content so
        # the first page load needs no extra round trip.
        response = resolve(ajax_url).func(self.request, *self.args, **self.kwargs)
        content = getattr(response, 'rendered_content')
        context['initial_content'] = content
        return context
# Login-protected URL entry point for the wrapper page.
page_with_ajax = login_required(PageWithAjax.as_view())
class BuildMixin(object):
    """Mixin with tabular rendering helpers for build list/detail views.

    Subclasses set ``model``, ``columns`` (attribute names rendered as table
    cells), ``link_column`` (which column links to the detail page) and may
    override ``live_times`` (datetime columns rendered as live client-side
    timers). ``status`` is the HTTP status code used as an ajax polling
    signal by subclasses.
    """
    status = 200
    model = None
    link_column = None
    columns = []
    # FIX: ``live_times`` was assigned twice; the dead first assignment
    # (``live_times = []``) has been removed.
    live_times = ['time_taken']

    def render_to_response(self, context, **response_kwargs):
        """Render with the (possibly overridden) ``self.status`` code."""
        return super(BuildMixin, self).render_to_response(context, status=self.status, **response_kwargs)

    def get_context_data(self, **kwargs):
        """Expose column metadata and cell-value accessors to the template."""
        context = super(BuildMixin, self).get_context_data(**kwargs)
        context['columns'] = self.columns
        context['link_column'] = self.link_column
        context['headings'] = self._headings()
        context['get_value'] = self._get_value
        context['get_verbose_name'] = self._get_verbose_name
        context['live_times'] = self.live_times
        return context

    def _headings(self):
        """Yield a human-readable heading for each configured column."""
        for attr_name in self.columns:
            yield self._get_verbose_name(attr_name)

    def _get_verbose_name(self, attr_name):
        """Resolve a column name to its display name.

        Prefers the model field's ``verbose_name``; falls back to a model
        method's ``short_description``, then to the raw attribute name.
        """
        meta = self.model._meta
        try:
            field = meta.get_field_by_name(attr_name)[0]
            return field.verbose_name
        except FieldDoesNotExist:
            if hasattr(self.model, attr_name) and hasattr(getattr(self.model, attr_name), 'short_description'):
                return getattr(self.model, attr_name).short_description
            else:
                return attr_name

    def _get_value(self, obj, attr_name):
        """Return ``(rendered_value, is_safe_html)`` for one table cell.

        Callables are invoked; ``live_times`` datetimes become a live-timer
        span (safe HTML); other datetimes are humanized via ``naturaltime``.
        """
        value = getattr(obj, attr_name)
        if hasattr(value, '__call__'):
            value = value()
        if attr_name in self.live_times and isinstance(value, datetime.datetime):
            return '<span class="live-time" data-start="%s"></span>' % value.isoformat(), True
        if isinstance(value, datetime.datetime):
            value = naturaltime(value)
        return value, False
class BuildList(BuildMixin, ListView):
    """
    List of previous builds.

    Responds with status 201 when no builds are active, which the ajax
    poller uses to stop refreshing.
    """
    model = BuildInfo
    template_name = 'build_list.jinja'
    link_column = 'created'
    columns = ('created', 'time_taken', 'trigger', 'label', 'author', 'show_coverage', 'successful')
    paginate_by = 50
    def dispatch(self, request, *args, **kwargs):
        # NOTE(review): any_active_builds is not imported in the visible
        # module imports -- verify it is defined/imported elsewhere in this file.
        if not any_active_builds(self.request):
            self.status = 201
        return super(BuildList, self).dispatch(request, *args, **kwargs)
# Login-protected ajax endpoint rendering the build table fragment.
build_list_ajax = login_required(BuildList.as_view())
class BuildDetails(BuildMixin, DetailView):
    """
    Details of a build.

    Responds with status 202 when the build is complete, which the ajax
    poller uses to stop refreshing.
    """
    model = BuildInfo
    template_name = 'build.jinja'
    columns = ('created',
               'modified',
               'time_taken',
               'trigger',
               'action',
               'label',
               'on_master',
               'fetch_branch',
               'commit_url',
               'author',
               'complete',
               'queued',
               'test_success',
               'test_passed',
               'container',)
    def get_context_data(self, **kwargs):
        # Refresh the build's state (and surface known errors) before rendering.
        self.object = check(self.request, self.object)
        if self.object.complete:
            self.status = 202
        if self.object.process_log:
            # Never leak the GitHub token into the rendered log.
            self.object.process_log = self.object.process_log.replace(self.object.project.github_token,
                                                                      '<github token>')
        return super(BuildDetails, self).get_context_data(**kwargs)
# Login-protected ajax endpoint rendering the build detail fragment.
build_details_ajax = login_required(BuildDetails.as_view())
@csrf_exempt
@require_POST
def webhook(request, pk):
    """GitHub webhook endpoint: create and start/queue a build for project *pk*.

    Returns 403 when no project exists, 201 when the build was queued,
    otherwise the status code produced by cid.process_github_webhook.
    NOTE(review): get_project/set_site are not in the visible imports --
    presumably defined later in this module; verify.
    """
    project = get_project(pk)
    if not project:
        return HttpResponse('no project created', status=403)
    # this makes it even more impossible to guess key via brute force
    time.sleep(0.2)
    build_info = BuildInfo.objects.create(project=project)
    response_code, build_info2 = cid.process_github_webhook(request, build_info)
    if response_code == 202:
        set_site(build_info.project, request)
        if _start_queue_build(build_info2):
            msg = 'build started, id = %d' % build_info2.id
        else:
            msg = 'build queued, id = %d' % build_info2.id
            response_code = 201
    else:
        # Webhook rejected: drop the placeholder build record.
        build_info.delete()
        msg = str(build_info2)
    return HttpResponse(msg, status=response_code)
def status_svg(request, pk):
    """Serve the project's status badge as an SVG, with caching disabled."""
    project = get_project(pk)
    svg = project.status_svg if project else 'null.svg'
    svg_path = os.path.join(os.path.dirname(__file__), 'static', svg)
    response = HttpResponse(FileWrapper(open(svg_path)), content_type='image/svg+xml')
    # Defeat browser/proxy caching so the badge always reflects current state.
    no_cache_headers = (
        ('Etag', '"%s"' % uuid.uuid4()),
        ('Cache-Control', 'no-cache'),
        ('Expires', datetime.datetime.now().replace(tzinfo=pytz.UTC).strftime('%a, %d %b %Y %H:%M:%S %Z')),
    )
    for header, value in no_cache_headers:
        response[header] = value
    return response
@login_required
@require_POST
def go_build(request):
    """Manually trigger a build of master and redirect to the build list.

    Flashes a message when the build is queued (concurrency limit hit) or
    when no project has been configured yet.
    """
    project = get_project()
    if project:
        set_site(project, request)
        build_info = BuildInfo.objects.create(trigger='manual',
                                              author=request.user.username,
                                              project=project,
                                              on_master=True)
        if not _start_queue_build(build_info):
            messages.info(request, 'build queued')
    else:
        messages.warning(request, 'No project created')
    return redirect(reverse('build-list'))
def _start_queue_build(build_info):
    """
    Check whether the build can begin immediately or needs to be queued.
    If it can start; start it, else set queued to True and save build_info.
    :param build_info: BuildInfo instance to queue or start
    :returns: True if build started, else False
    """
    # NOTE(review): the count-then-create sequence is racy under concurrent
    # webhooks; the limit is best-effort, not a hard guarantee.
    if BuildInfo.objects.filter(complete=False, queued=False).count() >= MAX_CONCURRENT_BUILDS:
        build_info.queued = True
        build_info.save()
    else:
        cid.build(build_info)
    return not build_info.queued
def check(request, build_info):
bi = build_info
try:
set_site(bi.project, request)
bi = cid.check(build_info)
except cid.KnownError, e:
messages.error(request, str(e))
bi = build_info
finally:
return bi
def check_build |
UdK-VPT/Open_eQuarter | mole3x/extensions/acqu_berlin/fbinter_wfs_buildings_alk.py | Python | gpl-2.0 | 6,104 | 0.012615 | # -*- coding: utf-8 -*-
from qgis.core import NULL
from mole3.project import config
from mole3.oeq_global import OeQ_get_bld_id, isnull
from qgis.PyQt import QtGui, QtCore
def load(self=None):
    """Trigger the extension's WFS layer load and report success."""
    # Delegate to the extension instance; the loader's return value is unused.
    self.load_wfs()
    return True
def preflight(self=None):
    """Prepare the Berlin ALK building-outline layer for evaluation.

    Removes building-part features (non-zero 'BAT'), drops attributes that
    are not needed, renames the kept source attributes to *_ALK names and
    assigns a fresh building id to every feature. Returns False on failure,
    True on success.
    """
    from mole3.project import config
    from qgis.PyQt.QtCore import QVariant
    from qgis.core import QgsField
    from mole3.qgisinteraction.layer_interaction import add_attributes_if_not_exists
    from mole3.oeq_global import OeQ_get_bld_id
    layer = self.layer()
    #print layer.name()
    if layer == None:
        return False
    features = layer.getFeatures()
    provider = layer.dataProvider()
    #in the Berlin Hausumringe WFS there are additional Features that describe specific building parts. As they are not relevant here they are removed by usig the key "Bauart_sch"
    to_remove =[]
    for i in features:
        # NOTE(review): bare except -- any feature access error aborts the
        # whole preflight with False; consider narrowing the exception.
        try:
            if (not isnull(i['BAT'])) & (0 != i['BAT']):
                to_remove.append(i.id())
        except:
            return False
    provider.deleteFeatures(to_remove)
    # in the Berlin Hausumringe WFS there are additional Attributes that are not important here. they are removed
    # Mapping of source attribute name -> target *_ALK name; all other
    # attributes are deleted below.
    conversion_fields = [['AOG','FLRS_ALK'],['GFK','FUNC_ALK'],['BAW','KIND_ALK'],['GKN','GKN_ALK']]
    fields = [f for f in provider.fields() if f.name() not in [i[0] for i in conversion_fields]]
    fieldnames =[field.name() for field in fields]
    to_remove = []
    count = 0
    for field in provider.fields():
        if field.name() in fieldnames:
            to_remove.append(count)
        count += 1
    provider.deleteAttributes(to_remove)
    layer.updateFields()
    # Rename the kept source attributes to their *_ALK counterparts.
    layer.startEditing()
    for cf in conversion_fields:
        count = 0
        for field in provider.fields():
            #print field.name()
            if field.name() == cf[0]:
                layer.renameAttribute(count,cf[1])
                break
            count += 1
    layer.commitChanges()
    # create building_ids
    add_attributes_if_not_exists(layer, [QgsField(config.building_id_key,QVariant.String)])
    layer.updateFields()
    features = layer.getFeatures()
    layer.startEditing()
    for i in features:
        i[config.building_id_key] = OeQ_get_bld_id()
        layer.updateFeature(i)
    layer.commitChanges()
    return True
def evaluation(self=None, parameters={},feature=None):
    """Compute per-feature attributes (area, perimeter, pass-throughs).

    Returns a dict mapping attribute name -> {'type': QVariant type,
    'value': value}; values stay NULL when *feature* is None.
    NOTE(review): mutable default ``parameters={}`` and locals shadowing
    builtins (``id``, ``kind``) are preserved as-is -- consider cleaning up.
    """
    from qgis.PyQt.QtCore import QVariant
    from qgis.core import QgsDistanceArea, QgsCoordinateReferenceSystem
    ar = NULL
    per = NULL
    id = NULL
    flr = NULL
    usage = NULL
    kind = NULL
    gkn = NULL
    # Measurements use the engine's default CRS; the explicit CRS/ellipsoid
    # setup below is intentionally disabled.
    da_engine=QgsDistanceArea()
    #print (QgsCoordinateReferenceSystem(int(config.project_crs.split(':')[-1])))
    #print(QgsCoordinateReferenceSystem.EpsgCrsId)
    #da_engine.setSourceCrs(QgsCoordinateReferenceSystem(int(config.project_crs.split(':')[-1]), QgsCoordinateReferenceSystem.EpsgCrsId))
    #da_engine.setEllipsoid(config.project_ellipsoid)
    # da_engine.setEllipsoidalMode(True)
    if feature:
        geometry = feature.geometry()
        #print geometry
        ar = da_engine.measureArea(geometry)
        per =da_engine.measurePerimeter(geometry)
        id = feature[config.building_id_key] #necessary to safe dependency check
        flr = feature['FLRS_ALK'] # necessary to safe dependency check
        usage = feature['FUNC_ALK'] # necessary to safe dependency check
        kind = feature['KIND_ALK'] # necessary to safe dependency check
        gkn = feature['GKN_ALK'] # necessary to safe dependency check
    #print ar
    #print per
    #print id
    return {config.building_id_key: {'type': QVariant.String,
                                     'value': id},
            'AREA_ALK': {'type': QVariant.Double,
                         'value': ar},
            'PERI_ALK': {'type': QVariant.Double,
                         'value': per},
            'FLRS_ALK': {'type': QVariant.Double,
                         'value': flr},
            'FUNC_ALK': {'type': QVariant.Double,
                         'value': usage},
            'KIND_ALK': {'type': QVariant.Double,
                         'value': kind},
            'GKN_ALK': {'type': QVariant.String,
                        'value': gkn}
            }
def postflight(self=None):
    """Post-processing hook; currently a no-op that always reports success."""
    # Database creation is intentionally disabled for now:
    #return self.createDatabase()
    return True
import os
from mole3.extensions import OeQExtension
from mole3.project import config
# Register the ALK building-outline WFS source as a basic OeQ extension.
# NOTE(review): postflight() is defined above but not wired in
# (postflight_method=None) -- confirm this is intentional.
extension = OeQExtension(
    extension_id=__name__,
    category='',
    subcategory='',
    extension_name='Building Outlines (ALK, WFS)',
    extension_type='basic',
    field_id='', #used for point sampling tool
    par_in= [], #[config.building_id_key,'AREA','PERIMETER'],
    #par_out=[config.building_id_key,'AREA','PERIMETER'],
    source_type='wfs',
    layer_name=config.building_outline_layer_name,
    sourcelayer_name=config.building_outline_layer_name,
    targetlayer_name=config.building_outline_layer_name,#config.data_layer_name,
    active=True,
    description='',
    #source='http://fbinter.stadt-berlin.de/fb/wfs/data/senstadt/s_wfs_alkis_gebaeudeflaechen?SERVICE=WFS&VERSION=1.0.0&REQUEST=GetFeature&TYPENAME=fis:s_wfs_alkis_gebaeudeflaechen&SRSNAME=EPSG:25833',
    source='http://fbinter.stadt-berlin.de/fb/wfs/data/senstadt/s_wfs_alkis_gebaeudeflaechen?SERVICE=WFS&REQUEST=GetFeature&VERSION=2.0.0&TYPENAME=fis:s_wfs_alkis_gebaeudeflaechen&SRSNAME=urn:ogc:def:crs:EPSG:6.9:25833',
    #source='http://fbinter.stadt-berlin.de/fb/wfs/geometry/senstadt/re_hausumringe?SERVICE=WFS&VERSION=1.0.0&REQUEST=GetFeature&TYPENAME=fis:re_hausumringe&SRSNAME=EPSG:25833',
    source_crs='EPSG:25833',
    bbox_crs='EPSG:25833',
    extension_filepath=os.path.join(__file__),
    colortable = os.path.join(os.path.splitext(__file__)[0] + '.qml'),
    load_method= load,
    preflight_method = preflight,
    evaluation_method= evaluation,
    postflight_method = None)
extension.registerExtension(default=True)
|
lukaszb/nose-alert | nosealert/tests/test_plugin.py | Python | bsd-2-clause | 1,067 | 0.000937 | import unittest
from mock import Mock
from nosealert.plugin import AlertPlugin
from nosealert.notifications import Notification
class TestAlertPlugin(unittest.TestCase):
    """Tests for AlertPlugin.get_notification and finalize."""

    def setUp(self):
        self.plugin = AlertPlugin()

    def _result(self, failures=(), errors=(), tests_run=0):
        """Build a fake nose result object with the given counts."""
        return Mock(failures=list(failures), errors=list(errors),
                    testsRun=tests_run)

    def test_get_notification_success(self):
        result = self._result(tests_run=3)
        self.assertEqual(self.plugin.get_notification(result),
                         Notification(total=3))

    def test_get_notification_with_fails(self):
        result = self._result(failures=(1, 2), errors=(3,), tests_run=5)
        self.assertEqual(self.plugin.get_notification(result),
                         Notification(fails=2, errors=1, total=5))

    def test_finalize_sends_notification(self):
        notification = Mock()
        outcome = Mock()
        self.plugin.get_notification = Mock(return_value=notification)
        self.plugin.finalize(outcome)
        notification.send.assert_called_once_with()
|
1tush/reviewboard | reviewboard/admin/validation.py | Python | mit | 1,557 | 0.000642 | from __future__ import unicode_literals
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext as _
def validate_bug_tracker(input_url):
    """
    Validates that an issue tracker URI string contains one `%s` Python format
    specification type (no other types are supported).
    """
    # Escaped percent signs are not format specifiers -- drop them first.
    stripped = input_url.replace('%%', '')
    valid = '%s' in stripped
    if valid:
        # Inserting an arbitrary value exposes any other format types.
        try:
            stripped % 1
        except (TypeError, ValueError):
            valid = False
    if not valid:
        raise ValidationError([
            _("%s has invalid format specification type(s). Use only one "
              "'%%s' to mark the location of the bug id. If the URI contains "
              "encoded values (e.g. '%%20'), prepend the encoded values with "
              "an additional '%%'.") % input_url])
def validate_bug_tracker_base_hosting_url(input_url):
    """Check that hosting service bug URLs don't contain %s.

    FIX: the concatenated error message previously rendered with a double
    space ("a format  character") due to a stray leading space.
    """
    # Try formatting the URL using an empty tuple to verify that it
    # doesn't contain any format characters.
    try:
        input_url % ()
    except TypeError:
        raise ValidationError([
            _("The URL '%s' is not valid because it contains a format "
              "character. For bug trackers other than 'Custom Bug Tracker', "
              "use the base URL of the server. If you need a '%%' character, "
              "prepend it with an additional '%%'.") % input_url])
|
ThiefMaster/indico | indico/modules/events/reminders/forms.py | Python | mit | 5,113 | 0.004694 | # This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from wtforms.fields import BooleanField, SelectField, TextAreaField
from wtforms.validators import DataRequired, ValidationError
from indico.modules.events.models.events import EventType
from indico.util.date_time import now_utc
from indico.util.i18n import _
from indico.web.forms.base import IndicoForm, generated_data
from indico.web.forms.fields import EmailListField, IndicoDateTimeField, IndicoRadioField, TimeDeltaField
from indico.web.forms.validators import DateTimeRange, HiddenUnless
class ReminderForm(IndicoForm):
    """Form to create/edit an event reminder email.

    Validation enforces at least one recipient type and rejects schedule
    dates strictly in the past (same-day past times are sent immediately).
    """
    # Field-name groups used by the view layer to toggle form sections.
    recipient_fields = ['recipients', 'send_to_participants', 'send_to_speakers']
    schedule_fields = ['schedule_type', 'absolute_dt', 'relative_delta']
    schedule_recipient_fields = recipient_fields + schedule_fields
    # Schedule
    schedule_type = IndicoRadioField(_('Type'), [DataRequired()],
                                     choices=[('relative', _('Relative to the event start time')),
                                              ('absolute', _('Fixed date/time')),
                                              ('now', _('Send immediately'))])
    relative_delta = TimeDeltaField(_('Offset'), [HiddenUnless('schedule_type', 'relative'), DataRequired()])
    absolute_dt = IndicoDateTimeField(_('Date'), [HiddenUnless('schedule_type', 'absolute'), DataRequired(),
                                                  DateTimeRange()])
    # Recipients
    recipients = EmailListField(_('Email addresses'), description=_('One email address per line.'))
    send_to_participants = BooleanField(_('Participants'),
                                        description=_('Send the reminder to all participants/registrants '
                                                      'of the event.'))
    send_to_speakers = BooleanField(_('Speakers'),
                                    description=_('Send the reminder to all speakers/chairpersons of the event.'))
    # Misc
    reply_to_address = SelectField(_('Sender'), [DataRequired()],
                                   description=_('The email address that will show up as the sender.'))
    message = TextAreaField(_('Note'), description=_('A custom message to include in the email.'))
    include_summary = BooleanField(_('Include agenda'),
                                   description=_("Includes a simple text version of the event's agenda in the email."))
    include_description = BooleanField(_('Include description'),
                                       description=_("Includes the event's description in the email."))
    attach_ical = BooleanField(_('Attach iCalendar file'),
                               description=_('Attach an iCalendar file to the event reminder.'))
    def __init__(self, *args, **kwargs):
        self.event = kwargs.pop('event')
        self.timezone = self.event.timezone
        super().__init__(*args, **kwargs)
        self.reply_to_address.choices = (list(self.event
                                              .get_allowed_sender_emails(extra=self.reply_to_address.object_data).items()))
        # Lectures have no agenda, so the summary option makes no sense there.
        if self.event.type_ == EventType.lecture:
            del self.include_summary
    def validate_recipients(self, field):
        if not field.data and not self.send_to_participants.data and not self.send_to_speakers.data:
            raise ValidationError(_('At least one type of recipient is required.'))
    def validate_send_to_participants(self, field):
        if not field.data and not self.recipients.data and not self.send_to_speakers.data:
            raise ValidationError(_('At least one type of recipient is required.'))
    def validate_send_to_speakers(self, field):
        if not field.data and not self.recipients.data and not self.send_to_participants.data:
            raise ValidationError(_('At least one type of recipient is required.'))
    def validate_schedule_type(self, field):
        # Be graceful and allow a reminder that's in the past but on the same day.
        # It will be sent immediately but that way we are a little bit more user-friendly
        if field.data == 'now':
            return
        scheduled_dt = self.scheduled_dt.data
        if scheduled_dt is not None and scheduled_dt.date() < now_utc().date():
            raise ValidationError(_('The specified date is in the past'))
    @generated_data
    def scheduled_dt(self):
        # Resolve the three schedule modes to a concrete UTC datetime (or None
        # while the relevant input is still empty).
        if self.schedule_type.data == 'absolute':
            if self.absolute_dt.data is None:
                return None
            return self.absolute_dt.data
        elif self.schedule_type.data == 'relative':
            if self.relative_delta.data is None:
                return None
            return self.event.start_dt - self.relative_delta.data
        elif self.schedule_type.data == 'now':
            return now_utc()
    @generated_data
    def event_start_delta(self):
        return self.relative_delta.data if self.schedule_type.data == 'relative' else None
|
nachandr/cfme_tests | cfme/scripting/release.py | Python | gpl-2.0 | 6,579 | 0.00304 | #!/usr/bin/env python3
import re
import sys
from collections import defaultdict
from datetime import datetime
from datetime import timedelta
import click
import github
import tabulate
from cfme.utils.conf import docker
REPO_NAME = "ManageIQ/integration_tests"
MASTER = 'master'
HEADERS = ['PR', 'Labels', 'Author', 'Title']
FULL_HEADERS = HEADERS + ['DESCRIPTION']
VALID_LABELS = [
"ansible",
"blackify",
"blockers-only",
"collections-conversion",
"customer-case",
"doc",
"enhancement",
"fix-framework",
"fix-locator-or-text",
"fix-test",
"implement-ssui",
"infra-related",
"issue-bug",
"issue-rfe",
"LegacyBranch",
"manual",
"new-test-or-feature",
"Nuage",
"other",
"py3-compat",
"rc-regression-fix",
"Redfish",
"requirements",
"RHV",
"sprout",
"tech-debt",
"test-automation",
"test-cleanup",
"widgetastic-conversion",
]
IGNORED_LABELS = ['lint-ok', 'WIP-testing']
PR_LINK = "[{pr}](https://github.com/ManageIQ/integration_tests/pull/{pr})"
def clean_commit(commit_msg):
    """Strip review-status tags like ``[WIP]`` from a PR title."""
    for tag in ("1LP", "RFR", "WIP", "WIPTEST", "NOTEST"):
        commit_msg = commit_msg.replace("[%s]" % tag, "")
    # Only trim surrounding spaces; interior whitespace stays untouched.
    return commit_msg.strip(" ")
def clean_body(string):
    """Drop the first ``{{ ... }}`` pytest metadata blob from a PR body."""
    text = "" if string is None else string
    # DOTALL so the blob may span lines; trailing whitespace is removed too.
    blobs = re.findall(r"({{.*}}\s*)", text, flags=re.S | re.M)
    return text.replace(blobs[0], "") if blobs else text
def get_prs(release, old_release, gh):
    """Return merged PRs between *old_release* and *release*.

    The GitHub API cannot list PRs merged between two tags directly (the
    graphql v4 endpoint is only marginally better), so we search issues of
    type ``pr`` constrained to the merge-date window spanned by the two
    releases. Each PR gets a ``label`` attribute set to its first
    recognized label; if any PR lacks one, the offending numbers are
    printed and the process exits with status 1.
    """
    # Shift the window start slightly so the release commit itself is excluded.
    old_date = old_release.created_at + timedelta(seconds=5)
    new_date = datetime.now() if release == MASTER else release.created_at
    pulls = gh.search_issues(
        "",  # empty query string, required positional arg
        type="pr",
        repo=REPO_NAME,
        merged=f'{old_date.isoformat()}..{new_date.isoformat()}',  # ISO 8601 range
    )
    prs = []
    unlabeled = []
    for pr in pulls:
        prs.append(pr)
        recognized = [label.name for label in pr.labels if label.name in VALID_LABELS]
        if recognized:
            pr.label = recognized[0]
        else:
            unlabeled.append(pr.number)
    if unlabeled:
        pr_list = '\n'.join(str(p) for p in unlabeled)
        click.echo(f'The following PRs are missing correct labels:\n{pr_list}',
                   err=True)
        label_list = ', '.join(VALID_LABELS)
        click.echo(f"Recognized labels:\n {label_list}")
        sys.exit(1)
    return prs
@click.command(help="Assist in generating release changelog")
@click.argument("tag")
@click.option(
    "--old-tag",
    help="Build the changelog from a previous tag",
)
@click.option(
    "--full",
    "report_type",
    flag_value="full",
    help="Generates a full report with all PR description",
)
@click.option(
    "--brief",
    "report_type",
    flag_value="brief",
    default=True,
    help="Generates report with PR, label, author, and title in 4 columns"
)
@click.option(
    "--stats",
    "report_type",
    flag_value="stats",
    help="Generates stats only report"
)
@click.option(
    "--all",
    "report_type",
    flag_value="all",
    help="Generates stats and brief report together"
)
@click.option(
    "--links/--no-links",
    default=False,
    help="Include PR links in markdown"
)
@click.option(
    "--format",
    "tableformat",
    default='github',
    type=click.Choice(tabulate.tabulate_formats, case_sensitive=True),
    help="The tablefmt option for python tabulate"
)
def main(tag, old_tag, report_type, links, tableformat):
    """Script to assist in generating the release changelog

    This script will generate a simple or full diff of PRs that are merged
    and present the data in a way that is easy to copy/paste into an email
    or git tag.
    """
    click.echo(f"Report Includes: {old_tag} -> {tag}")
    gh = github.Github(docker.gh_token)
    repo = gh.get_repo(REPO_NAME)
    release = tag if tag == MASTER else repo.get_release(tag)
    old_release = repo.get_release(old_tag)
    prs = get_prs(release, old_release, gh)
    if report_type in ("full", "brief", "all"):
        # Markdown table of PR attributes; the full report also appends the body.
        report_data = []
        for pr in prs:
            pr_attrs = [
                pr.number if not links else PR_LINK.format(pr=pr.number),
                ', '.join(label.name for label in pr.labels if label.name in VALID_LABELS),
                pr.user.login,
                clean_commit(pr.title)
            ]
            if report_type == 'full':
                pr_attrs.append(clean_body(pr.body))
            report_data.append(pr_attrs)
        click.echo(tabulate.tabulate(report_data,
                                     headers=FULL_HEADERS if report_type == 'full' else HEADERS,
                                     tablefmt=tableformat))
    # BUG FIX: this branch was `elif`, so --all (already handled above) could
    # never reach the stats section, contradicting the --all help text.
    if report_type in ("stats", "all"):
        labels = defaultdict(int)
        authors = defaultdict(int)
        for pr in prs:
            for label_name in [l.name for l in pr.labels if l.name not in IGNORED_LABELS]:
                labels[label_name] += 1
            authors[pr.user.login] += 1
        # Label stats
        click.echo(tabulate.tabulate(sorted(labels.items(),
                                            key=lambda item: item[1],
                                            reverse=True),
                                     headers=["Label", "Number of PRs"],
                                     tablefmt=tableformat))
        click.echo('======================================')
        # Author stats
        click.echo(tabulate.tabulate(sorted(authors.items(),
                                            key=lambda item: item[1],
                                            reverse=True),
                                     headers=["Author", "Number of PRs"],
                                     tablefmt=tableformat))
|
import functools


@functools.lru_cache(maxsize=None)
def fib(n):
    """Return the n-th value of the 1, 1, 2, 3, 5, ... Fibonacci sequence.

    Memoized: the naive double recursion is exponential, which made the
    module-level fib(34) call below take seconds; the cache makes it linear.
    """
    if n < 2:
        return 1
    return fib(n - 1) + fib(n - 2)


fib(34)
|
DedMemez/ODS-August-2017 | parties/PartyCogActivity.py | Python | apache-2.0 | 29,414 | 0.002618 | # Fuck you Disyer. Stealing my fucking paypal. GET FUCKED: toontown.parties.PartyCogActivity
from panda3d.core import CollideMask, CollisionHandler, CollisionHandlerEvent, CollisionNode, CollisionSphere, NodePath, Point3, TextNode, Texture
from direct.interval.MetaInterval import Sequence, Parallel, Track
from direct.interval.FunctionInterval import Func, Wait
from direct.interval.SoundInterval import SoundInterval
from direct.interval.ActorInterval import ActorInterval
from direct.interval.ProjectileInterval import ProjectileInterval
from direct.distributed.ClockDelta import globalClockDelta
from direct.showbase.PythonUtil import bound, lerp
from direct.showbase.DirectObject import DirectObject
from toontown.toonbase import ToontownGlobals
from toontown.toonbase import TTLocalizer
from toontown.toonbase.ToontownTimer import ToontownTimer
import PartyGlobals
import PartyCogUtils
from PartyCog import PartyCogManager
from PartyCogActivityPlayer import PartyCogActivityPlayer
from PartyCogActivityPlayer import PartyCogActivityLocalPlayer
from StretchingArrow import StretchingArrow
class PartyCogActivity(DirectObject):
notify = directNotify.newCategory('PartyCogActivity')
cog = None
arena = None
player = None
players = {}
    def __init__(self, activity, arenaModel = None, texture = None):
        # activity: the owning distributed activity; supplies the scene root
        #           and the event sign positioned later in load().
        # arenaModel: path of the arena model loaded in load().
        # texture: optional reskin texture path applied to the arena in load().
        self.activity = activity
        self.root = self.activity.root
        # Per-toon pie-throw intervals and their completion event names.
        self.toonPieTracks = {}
        self.toonPieEventNames = {}
        self.toonIdsToAnimIntervals = {}
        self.pieIvals = []
        self.resultsIval = None
        self.arenaModel = arenaModel
        self.texture = texture
        return
def load(self):
self.arena = loader.loadModel(self.arenaModel)
self.arena.reparentTo(self.root)
ground = self.arena.find('**/ground')
ground.setBin('ground', 1)
entranceArrows = self.arena.findAllMatches('**/arrowFlat*')
for arrow in entranceArrows:
arrow.setBin('ground', 5)
self.leftEntranceLocator = self.arena.find('**/leftEntrance_locator')
self.rightEntranceLocator = self.arena.find('**/rightEntrance_locator')
self.leftExitLocator = self.arena.find('**/leftExit_locator')
self.rightExitLocator = self.arena.find('**/rightExit_locator')
self.teamCamPosLocators = (self.arena.find('**/team0CamPos_locator'), self.arena.find('**/team1CamPos_locator'))
self.teamCamAimLocators = (self.arena.find('**/team0CamAim_locator'), self.arena.find('**/team1CamAim_locator'))
leftTeamLocator = NodePath('TeamLocator-%d' % PartyGlobals.TeamActivityTeams.LeftTeam)
leftTeamLocator.reparentTo(self.root)
leftTeamLocator.setH(90)
rightTeamLocator = NodePath('TeamLocator-%d' % PartyGlobals.TeamActivityTeams.RightTeam)
rightTeamLocator.reparentTo(self.root)
rightTeamLocator.setH(-90)
self.teamLocators = (leftTeamLocator, rightTeamLocator)
self._lengthBetweenEntrances = self.leftEntranceLocator.getY() - self.rightExitLocator.getY()
self._skyCollisionsCollection = self.arena.findAllMatches('**/cogPieArena_sky*_collision')
if len(self._skyCollisionsCollection) > 0:
self._skyCollisionParent = self._skyCollisionsCollection[0].getParent()
else:
self._skyCollisionParent = self.arena
self._wallCollisionsCollection = self.arena.findAllMatches('**/cogPieArena_wall*_collision')
self._arenaFlagGroups = (self.arena.find('**/flagsL_grp'), self.arena.find('**/flagsR_grp'))
self._initArenaDoors()
self.cogManager = PartyCogManager()
self.arrows = []
self.distanceLabels = []
self.teamColors = list(PartyGlobals.CogActivityColors) + [PartyGlobals.TeamActivityStatusColor]
for i in xrange(3):
start = self.arena.find('**/cog%d_start_locator' % (i + 1))
end = self.arena.find('**/cog%d_end_locator' % (i + 1))
cog = self.cogManager.generateCog(self.arena)
cog.setEndPoints(start.getPos(), end.getPos())
arrow1 = StretchingArrow(self.arena, useColor='orange')
arrow2 = StretchingArrow(self.arena, useColor='blue')
arrow1.setZ(0.1)
arrow2.setZ(0.1)
self.arrows.append([arrow1, arrow2])
distanceLabel = self.createDistanceLabel(0, self.teamColors[1])
distanceLabel[0].stash()
distanceLabel2 = self.createDistanceLabel(0, self.teamColors[0])
distanceLabel2[0].stash()
self.distanceLabels.append([distanceLabel, distanceLabel2])
self.winText = []
text1 = self.createText(0, Point3(-0.5, 0.0, -0.5), self.teamColors[1])
text2 = self.createText(1, Point3(0.5, 0.0, -0.5), self.teamColors[0])
self.winText.append(text1)
self.winText.append(text2)
self.winStatus = self.createText(2, Point3(0.0, 0.0, -0.8), self.teamColors[0])
signLocator = self.arena.find('**/eventSign_locator')
self.activity.sign.setPos(signLocator.getPos(self.root))
if self.texture:
textureAlpha = self.texture[:-4] + '_a.rgb'
reskinTexture = loader.loadTexture(self.texture, textureAlpha)
self.arena.find('**/center_grp').setTexture(reskinTexture, 100)
self.arena.find('**/leftSide_grp').setTexture(reskinTexture, 100)
self.arena.find('**/rightSide_grp').setTextu | re(reskinTexture, 100)
self.enable()
def _initArenaDoors(self):
self._arenaDoors = (self.arena.find('**/doorL'), self.arena.find('**/doorR'))
arenaDo | orLocators = (self.arena.find('**/doorL_locator'), self.arena.find('**/doorR_locator'))
for i in xrange(len(arenaDoorLocators)):
arenaDoorLocators[i].wrtReparentTo(self._arenaDoors[i])
self._arenaDoorTimers = (self.createDoorTimer(PartyGlobals.TeamActivityTeams.LeftTeam), self.createDoorTimer(PartyGlobals.TeamActivityTeams.RightTeam))
self._arenaDoorIvals = [None, None]
self._doorStartPos = []
for i in xrange(len(self._arenaDoors)):
door = self._arenaDoors[i]
timer = self._arenaDoorTimers[i]
timer.reparentTo(arenaDoorLocators[i])
timer.hide()
self._doorStartPos.append(door.getPos())
door.setPos(door, 0, 0, -7.0)
return
def _destroyArenaDoors(self):
for ival in self._arenaDoorIvals:
ival.finish()
self._arenaDoorIvals = None
self._arenaDoors = None
for timer in self._arenaDoorTimers:
timer.stop()
timer.removeNode()
self._arenaDoorTimers = None
return
def createDoorTimer(self, team):
timer = ToontownTimer(useImage=False, highlightNearEnd=False)
timer['text_font'] = ToontownGlobals.getMinnieFont()
timer.setFontColor(PartyGlobals.CogActivityColors[team])
timer.setScale(7.0)
timer.setPos(0.2, -0.03, 0.0)
return timer
def createText(self, number, position, color):
text = TextNode('winText%d' % number)
text.setAlign(TextNode.ACenter)
text.setTextColor(color)
text.setFont(ToontownGlobals.getSignFont())
text.setText('')
noteText = aspect2d.attachNewNode(text)
noteText.setScale(0.2)
noteText.setPos(position)
noteText.stash()
return (text, noteText)
def createDistanceLabel(self, number, color):
text = TextNode('distanceText-%d' % number)
text.setAlign(TextNode.ACenter)
text.setTextColor(color)
text.setFont(ToontownGlobals.getSignFont())
text.setText('10 ft')
node = self.root.attachNewNode(text)
node.setBillboardPointEye()
node.setScale(2.5)
node.setZ(5.0)
return (node, text)
def unload(self):
self.disable()
self._cleanupResultsIval()
if self.winText is not None:
fo |
hexforge/pulp_db | experiments/tries/comparison/datrie/bench/speed.py | Python | apache-2.0 | 9,330 | 0.004089 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals, division
import random
import string
import timeit
import os
import zipfile
import datrie
def words100k():
    """Read the bundled 100k-word list (UTF-8, one word per line) from
    words100k.txt.zip located next to this file.

    Fix: close the zip archive and its member file deterministically instead
    of leaking both handles until garbage collection.
    """
    zip_name = os.path.join(
        os.path.abspath(os.path.dirname(__file__)),
        'words100k.txt.zip'
    )
    with zipfile.ZipFile(zip_name) as zf:
        with zf.open(zf.namelist()[0]) as member:
            txt = member.read().decode('utf8')
    return txt.splitlines()
def random_words(num):
    """Generate *num* pseudo-random words of 1-15 characters drawn from a
    mixed Russian/ASCII alphabet (used as guaranteed cache misses)."""
    russian = 'абвгдеёжзиклмнопрстуфхцчъыьэюя'
    alphabet = russian + string.ascii_letters
    words = []
    for _ in range(num):
        length = random.randint(1, 15)
        words.append("".join(random.choice(alphabet) for _ in range(length)))
    return words
def truncated_words(words):
    """Return the 3-character prefixes of *words* (shorter words unchanged)."""
    prefixes = []
    for word in words:
        prefixes.append(word[:3])
    return prefixes
def prefixes1k(words, prefix_len):
    """Return up to 1000 prefixes of length *prefix_len*, sampled evenly
    from the words long enough to provide one.

    Fix: the sampling stride used to be ``int(len(words)/1000)``, which is 0
    for fewer than 1000 eligible words and made the slice below raise
    "slice step cannot be zero"; the stride is now clamped to at least 1.
    """
    eligible = [w for w in words if len(w) >= prefix_len]
    every_nth = max(1, len(eligible) // 1000)
    sampled = [w[:prefix_len] for w in eligible[::every_nth]]
    return sampled[:1000]
WORDS100k = words100k()  # the 100k-word corpus every benchmark runs against
MIXED_WORDS100k = truncated_words(WORDS100k)  # 3-char prefixes: partial hits
NON_WORDS100k = random_words(100000)  # random strings: guaranteed misses
PREFIXES_3_1k = prefixes1k(WORDS100k, 3)  # 1k evenly sampled prefixes, len 3
PREFIXES_5_1k = prefixes1k(WORDS100k, 5)  # ... len 5
PREFIXES_8_1k = prefixes1k(WORDS100k, 8)  # ... len 8
PREFIXES_15_1k = prefixes1k(WORDS100k, 15)  # ... len 15
def _alphabet(words):
chars = set()
for word in words:
for ch in word:
chars.add(ch)
return "".join(sorted(list(chars)))
# Every character used by the corpus; datrie.Trie needs the alphabet up front.
ALPHABET = _alphabet(WORDS100k)
def bench(name, timer, descr='M ops/sec', op_count=0.1, repeats=3, runs=5):
    """Time *timer* `runs` times and print the best achieved rate.

    *op_count* is the number of operations (in the unit named by *descr*)
    performed by a single repeat of the timed statement.
    """
    best = min(timer.timeit(repeats) for _ in range(runs))
    rate = op_count * repeats / best
    print("%55s: %0.3f%s" % (name, rate, descr))
def create_trie():
    # Build a datrie.Trie over the 100k-word corpus; every key maps to 1.
    # Depends on the module-level ALPHABET (all characters used by the list).
    words = words100k()
    trie = datrie.Trie(ALPHABET)
    for word in words:
        trie[word] = 1
    return trie
def benchmark():
    """Print comparative dict-vs-trie timings over the 100k-word corpus.

    Fixes: a stray text artifact had corrupted the ``NON_WORDS_1k`` line of
    the timeit setup string, and the closing parenthesis of the final
    ``bench(`` call was missing.
    """
    print('\n====== Benchmarks (100k unique unicode words) =======\n')
    # Rows: (name, statement run against `data`, unit label,
    #        op count in millions, repeats per timing).
    tests = [
        ('__getitem__ (hits)', "for word in words: data[word]", 'M ops/sec', 0.1, 3),
        ('__contains__ (hits)', "for word in words: word in data", 'M ops/sec', 0.1, 3),
        ('__contains__ (misses)', "for word in NON_WORDS100k: word in data", 'M ops/sec', 0.1, 3),
        ('__len__', 'len(data)', ' ops/sec', 1, 1),
        ('__setitem__ (updates)', 'for word in words: data[word]=1', 'M ops/sec', 0.1, 3),
        ('__setitem__ (inserts, random)', 'for word in NON_WORDS_10k: data[word]=1', 'M ops/sec', 0.01, 3),
        ('__setitem__ (inserts, sorted)', 'for word in words: empty_data[word]=1', 'M ops/sec', 0.1, 3),
        ('setdefault (updates)', 'for word in words: data.setdefault(word, 1)', 'M ops/sec', 0.1, 3),
        ('setdefault (inserts)', 'for word in NON_WORDS_10k: data.setdefault(word, 1)', 'M ops/sec', 0.01, 3),
        ('values()', 'list(data.values())', ' ops/sec', 1, 1),
        ('keys()', 'list(data.keys())', ' ops/sec', 1, 1),
        ('items()', 'list(data.items())', ' ops/sec', 1, 1),
    ]
    # Setup source compiled by timeit; the unindented body is intentional.
    common_setup = """
from __main__ import create_trie, WORDS100k, NON_WORDS100k, MIXED_WORDS100k, datrie
from __main__ import PREFIXES_3_1k, PREFIXES_5_1k, PREFIXES_8_1k, PREFIXES_15_1k
from __main__ import ALPHABET
words = WORDS100k
NON_WORDS_10k = NON_WORDS100k[:10000]
NON_WORDS_1k = ['ыва', 'xyz', 'соы', 'Axx', 'avы']*200
"""
    dict_setup = common_setup + 'data = dict((word, 1) for word in words); empty_data=dict()'
    trie_setup = common_setup + 'data = create_trie(); empty_data = datrie.Trie(ALPHABET)'
    # Shared dict-vs-trie tests.
    for test_name, test, descr, op_count, repeats in tests:
        t_dict = timeit.Timer(test, dict_setup)
        t_trie = timeit.Timer(test, trie_setup)
        bench('dict ' + test_name, t_dict, descr, op_count, repeats)
        bench('trie ' + test_name, t_trie, descr, op_count, repeats)
    # trie-specific benchmarks
    bench(
        'trie.iter_prefix_values (hits)',
        timeit.Timer(
            "for word in words:\n"
            "    for it in data.iter_prefix_values(word):\n"
            "        pass",
            trie_setup
        ),
    )
    bench(
        'trie.prefix_values (hits)',
        timeit.Timer(
            "for word in words: data.prefix_values(word)",
            trie_setup
        )
    )
    bench(
        'trie.prefix_values loop (hits)',
        timeit.Timer(
            "for word in words:\n"
            "    for it in data.prefix_values(word):pass",
            trie_setup
        )
    )
    bench(
        'trie.iter_prefix_items (hits)',
        timeit.Timer(
            "for word in words:\n"
            "    for it in data.iter_prefix_items(word):\n"
            "        pass",
            trie_setup
        ),
    )
    bench(
        'trie.prefix_items (hits)',
        timeit.Timer(
            "for word in words: data.prefix_items(word)",
            trie_setup
        )
    )
    bench(
        'trie.prefix_items loop (hits)',
        timeit.Timer(
            "for word in words:\n"
            "    for it in data.prefix_items(word):pass",
            trie_setup
        )
    )
    bench(
        'trie.iter_prefixes (hits)',
        timeit.Timer(
            "for word in words:\n"
            "    for it in data.iter_prefixes(word): pass",
            trie_setup
        )
    )
    bench(
        'trie.iter_prefixes (misses)',
        timeit.Timer(
            "for word in NON_WORDS100k:\n"
            "    for it in data.iter_prefixes(word): pass",
            trie_setup
        )
    )
    bench(
        'trie.iter_prefixes (mixed)',
        timeit.Timer(
            "for word in MIXED_WORDS100k:\n"
            "    for it in data.iter_prefixes(word): pass",
            trie_setup
        )
    )
    bench(
        'trie.has_keys_with_prefix (hits)',
        timeit.Timer(
            "for word in words: data.has_keys_with_prefix(word)",
            trie_setup
        )
    )
    bench(
        'trie.has_keys_with_prefix (misses)',
        timeit.Timer(
            "for word in NON_WORDS100k: data.has_keys_with_prefix(word)",
            trie_setup
        )
    )
    # longest_prefix family: hits, misses and mixed inputs.
    for meth in ('longest_prefix', 'longest_prefix_item', 'longest_prefix_value'):
        bench(
            'trie.%s (hits)' % meth,
            timeit.Timer(
                "for word in words: data.%s(word)" % meth,
                trie_setup
            )
        )
        bench(
            'trie.%s (misses)' % meth,
            timeit.Timer(
                "for word in NON_WORDS100k: data.%s(word, default=None)" % meth,
                trie_setup
            )
        )
        bench(
            'trie.%s (mixed)' % meth,
            timeit.Timer(
                "for word in MIXED_WORDS100k: data.%s(word, default=None)" % meth,
                trie_setup
            )
        )
    # Prefix query benchmarks at several prefix lengths (1k queries each).
    prefix_data = [
        ('xxx', 'avg_len(res)==415', 'PREFIXES_3_1k'),
        ('xxxxx', 'avg_len(res)==17', 'PREFIXES_5_1k'),
        ('xxxxxxxx', 'avg_len(res)==3', 'PREFIXES_8_1k'),
        ('xxxxx..xx', 'avg_len(res)==1.4', 'PREFIXES_15_1k'),
        ('xxx', 'NON_EXISTING', 'NON_WORDS_1k'),
    ]
    for xxx, avg, data in prefix_data:
        for meth in ('items', 'keys', 'values'):
            bench(
                'trie.%s(prefix="%s"), %s' % (meth, xxx, avg),
                timeit.Timer(
                    "for word in %s: data.%s(word)" % (data, meth),
                    trie_setup
                ),
                'K ops/sec',
                op_count=1,
            )
def profiling():
print('\n====== Profiling =======\n')
def profile_yep():
import yep
trie = create_trie()
#WORDS = words100k()
yep.start(b'output.prof')
for x in range(100):
trie.keys()
# for x in range(1000):
# for word in WORDS:
# trie[word]
yep.stop()
def profile_cprofile():
import pstats
import cProfile
trie = create_trie()
WORDS = words100k()
def check_trie(trie, words):
value = 0
for word in words:
value += trie[word]
if value != len(words):
|
rkq/cxxexp | third-party/src/boost_1_56_0/libs/geometry/doc/make_qbk.py | Python | mit | 6,060 | 0.016337 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# ===========================================================================
# Copyright (c) 2007-2012 Barend Gehrels, Amsterdam, the Netherlands.
# Copyright (c) 2008-2012 Bruno Lalande, Paris, France.
# Copyright (c) 2009-2012 Mateusz Loskot (mateusz@loskot.net), London, UK
#
# Use, modification and distribution is subject to the Boost Software License,
# Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
# ============================================================================
import os, sys
script_dir = os.path.dirname(__file__)
os.chdir(os.path.abspath(script_dir))
print("Boost.Geometry is making .qbk files in %s" % os.getcwd())
if 'DOXYGEN' in os.environ:
doxygen_cmd = os.environ['DOXYGEN']
else:
doxygen_cmd = 'doxygen'
if 'DOXYGEN_XML2QBK' in os.environ:
doxygen_xml2qbk_cmd = os.environ['DOXYGEN_XML2QBK']
else:
doxygen_xml2qbk_cmd = 'doxygen_xml2qbk'
# Command template for converting one doxygen XML page into a .qbk file.
# Fix: a stray text artifact had corrupted the output redirection into
# '" > generated/%s.qb | k"'; restored below.
cmd = doxygen_xml2qbk_cmd
cmd = cmd + " --xml doxy/doxygen_output/xml/%s.xml"
cmd = cmd + " --start_include boost/geometry/"
cmd = cmd + " --convenience_header_path ../../../boost/geometry/"
cmd = cmd + " --convenience_headers geometry.hpp,geometries/geometries.hpp"
cmd = cmd + " --skip_namespace boost::geometry::"
cmd = cmd + " --copyright src/copyright_block.qbk"
cmd = cmd + " --output_member_variables false"
cmd = cmd + " > generated/%s.qbk"
def run_command(command):
    """Run *command* through the shell; raise if it exits with a non-zero status."""
    status = os.system(command)
    if status != 0:
        raise Exception("Error running %s" % command)
def remove_all_files(dir):
    """Delete every entry directly inside *dir*; a missing *dir* is a no-op.

    Fix: paths are now built with os.path.join, so *dir* no longer needs a
    trailing path separator (the old ``dir+f`` concatenation silently built
    wrong paths without one).
    """
    if os.path.exists(dir):
        for f in os.listdir(dir):
            os.remove(os.path.join(dir, f))
def call_doxygen():
    # Regenerate the doxygen XML: clear stale output, run doxygen from
    # inside doxy/, then restore the working directory. Order matters --
    # doxygen_cmd expects to run with doxy/ as the CWD.
    os.chdir("doxy")
    remove_all_files("doxygen_output/xml/")
    run_command(doxygen_cmd)
    os.chdir("..")
def group_to_quickbook(section):
    # Doxygen \defgroup page: group__<name> with underscores doubled.
    # Fix: a stray text artifact had corrupted the second `section` argument
    # into "sect | ion"; restored below.
    run_command(cmd % ("group__" + section.replace("_", "__"), section))
def model_to_quickbook(section):
    # Class page under boost::geometry::model.
    run_command(cmd % ("classboost_1_1geometry_1_1model_1_1" + section.replace("_", "__"), section))
def model_to_quickbook2(classname, section):
    # Like model_to_quickbook, but the doxygen class name is given verbatim.
    run_command(cmd % ("classboost_1_1geometry_1_1model_1_1" + classname, section))
def struct_to_quickbook(section):
    # Struct page directly under boost::geometry.
    run_command(cmd % ("structboost_1_1geometry_1_1" + section.replace("_", "__"), section))
def class_to_quickbook(section):
    # Class page directly under boost::geometry.
    run_command(cmd % ("classboost_1_1geometry_1_1" + section.replace("_", "__"), section))
def strategy_to_quickbook(section):
    # *section* is "<namespace>::<strategy>" under boost::geometry::strategy.
    p = section.find("::")
    ns = section[:p]
    strategy = section[p+2:]
    run_command(cmd % ("classboost_1_1geometry_1_1strategy_1_1"
                       + ns.replace("_", "__") + "_1_1" + strategy.replace("_", "__"),
                       ns + "_" + strategy))
def cs_to_quickbook(section):
    # Coordinate-system struct under boost::geometry::cs.
    run_command(cmd % ("structboost_1_1geometry_1_1cs_1_1" + section.replace("_", "__"), section))
call_doxygen()
algorithms = ["append", "assign", "make", "clear"
, "area", "buffer", "centroid", "convert", "correct", "covered_by"
, "convex_hull", "crosses", "difference", "disjoint", "distance"
, "envelope", "equals", "expand", "for_each", "is_simple", "is_valid"
, "intersection", "intersects", "length", "num_geometries"
, "num_interior_rings", "num_points", "overlaps", "perimeter"
, "reverse", "simplify", "sym_difference", "touches", "transform"
, "union", "unique", "within"]
access_functions = ["get", "set", "exterior_ring", "interior_rings"
, "num_points", "num_interior_rings", "num_geometries"]
coordinate_systems = ["cartesian", "geographic", "polar", "spherical", "spherical_equatorial"]
core = ["closure", "coordinate_system", "coordinate_type", "cs_tag"
, "dimension", "exception", "interior_type"
, "degree", "radian"
, "is_radian", "point_order"
, "point_type", "ring_type", "tag", "tag_cast" ]
exceptions = ["exception", "centroid_exception"];
iterators = ["circular_iterator", "closing_iterator"
, "ever_circling_iterator"]
models = ["point", "linestring", "box"
, "polygon", "segment", "ring"
, "multi_linestring", "multi_point", "multi_polygon", "referring_segment"]
strategies = ["distance::pythagoras", "distance::pythagoras_box_box"
, "distance::pythagoras_point_box", "distance::haversine"
, "distance::cross_track", "distance::projected_point"
, "within::winding", "within::franklin", "within::crossings_multiply"
, "area::surveyor", "area::huiller"
, "buffer::point_circle", "buffer::point_square"
, "buffer::join_round", "buffer::join_miter"
, "buffer::end_round", "buffer::end_flat"
, "buffer::distance_symmetric", "buffer::distance_asymmetric"
, "buffer::side_straight"
, "centroid::bashein_detmer", "centroid::average"
, "convex_hull::graham_andrew"
, "simplify::douglas_peucker"
, "side::side_by_triangle", "side::side_by_cross_track", "side::spherical_side_formula"
, "transform::inverse_transformer", "transform::map_transformer"
, "transform::rotate_transformer", "transform::scale_transformer"
, "transform::translate_transformer", "transform::ublas_transformer"
]
views = ["box_view", "segment_view"
, "closeable_view", "reversible_view", "identity_view"]
for i in algorithms:
group_to_quickbook(i)
for i in access_functions:
group_to_quickbook(i)
for i in coordinate_systems:
cs_to_quickbook(i)
for i in core:
struct_to_quickbook(i)
for i in exceptions:
class_to_quickbook(i)
for i in iterators:
struct_to_quickbook(i)
for i in models:
model_to_quickbook(i)
for i in strategies:
strategy_to_quickbook(i)
for i in views:
struct_to_quickbook(i)
model_to_quickbook2("d2_1_1point__xy", "point_xy")
group_to_quickbook("arithmetic")
group_to_quickbook("enum")
group_to_quickbook("register")
group_to_quickbook("svg")
class_to_quickbook("svg_mapper")
group_to_quickbook("wkt")
os.chdir("index")
execfile("make_qbk.py")
os.chdir("..")
# Use either bjam or b2 or ../../../b2 (the last should be done on Release branch)
run_command("b2")
|
maurizi/otm-core | opentreemap/treemap/migrations/0009_restructure_replaceable_terms.py | Python | agpl-3.0 | 1,359 | 0 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def replace_terms_forward(apps, schema_editor):
    """Fold the flat 'Resource'/'Resources' terms into one nested dict:
    {'singular': ..., 'plural': ...} stored under 'Resource'.

    Bug fix: the original guard read ``'Resource' and 'Resources' in terms``,
    which by operator precedence only tested for 'Resources' -- an instance
    missing the 'Resource' key would then raise KeyError when building the
    nested dict.
    """
    Instance = apps.get_model("treemap", "Instance")
    instances = Instance.objects.filter(config__contains='\"terms\":')
    for instance in instances:
        terms = instance.config.terms
        if ('Resource' in terms and 'Resources' in terms
                and not isinstance(terms['Resource'], dict)):
            new_dict = {'singular': terms['Resource'],
                        'plural': terms['Resources']}
            del terms['Resources']
            terms['Resource'] = new_dict
        # Saved unconditionally, matching the original migration's behavior.
        instance.save()
def replace_terms_backward(apps, schema_editor):
    """Reverse migration: split the nested 'Resource' dict back into flat
    'Resource' (singular) and 'Resources' (plural) terms.

    Bug fix: the original indexed ``terms['Resource.plural']`` /
    ``terms['Resource.singular']`` -- dotted strings that are never keys of
    the config dict -- instead of reading the nested dict created by
    replace_terms_forward. (Stray text artifacts in the filter/assignment
    lines are also repaired.)
    """
    Instance = apps.get_model("treemap", "Instance")
    instances = Instance.objects.filter(config__contains='\"terms\":')
    for instance in instances:
        terms = instance.config.terms
        if ('Resource' in terms
                and isinstance(terms['Resource'], dict)):
            resource = terms['Resource']
            terms['Resources'] = resource['plural']
            terms['Resource'] = resource['singular']
        instance.save()
class Migration(migrations.Migration):
    # Data-only migration: restructures the "terms" entries stored in each
    # instance's config (see replace_terms_forward/backward above).
    dependencies = [
        ('treemap', '0008_instance_eco_rev'),
    ]
    operations = [
        migrations.RunPython(replace_terms_forward, replace_terms_backward),
    ]
|
51reboot/actual_09_homework | 03/huxianglin/FileCopy.py | Python | mit | 1,225 | 0.013875 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------
因为不确定需要拷贝的文件是什么格式,所以使用二进制读取文件并拷贝到目的文件。
为了解决拷贝大文件的时候机器内存不足,设定了每次读取文件的字节数,该字节数可以自己设置。
------------------------------------------------------------------------------------ | ---------------------------------------------------------------------------------------------------------------
'''
def CopyFile(SrcFile, DestFile, TmpSize):
    """Copy SrcFile to DestFile in binary chunks of TmpSize bytes.

    Fixes:
    - ``with`` now guarantees both handles close, and the source handle is
      no longer leaked when opening the destination fails;
    - py2-only syntax (``print`` statement, ``except Exception, e``) replaced
      with forms valid on both Python 2.7 and 3.
    Errors while opening either file still propagate to the caller, matching
    the original behavior; errors during the copy itself are printed.
    """
    with open(SrcFile, 'rb') as SF, open(DestFile, 'wb') as DF:
        try:
            while True:
                Tmp = SF.read(TmpSize)
                if not Tmp:
                    break
                DF.write(Tmp)
        except Exception as e:
            print('Error:源文件:%s目的文件%s文件复制失败:%s' % (SrcFile, DestFile, e))
if __name__ == "__main__":
    # Demo invocation: expects www_access_20140823.log to exist next to this
    # script and copies it in 1 KiB chunks.
    CopyFile('www_access_20140823.log', 'test3.txt', 1024)
'''
功能ok, 复习了不少东西,加油
'''
|
MiltosD/CEF-ELRC | metashare/utils.py | Python | bsd-3-clause | 4,061 | 0.002955 | '''
This file holds globally useful utility classes and functions, i.e., classes and
functions that are generic enough not to be specific to one app.
'''
import logging
import os
import re
import sys
from datetime import tzinfo, timedelta
from django.conf import settings
# Setup logging support.
# Fix: stray text artifacts had corrupted "logging.getLogger" and the
# "import fcntl" line; both are restored below.
LOGGER = logging.getLogger(__name__)
LOGGER.addHandler(settings.LOG_HANDLER)
# try to import the `fcntl` module for locking support through the `Lock` class
# below
try:
    import fcntl
except ImportError:
    LOGGER.warn("Locking support is not available for your (non-Unix?) system. "
                "Using multiple processes might not be safe.")
def get_class_by_name(module_name, class_name):
    '''
    Given the name of an already-imported module (e.g.,
    'metashare.resedit.admin') and the name of a class (e.g., 'ContactSMI'),
    return the class type object (in the example, the class ContactSMI).
    If no such class exists, throws an AttributeError.
    '''
    module = sys.modules[module_name]
    if not hasattr(module, class_name):
        raise AttributeError("Module '{0}' has no class '{1}'".format(module_name, class_name))
    return getattr(module, class_name)
def verify_subclass(subclass, superclass):
    '''
    Assert that *subclass* really is a subclass of *superclass*.
    Raises TypeError otherwise; returns None on success.
    '''
    if issubclass(subclass, superclass):
        return
    raise TypeError('class {0} is not a subclass of class {1}'.format(subclass, superclass))
def prettify_camel_case_string(cc_str):
    '''
    Turn a camelCase string into a readable title, e.g.
    "soundToTextAlignment" becomes "Sound To Text Alignment".
    Underscores become spaces and "AtA" is special-cased to "At a".
    N.B.: word-boundary detection only recognizes ASCII letters.
    '''
    pretty = cc_str
    if len(pretty) > 1:
        pretty = pretty.replace('_', ' ').replace('AtA', 'At a')
        # Insert a space before each Upper+lower pair preceded by two chars.
        pretty = re.sub(r'(..)(?=[A-Z][a-z])', r'\1 ', pretty)
        capitalized = []
        for token in pretty.split():
            capitalized.append(token[0].upper() + token[1:] if len(token) > 1
                               else token[0].upper())
        pretty = ' '.join(capitalized)
    return pretty
def create_breadcrumb_template_params(model, action):
    '''
    Build the context dictionary consumed by breadcrumb templates.
    '''
    meta = model._meta
    return {
        'app_label': meta.app_label,
        'verbose_name': meta.verbose_name,
        'action': action,
    }
class Lock():
    """
    Each instance of this class can be used to acquire an exclusive, system-wide
    (multi-process) lock on a particular name.
    This class will only work on Unix systems viz. systems that provide the
    `fcntl` module. On other systems the class will silently do nothing.
    """
    def __init__(self, lock_name):
        """
        Create a `Lock` object which can create an exclusive lock on the given
        name.
        """
        # The lock file lives in settings.LOCK_DIR; the file is only a handle
        # for flock(), its content is irrelevant. When fcntl is unavailable
        # (non-Unix), handle stays None and all operations become no-ops.
        if 'fcntl' in sys.modules:
            self.handle = open(os.path.join(settings.LOCK_DIR, lock_name), 'w')
        else:
            self.handle = None
    def acquire(self):
        """
        Acquire a lock on the name for which this `Lock` was created.
        """
        # LOCK_EX blocks until the exclusive lock becomes available.
        if self.handle:
            fcntl.flock(self.handle, fcntl.LOCK_EX)
    def release(self):
        """
        Release any lock on the name for which this `Lock` was created.
        """
        if self.handle:
            fcntl.flock(self.handle, fcntl.LOCK_UN)
    def __del__(self):
        # Closing the handle also drops any lock still held on it.
        if self.handle:
            self.handle.close()
class SimpleTimezone(tzinfo):
    """
    A fixed-offset timezone whose name and DST adjustment are unknown.
    """
    def __init__(self, offset):
        # *offset* is the UTC offset in minutes.
        self._offset = timedelta(minutes=offset)
    def utcoffset(self, dt):
        return self._offset
    def tzname(self, dt):
        # Name unknown by design.
        return None
    def dst(self, dt):
        # DST adjustment unknown by design.
        return None
Juniper/neutron | neutron/plugins/ryu/db/api_v2.py | Python | apache-2.0 | 8,246 | 0 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Isaku Yamahata <yamahata at private email ne jp>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import exc as sa_exc
from sqlalchemy import func
from sqlalchemy.orm import exc as orm_exc
from neutron.common import exceptions as n_exc
import neutron.db.api as db
from neutron.db import models_v2
from neutron.db import securitygroups_db as sg_db
from neutron.extensions import securitygroup as ext_sg
from neutron import manager
from neutron.openstack.common import log as logging
from neutron.plugins.ryu.db import models_v2 as ryu_models_v2
LOG = logging.getLogger(__name__)
def network_all_tenant_list():
    """Return every network row, regardless of tenant."""
    return db.get_session().query(models_v2.Network).all()
def get_port_from_device(port_id):
    # Build the port dict (including its security-group ids) for *port_id*;
    # returns None when no such port exists.
    LOG.debug(_("get_port_from_device() called:port_id=%s"), port_id)
    session = db.get_session()
    sg_binding_port = sg_db.SecurityGroupPortBinding.port_id
    # One result row per (port, security group) pair; the outer join keeps
    # ports that have no security group (their sg_id column is None).
    query = session.query(models_v2.Port,
                          sg_db.SecurityGroupPortBinding.security_group_id)
    query = query.outerjoin(sg_db.SecurityGroupPortBinding,
                            models_v2.Port.id == sg_binding_port)
    query = query.filter(models_v2.Port.id == port_id)
    port_and_sgs = query.all()
    if not port_and_sgs:
        return None
    # Every row carries the same Port object; take it from the first row.
    port = port_and_sgs[0][0]
    plugin = manager.NeutronManager.get_plugin()
    port_dict = plugin._make_port_dict(port)
    # Filter out the None sg_id produced by the outer join for SG-less ports.
    port_dict[ext_sg.SECURITYGROUPS] = [
        sg_id for port_, sg_id in port_and_sgs if sg_id]
    port_dict['security_group_rules'] = []
    port_dict['security_group_source_groups'] = []
    port_dict['fixed_ips'] = [ip['ip_address'] for ip in port['fixed_ips']]
    return port_dict
class TunnelKey(object):
# VLAN: 12 bits
# GRE, VXLAN: 24bits
# TODO(yamahata): STT: 64bits
_KEY_MIN_HARD = 1
_KEY_MAX_HARD = 0xffffffff
def __init__(self, key_min=_KEY_MIN_HARD, key_max=_KEY_MAX_HARD):
self.key_min = key_min
self.key_max = key_max
if (key_min < self._KEY_MIN_HARD or key_max > self._KEY_MAX_HARD or
key_min > key_max):
raise ValueError(_('Invalid tunnel key options '
'tunnel_key_min: %(key_min)d '
'tunnel_key_max: %(key_max)d. '
'Using default value') % {'key_min': key_min,
'key_max': key_max})
    def _last_key(self, session):
        # Fetch (or lazily create) the singleton TunnelKeyLast row that
        # remembers where the previous allocation scan stopped.
        try:
            return session.query(ryu_models_v2.TunnelKeyLast).one()
        except orm_exc.MultipleResultsFound:
            # Recover from duplicated rows: keep the maximum and start over.
            # NOTE(review): session.query(func.max(...)) returns a Query
            # object, not a scalar, so the `>` comparison below looks
            # suspect -- confirm against the sqlalchemy version in use.
            max_key = session.query(
                func.max(ryu_models_v2.TunnelKeyLast.last_key))
            if max_key > self.key_max:
                max_key = self.key_min
            session.query(ryu_models_v2.TunnelKeyLast).delete()
            last_key = ryu_models_v2.TunnelKeyLast(last_key=max_key)
        except orm_exc.NoResultFound:
            # First use: start scanning from the bottom of the range.
            last_key = ryu_models_v2.TunnelKeyLast(last_key=self.key_min)
        session.add(last_key)
        session.flush()
        return session.query(ryu_models_v2.TunnelKeyLast).one()
def _find_key(self, session, last_key):
"""Try to find unused tunnel key.
Trying to find unused tunnel key in TunnelKey table starting
from last_key + 1.
When all keys are used, raise sqlalchemy.orm.exc.NoResultFound
"""
# key 0 is used for special meanings. So don't allocate 0.
# sqlite doesn't support
# '(select order by limit) union all (select order by limit) '
# 'order by limit'
# So do it manually
# new_key = session.query("new_key").from_statement(
# # If last_key + 1 isn't used, it's the result
# 'SELECT new_key '
# 'FROM (SELECT :last_key + 1 AS new_key) q1 '
# 'WHERE NOT EXISTS '
# '(SELECT 1 FROM tunnelkeys WHERE tunnel_key = :last_key + 1) '
#
# 'UNION ALL '
#
# # if last_key + 1 used,
# # find the least unused key from last_key + 1
# '(SELECT t.tunnel_key + 1 AS new_key '
# 'FROM tunnelkeys t '
# 'WHERE NOT EXISTS '
# '(SELECT 1 FROM tunnelkeys ti '
# ' WHERE ti.tunnel_key = t.tunnel_key + 1) '
# 'AND t.t | unnel_key >= :last_key '
# 'ORDER BY new_key LIMIT 1) '
#
# 'ORDER BY new_key LIMIT 1'
# ).params(last_key=last_key).one()
try:
new_key = session.query("new_key").from_statement(
# If last_key + 1 isn't used, it's the result
'SELECT new_key '
| 'FROM (SELECT :last_key + 1 AS new_key) q1 '
'WHERE NOT EXISTS '
'(SELECT 1 FROM tunnelkeys WHERE tunnel_key = :last_key + 1) '
).params(last_key=last_key).one()
except orm_exc.NoResultFound:
new_key = session.query("new_key").from_statement(
# if last_key + 1 used,
# find the least unused key from last_key + 1
'(SELECT t.tunnel_key + 1 AS new_key '
'FROM tunnelkeys t '
'WHERE NOT EXISTS '
'(SELECT 1 FROM tunnelkeys ti '
' WHERE ti.tunnel_key = t.tunnel_key + 1) '
'AND t.tunnel_key >= :last_key '
'ORDER BY new_key LIMIT 1) '
).params(last_key=last_key).one()
new_key = new_key[0] # the result is tuple.
LOG.debug(_("last_key %(last_key)s new_key %(new_key)s"),
{'last_key': last_key, 'new_key': new_key})
if new_key > self.key_max:
LOG.debug(_("No key found"))
raise orm_exc.NoResultFound()
return new_key
def _allocate(self, session, network_id):
last_key = self._last_key(session)
try:
new_key = self._find_key(session, last_key.last_key)
except orm_exc.NoResultFound:
new_key = self._find_key(session, self.key_min)
tunnel_key = ryu_models_v2.TunnelKey(network_id=network_id,
tunnel_key=new_key)
last_key.last_key = new_key
session.add(tunnel_key)
return new_key
_TRANSACTION_RETRY_MAX = 16
def allocate(self, session, network_id):
count = 0
while True:
session.begin(subtransactions=True)
try:
new_key = self._allocate(session, network_id)
session.commit()
break
except sa_exc.SQLAlchemyError:
session.rollback()
count += 1
if count > self._TRANSACTION_RETRY_MAX:
# if this happens too often, increase _TRANSACTION_RETRY_MAX
LOG.warn(_("Transaction retry exhausted (%d). "
"Abandoned tunnel key allocation."), count)
raise n_exc.ResourceExhausted()
return new_key
def delete(self, session, network_id):
session.query(ryu_models_v2.TunnelKey).filter_by(
network_id=network_id).delete()
session.flush()
def all_list(self):
session = db.get_session()
return session.query(ryu_models_v2.TunnelKey).all()
def set_port_status(session, port_id, status):
try:
port = session.query(models_v2.Port).filter_by(id=port_id).one()
port['status'] = status
session.merge(port)
session.flush()
except orm_exc |
danielrd6/ifscube | cubetools.py | Python | gpl-3.0 | 39,584 | 0.008034 | """
Functions for the analysis of integral field spectroscopy.
Author: Daniel Ruschel Dutra
Website: https://github.com/danielrd6/ifscube
"""
from numpy import *
import pyfits as pf
import spectools as st
import matplotlib as mpl
import matplotlib.pyplot as plt
from scipy.integrate import trapz
from copy import deepcopy
from voronoi_2d_binning import voronoi_2d_binning
from scipy.optimize import curve_fit
from scipy.optimize import minimize
from scipy.ndimage import gaussian_filter as gf
from scipy.integrate import trapz
from scipy.interpolate import interp1d
from scipy import ndimage
import profiles as lprof
import ppxf
import ppxf_util
def progress(x, xmax, steps=10):
try:
if x%(xmax/steps) == 0:
print '{:2.0f}%\r'.format(float(x)/float(xmax)*100)
except ZeroDivisionError:
pass
class gmosdc:
"""
A class for dealing with data cubes, originally written to work
with GMOS IFU.
"""
def __init__(self, fitsfile, redshift=None, vortab=None):
"""
Initializes the class and loads basic information onto the
object.
Parameters:
-----------
fitstile : string
Name of the FITS file containing the GMOS datacube. This
should be the standard output from the GFCUBE task of the
GEMINI-GMOS IRAF package.
redshift : float
Value of redshift | (z) of the source, if no Doppler
correction has
been applied to the spectra yet.
vortab : string
Name of the file con | taining the Voronoi binning table
Returns:
--------
Nothing.
"""
if len(pf.open(fitsfile)) == 2:
dataext, hdrext = 1,0
elif len(pf.open(fitsfile)) == 1:
dataext, hdrext = 0,0
self.data = pf.getdata(fitsfile,ext=dataext)
self.header_data = pf.getheader(fitsfile, ext=dataext)
self.header = pf.getheader(fitsfile, ext=hdrext)
self.wl = st.get_wl(fitsfile, hdrext=dataext, dimension=0,
dwlkey='CD3_3', wl0key='CRVAL3', pix0key='CRPIX3')
if redshift == None:
try:
redshift = self.header['REDSHIFT']
except KeyError:
print 'WARNING! Redshift not given and not found in the image'\
+ ' header. Using redshift = 0.'
redshift = 0.0
self.restwl = self.wl/(1.+redshift)
try:
if self.header['VORBIN'] and vortab != None:
self.voronoi_tab = vortab
self.binned = True
elif self.header['VORBIN'] and vortab == None:
print 'WARNING! Data has been binned but no binning table has'\
+ ' been given.'
self.binned = True
except KeyError:
self.binned = False
self.fitsfile = fitsfile
self.redshift = redshift
self.spec_indices = column_stack([
ravel(indices(shape(self.data)[1:])[0]),
ravel(indices(shape(self.data)[1:])[1])
])
def continuum(self, writefits=False, outimage=None, fitting_window=None,
copts=None):
"""
Evaluates a polynomial continuum for the whole cube and stores
it in self.cont.
"""
if self.binned:
v = loadtxt(self.voronoi_tab)
xy = v[unique(v[:,2], return_index=True)[1],:2]
else:
xy = self.spec_indices
fw = fitting_window
fwidx = (self.restwl > fw[0]) & (self.restwl < fw[1])
wl = deepcopy(self.restwl[fwidx])
data = deepcopy(self.data[fwidx])
c = zeros(shape(data), dtype='float32')
nspec = len(xy)
if copts == None:
copts = {'degr':3, 'upper_threshold':2,
'lower_threshold':2, 'niterate':5}
try:
copts['returns']
except KeyError:
copts['returns'] = 'function'
for k,h in enumerate(xy):
i,j = h
s = deepcopy(data[:,i,j])
if any(s[:20]) and any(s[-20:]):
try:
cont = st.continuum(wl, s, **copts)
if self.binned:
for l,m in v[v[:,2] == k,:2]:
c[:,l,m] = cont[1]
else:
c[:,i,j] = cont[1]
except TypeError:
print 'Could not find a solution for {:d},{:d}.'\
.format(i,j)
return wl, s
else:
c[:,i,j] = zeros(len(wl), dtype='float32')
self.cont = c
if writefits:
if outimage == None:
outimage = self.fitsfile.replace('.fits','_continuum.fits')
hdr = deepcopy(self.header_data)
try:
hdr['REDSHIFT'] = self.redshift
except KeyError:
hdr.append(('REDSHIFT', self.redshift,
'Redshift used in GMOSDC'))
hdr['CRVAL3'] = wl[0]
hdr.append(('CONTDEGR', copts['degr'],
'Degree of continuum polynomial'))
hdr.append(('CONTNITE', copts['niterate'],
'Continuum rejection iterations'))
hdr.append(('CONTLTR', copts['lower_threshold'],
'Continuum lower threshold'))
hdr.append(('CONTHTR', copts['upper_threshold'],
'Continuum upper threshold'))
pf.writeto(outimage, data=c, header=hdr)
return c
def snr_eval(self, wl_range=[6050,6200], copts=None):
"""
Measures the signal to noise ratio (SNR) for each spectrum in a
data cube, returning an image of the SNR.
Parameters:
-----------
self : gmosdc instance
gmosdc object
wl_range : array like
An array like object containing two wavelength coordinates
that define the SNR window at the rest frame.
copts : dictionary
Options for the continuum fitting function.
Returns:
--------
snr : numpy.ndarray
Image of the SNR for each spectrum.
Description:
------------
This method evaluates the SNR for each spectrum in a data
cube by measuring the residuals of a polynomial continuum
fit. The function CONTINUUM of the SPECTOOLS package is used
to provide the continuum, with zero rejection iterations
and a 3 order polynomial.
"""
noise = zeros(shape(self.data)[1:], dtype='float32')
signal = zeros(shape(self.data)[1:], dtype='float32')
snrwindow = (self.restwl >= wl_range[0]) &\
(self.restwl <= wl_range[1])
data = deepcopy(self.data)
wl = self.restwl[snrwindow]
if copts == None:
copts = {'niterate':0, 'degr':3, 'upper_threshold':3,
'lower_threshold':3, 'returns':'function'}
else:
copts['returns'] = 'function'
for i,j in self.spec_indices:
if any(data[snrwindow,i,j]):
s = data[snrwindow,i,j]
cont = st.continuum(wl, s, **copts)[1]
noise[i,j] = nanstd(s - cont)
signal[i,j] = nanmean(cont)
else:
noise[i,j],signal[i,j] = nan, nan
self.noise = noise
self.signal = signal
return array([signal,noise])
def wlprojection(self, wl0, fwhm=10, filtertype='box', writefits=False,
outimage='wlprojection.fits'):
"""
Writes a projection of the data cube along the wavelength
coordinate, with the flux given by a given type of filter.
Parameters:
-----------
wl0 : float
Central wavelength at the rest frame.
fwhm : float
Full width at half maximum. See 'filtertype'.
filtertype : string
Type of function to be multiplied by the spectrum to return
the argument for the integral.
'box' = Box function that is zero every |
tailhook/zerogw | examples/tabbedchat/tabbedchat/redis.py | Python | mit | 2,974 | 0.003026 | """Redis client protocol
We have own redis wrapper because redis-py does not support python3. We also
don't want another dependency
"""
import socket
def encode_command(buf, parts):
add = buf.extend
add(('*%d\r\n' % len(parts)).encode('ascii'))
for part in parts:
if isinstance(part, str):
part = part.encode('ascii')
add(('$%d\r\n' % len(part)).encode('ascii'))
add(part)
add(b'\r\n')
return buf
class ReplyError(Exception):
"""ERR-style replies from redis are wrapped in this exception"""
class Redis(object):
def __init__(self, socket_path=None, host='localhost', port=6379):
if socket_path:
self._sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self._sock.connect(socket_path)
else:
self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._sock.connect((host, port))
self._buf = bytearray()
def execute(self, *args):
buf = bytearray()
encode_command(buf, args)
self._sock.sendall(buf)
res = self._read_one()
if isinstance(res, ReplyError):
raise res
return res
def bulk(self, commands):
buf = bytearray()
for cmd in commands:
encode_command(buf, cmd)
self._sock.sendall(buf)
result = []
for i in range(len(commands)):
result.append(self._read_one())
if any(isinstance(r, ReplyError) for r in result):
raise ReplyError([r for r in result if isinstance(r, ReplyError)])
return result
def _read_one(self):
line = self._read_line()
ch = line[0]
if ch == 42: # b'*'
cnt = int(line[1:])
return [self._read_one() for i in range(cnt)]
elif ch == 43: # b'+'
return line[1:].decode('ascii')
elif ch == 45: # b'-'
return ReplyError(line[1:].decode('ascii'))
elif ch == 58: # b':'
return int(line[1:])
| elif ch == 36: # b'$'
ln = int(line[1:])
if ln < 0:
return None
res = self._read_slice(ln)
ass | ert self._read_line() == b''
return res
else:
raise NotImplementedError(ch)
def _read_line(self):
while True:
idx = self._buf.find(b'\r\n')
if idx >= 0:
line = self._buf[:idx]
del self._buf[:idx+2]
return line
chunk = self._sock.recv(16384)
if not chunk:
raise EOFError("End of file")
self._buf += chunk
def _read_slice(self, size):
while len(self._buf) < size:
chunk = self._sock.recv(16384)
if not chunk:
raise EOFError("End of file")
self._buf += chunk
res = self._buf[:size]
del self._buf[:size]
return res
|
lucyparsons/OpenOversight | OpenOversight/app/utils.py | Python | gpl-3.0 | 21,683 | 0.001245 | from typing import Optional
from future.utils import iteritems
from urllib.request import urlopen
from io import BytesIO
import boto3
from botocore.exceptions import ClientError
import botocore
import datetime
import hashlib
import os
import random
import sys
from traceback import format_exc
from distutils.util import strtobool
from sqlalchemy import func, or_
from sqlalchemy.sql.expression import cast
from sqlalchemy.orm import selectinload
import imghdr as imghdr
from flask import current_app, url_for
from flask_login import current_user
from PIL import Image as Pimage
from PIL.PngImagePlugin import PngImageFile
from .models import (db, Officer, Assignment, Job, Image, Face, User, Unit, Department,
Incident, Location, LicensePlate, Link, Note, Description, Salary)
from .main.choices import RACE_CHOICES, GENDER_CHOICES
# Ensure the file is read/write by the creator only
SAVED_UMASK = os.umask(0o077)
def set_dynamic_default(form_field, value):
# First we ensure no value is set already
if not form_field.data:
try: # Try to use a default if there is one.
form_field.data = value
except AttributeError:
pass
def get_or_create(session, model, defaults=None, **kwargs):
if 'csrf_token' in kwargs:
kwargs.pop('csrf_token')
# Because id is a keyword in Python, officers member is called oo_id
if 'oo_id' in kwargs:
kwargs = {'id': kwargs['oo_id']}
# We need to convert empty strings to None for filter_by
# as '' != None in the database and
# such that we don't create fields with empty strings instead
# of null.
filter_params = {}
for key, value in kwargs.items():
if value != '':
filter_params.update({key: value})
else:
filter_params.update({key: None})
instance = model.query.filter_by(**filter_params).first()
if instance:
return instance, False
else:
params = dict((k, v) for k, v in iteritems(filter_params))
params.update(defaults or {})
instance = model(**params)
session.add(instance)
session.flush()
return instance, True
def unit_choices(department_id: Optional[int] = None):
if department_id is not None:
return db.session.query(Unit).filter_by(department_id=department_id).order_by(Unit.descrip.asc()).all()
return db.session.query(Unit).order_by(Unit.descrip.asc()).all()
def dept_choices():
return db.session.query(Department).all()
def add_new_assignment(officer_id, form):
if form.unit.data:
unit_id = form.unit.data.id
else:
unit_id = None
job = Job.query\
.filter_by(department_id=form.job_title.data.department_id,
job_title=form.job_title.data.job_title)\
.one_or_none()
new_assignment = Assignment(officer_id=officer_id,
star_no=form.star_no.data,
job_id=job.id,
unit_id=unit_id,
star_date=form.star_date.data,
resign_date=form.resign_date.data)
db.session.add(new_assignment)
db.session.commit()
def edit_existing_assignment(assignment, form):
assignment.star_no = form.star_no.data
job = form.job_title.data
assignment.job_id = job.id
if form.unit.data:
officer_unit = form.unit.data.id
else:
officer_unit = None
assignment.unit_id = officer_unit
assignment.star_date = form.star_date.data
assignment.resign_date = form.resign_date.data
db.session.add(assignment)
db.session.commit()
return assignment
def add_officer_profile(form, current_user):
officer = Officer(first_name=fo | rm.first_name.data,
last_name=form.last_name.data,
middle_initial=form.middle_initial.data,
suffix=form.suffix.data,
race=form.race.data,
gender=form.gender.data,
birth_year=form.birth_year.data,
employment_date=form.em | ployment_date.data,
department_id=form.department.data.id)
db.session.add(officer)
db.session.commit()
if form.unit.data:
officer_unit = form.unit.data
else:
officer_unit = None
assignment = Assignment(baseofficer=officer,
star_no=form.star_no.data,
job_id=form.job_id.data,
unit=officer_unit,
star_date=form.employment_date.data)
db.session.add(assignment)
if form.links.data:
for link in form.data['links']:
# don't try to create with a blank string
if link['url']:
li, _ = get_or_create(db.session, Link, **link)
if li:
officer.links.append(li)
if form.notes.data:
for note in form.data['notes']:
# don't try to create with a blank string
if note['text_contents']:
new_note = Note(
note=note['text_contents'],
user_id=current_user.get_id(),
officer=officer,
date_created=datetime.datetime.now(),
date_updated=datetime.datetime.now())
db.session.add(new_note)
if form.descriptions.data:
for description in form.data['descriptions']:
# don't try to create with a blank string
if description['text_contents']:
new_description = Description(
description=description['text_contents'],
user_id=current_user.get_id(),
officer=officer,
date_created=datetime.datetime.now(),
date_updated=datetime.datetime.now())
db.session.add(new_description)
if form.salaries.data:
for salary in form.data['salaries']:
# don't try to create with a blank string
if salary['salary']:
new_salary = Salary(
officer=officer,
salary=salary['salary'],
overtime_pay=salary['overtime_pay'],
year=salary['year'],
is_fiscal_year=salary['is_fiscal_year'])
db.session.add(new_salary)
db.session.commit()
return officer
def edit_officer_profile(officer, form):
for field, data in iteritems(form.data):
setattr(officer, field, data)
db.session.add(officer)
db.session.commit()
return officer
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in current_app.config['ALLOWED_EXTENSIONS']
def get_random_image(image_query):
if image_query.count() > 0:
rand = random.randrange(0, image_query.count())
return image_query[rand]
else:
return None
def serve_image(filepath):
if 'http' in filepath:
return filepath
if 'static' in filepath:
return url_for('static', filename=filepath.replace('static/', '').lstrip('/'))
def compute_hash(data_to_hash):
return hashlib.sha256(data_to_hash).hexdigest()
def upload_obj_to_s3(file_obj, dest_filename):
s3_client = boto3.client('s3')
# Folder to store files in on S3 is first two chars of dest_filename
s3_folder = dest_filename[0:2]
s3_filename = dest_filename[2:]
file_ending = imghdr.what(None, h=file_obj.read())
file_obj.seek(0)
s3_content_type = "image/%s" % file_ending
s3_path = '{}/{}'.format(s3_folder, s3_filename)
s3_client.upload_fileobj(file_obj,
current_app.config['S3_BUCKET_NAME'],
s3_path,
ExtraArgs={'ContentType': s3_content_type, 'ACL': 'public-read'})
config = s3_client._client_config
config.signature_version = botocore.UNSIGNED
url = boto3.resource(
's3', config=config).meta.client.generate_presigned |
gjsun/MPack | MPack_Core/utils/test.py | Python | mit | 153 | 0.045752 | import nu | mpy as np
from .cosmology import *
from ..Ks_Mstar_Estimate import zmeans
def func1(x):
return x * c_light
def func2 | (x):
return x + zmeans |
fastmonkeys/kuulemma | tests/views/auth/test_activate_account.py | Python | agpl-3.0 | 2,656 | 0 | # -*- coding: utf-8 -*-
# Kuulemma
# Copyright (C) 2014, Fast Monkeys Oy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General | Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import pytest
from flask import url_for
from kuulemma.models import User
from kuulemma.serializers import account_activation_serializer
fr | om tests.factories import UserFactory
@pytest.mark.usefixtures('database')
class ActivateAccountTest(object):
@pytest.fixture
def user(self):
return UserFactory()
@pytest.fixture
def activation_hash(self, user):
return account_activation_serializer.dumps(user.email)
@pytest.fixture
def response(self, client, activation_hash):
return client.get(
url_for(
'auth.activate_account',
activation_hash=activation_hash
),
follow_redirects=True,
)
class TestActivateAccountWithCorrectLink(ActivateAccountTest):
def test_should_return_200(self, response):
assert response.status_code == 200
def test_should_correct_flash_message(self, response):
message = 'Tilisi on aktivoitu'
assert message in response.data.decode('utf8')
def test_should_activate_account(self, user, response):
assert User.query.get(user.id).active
class TestActivateAccountWithAlreadyActivatedUser(ActivateAccountTest):
@pytest.fixture
def user(self):
return UserFactory(active=True)
def test_should_return_200(self, response):
assert response.status_code == 200
def test_should_return_correct_error_flash(self, response):
message = 'Olet jo aktivoinut tilisi.'
assert message in response.data.decode('utf8')
class TestActivateAccountWithWrongHash(ActivateAccountTest):
@pytest.fixture
def activation_hash(self):
return 'random'
def test_should_return_200(self, response):
assert response.status_code == 200
def test_should_return_correct_error_flash(self, response):
message = 'Tarkista osoite'
assert message in response.data.decode('utf8')
|
ossobv/asterisklint | asterisklint/app/vall/app_originate.py | Python | gpl-3.0 | 1,348 | 0 | # AsteriskLint -- an Asterisk PBX config syntax checker
# Copyright (C) 2018 Walter Doekes, OSSO B.V.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/li | censes/>.
from ..base import E_APP_ARG_BADOPT, App, AppArg
class AppOrExten(AppArg):
def validate(self, arg, where):
if arg not in ('app', 'exten'):
E_APP_ARG_BADOPT(
where, argno=self.argno, app=self.app, opts=arg)
class Originate(App):
def __init__(self):
super().__init__(
# arg1 means Application-name or Context
args=[AppArg('tech_data'), AppOrExten('type'), App | Arg('arg1'),
AppArg('arg2'), AppArg('arg3'), AppArg('timeout')],
min_args=3)
def register(app_loader):
app_loader.register(Originate())
|
ballotify/django-backend | ballotify/apps/api_v1/streams/permissions.py | Python | agpl-3.0 | 335 | 0.002985 | from rest_framework.permissions import BasePermission, SAFE_METHODS
class IsOwnerOrReadOnly(BasePermission):
"""
Check if request is safe or | authenticated user is owner.
"""
def has_object_permission(self, request, view, obj):
return request.method in SAFE_METHODS or view.get_stream().owner == request.use | r
|
nmartensen/pandas | pandas/core/indexes/base.py | Python | bsd-3-clause | 141,427 | 0.000035 | import datetime
import warnings
import operator
import numpy as np
from pandas._lib | s import (lib, index as libindex, tslib as libts,
algos as libalgos, join as libjoin,
Timestamp, Timedelta, )
from pandas._libs.lib import is_datetime_array
from pandas.compat import range, u
from pandas.compat.numpy import function as nv
from | pandas import compat
from pandas.core.dtypes.generic import (
ABCSeries,
ABCMultiIndex,
ABCPeriodIndex,
ABCDateOffset)
from pandas.core.dtypes.missing import isna, array_equivalent
from pandas.core.dtypes.common import (
_ensure_int64,
_ensure_object,
_ensure_categorical,
_ensure_platform_int,
is_integer,
is_float,
is_dtype_equal,
is_object_dtype,
is_categorical_dtype,
is_interval_dtype,
is_bool,
is_bool_dtype,
is_signed_integer_dtype,
is_unsigned_integer_dtype,
is_integer_dtype, is_float_dtype,
is_datetime64_any_dtype,
is_timedelta64_dtype,
needs_i8_conversion,
is_iterator, is_list_like,
is_scalar)
from pandas.core.common import (is_bool_indexer,
_values_from_object,
_asarray_tuplesafe)
from pandas.core.base import PandasObject, IndexOpsMixin
import pandas.core.base as base
from pandas.util._decorators import (
Appender, Substitution, cache_readonly, deprecate_kwarg)
from pandas.core.indexes.frozen import FrozenList
import pandas.core.common as com
import pandas.core.dtypes.concat as _concat
import pandas.core.missing as missing
import pandas.core.algorithms as algos
import pandas.core.sorting as sorting
from pandas.io.formats.printing import pprint_thing
from pandas.core.ops import _comp_method_OBJECT_ARRAY
from pandas.core import strings
from pandas.core.config import get_option
# simplify
default_pprint = lambda x, max_seq_items=None: \
pprint_thing(x, escape_chars=('\t', '\r', '\n'), quote_strings=True,
max_seq_items=max_seq_items)
__all__ = ['Index']
_unsortable_types = frozenset(('mixed', 'mixed-integer'))
_index_doc_kwargs = dict(klass='Index', inplace='',
target_klass='Index',
unique='Index', duplicated='np.ndarray')
_index_shared_docs = dict()
def _try_get_item(x):
try:
return x.item()
except AttributeError:
return x
class InvalidIndexError(Exception):
pass
_o_dtype = np.dtype(object)
_Identity = object
def _new_Index(cls, d):
""" This is called upon unpickling, rather than the default which doesn't
have arguments and breaks __new__
"""
# required for backward compat, because PI can't be instantiated with
# ordinals through __new__ GH #13277
if issubclass(cls, ABCPeriodIndex):
from pandas.core.indexes.period import _new_PeriodIndex
return _new_PeriodIndex(cls, **d)
return cls.__new__(cls, **d)
class Index(IndexOpsMixin, PandasObject):
"""
Immutable ndarray implementing an ordered, sliceable set. The basic object
storing axis labels for all pandas objects
Parameters
----------
data : array-like (1-dimensional)
dtype : NumPy dtype (default: object)
copy : bool
Make a copy of input ndarray
name : object
Name to be stored in the index
tupleize_cols : bool (default: True)
When True, attempt to create a MultiIndex if possible
Notes
-----
An Index instance can **only** contain hashable objects
"""
# To hand over control to subclasses
_join_precedence = 1
# Cython methods
_arrmap = libalgos.arrmap_object
_left_indexer_unique = libjoin.left_join_indexer_unique_object
_left_indexer = libjoin.left_join_indexer_object
_inner_indexer = libjoin.inner_join_indexer_object
_outer_indexer = libjoin.outer_join_indexer_object
_box_scalars = False
_typ = 'index'
_data = None
_id = None
name = None
asi8 = None
_comparables = ['name']
_attributes = ['name']
_allow_index_ops = True
_allow_datetime_index_ops = False
_allow_period_index_ops = False
_is_numeric_dtype = False
_can_hold_na = True
# would we like our indexing holder to defer to us
_defer_to_indexing = False
# prioritize current class for _shallow_copy_with_infer,
# used to infer integers as datetime-likes
_infer_as_myclass = False
_engine_type = libindex.ObjectEngine
_accessors = frozenset(['str'])
# String Methods
str = base.AccessorProperty(strings.StringMethods)
def __new__(cls, data=None, dtype=None, copy=False, name=None,
fastpath=False, tupleize_cols=True, **kwargs):
if name is None and hasattr(data, 'name'):
name = data.name
if fastpath:
return cls._simple_new(data, name)
from .range import RangeIndex
# range
if isinstance(data, RangeIndex):
return RangeIndex(start=data, copy=copy, dtype=dtype, name=name)
elif isinstance(data, range):
return RangeIndex.from_range(data, copy=copy, dtype=dtype,
name=name)
# categorical
if is_categorical_dtype(data) or is_categorical_dtype(dtype):
from .category import CategoricalIndex
return CategoricalIndex(data, copy=copy, name=name, **kwargs)
# interval
if is_interval_dtype(data):
from .interval import IntervalIndex
return IntervalIndex.from_intervals(data, name=name,
copy=copy)
# index-like
elif isinstance(data, (np.ndarray, Index, ABCSeries)):
if (is_datetime64_any_dtype(data) or
(dtype is not None and is_datetime64_any_dtype(dtype)) or
'tz' in kwargs):
from pandas.core.indexes.datetimes import DatetimeIndex
result = DatetimeIndex(data, copy=copy, name=name,
dtype=dtype, **kwargs)
if dtype is not None and is_dtype_equal(_o_dtype, dtype):
return Index(result.to_pydatetime(), dtype=_o_dtype)
else:
return result
elif (is_timedelta64_dtype(data) or
(dtype is not None and is_timedelta64_dtype(dtype))):
from pandas.core.indexes.timedeltas import TimedeltaIndex
result = TimedeltaIndex(data, copy=copy, name=name, **kwargs)
if dtype is not None and _o_dtype == dtype:
return Index(result.to_pytimedelta(), dtype=_o_dtype)
else:
return result
if dtype is not None:
try:
# we need to avoid having numpy coerce
# things that look like ints/floats to ints unless
# they are actually ints, e.g. '0' and 0.0
# should not be coerced
# GH 11836
if is_integer_dtype(dtype):
inferred = lib.infer_dtype(data)
if inferred == 'integer':
data = np.array(data, copy=copy, dtype=dtype)
elif inferred in ['floating', 'mixed-integer-float']:
if isna(data).any():
raise ValueError('cannot convert float '
'NaN to integer')
# If we are actually all equal to integers,
# then coerce to integer.
try:
return cls._try_convert_to_int_index(
data, copy, name)
except ValueError:
pass
# Return an actual float index.
from .numeric import Float64Index
return Float64Index(data, copy=copy, dtype=dtype,
|
themutt/plastex | plasTeX/Packages/hyperref.py | Python | mit | 4,885 | 0.012078 | #!/usr/bin/env python
"""
Implementation of the hyperref package
TO DO:
- \autoref doesn't look for \*autorefname, it only looks for \*name
- Layouts
- Forms optional parameters
"""
from plasTeX import Command, Environment
from plasTeX.Base.LaTeX.Crossref import ref, pageref
from nameref import Nameref, nameref
import urlparse
def addBaseURL(self, urlarg):
try:
baseurl = self.ownerDocument.userdata['packages']['hyperref']['baseurl']
return urlparse.urljoin(baseurl, self.attributes[urlarg])
except KeyError: pass
return self.attributes[urlarg]
# Basic macros
ref.args = '* %s' % ref.args
pageref.args = '* %s' % pageref.args
class href(Command):
args = 'url:url self'
def invoke(self, tex):
res = Command.invoke(self, tex)
self.attributes['url'] = addBaseURL(self, 'url')
return res
class url(Command):
args = 'url:url'
def invoke(self, tex):
res = Command.invoke(self, tex)
self.attributes['url'] = addBaseURL(self, 'url')
return res
class nolinkurl(Command):
args = 'url:url'
def invoke(self, tex):
res = Command.invoke(self, tex)
self.attributes['url'] = addBaseURL(self, 'url')
return res
class hyperbaseurl(Command):
args = 'base:url'
def invoke(self, tex):
res = Command.invoke(self, tex)
data = self.ownerDocument.userdata
if 'packages' not in data:
data['packages'] = {}
if 'hyperref' not in data['packages']:
data['packages']['hyperref'] = {}
self.ownerDocument.userdata['packages']['hyperref']['baseurl'] = self.attributes['base']
return res
class hyperimage(Command):
args = 'url:url self'
def invoke(self, tex):
res = Command.invoke(self, tex)
self.attributes['url'] = addBaseURL(self, 'url')
return res
class hyperdef(Command):
args = 'category name self'
class hyperref(Command):
'''
hyperref has a dual personality depending on whether or not
the first argument is square-bracketed. We only support the
square bracket version for now.
'''
#args = 'url:url category name self'
args = '[label:idref] self'
#def invoke(self, tex):
# res = Command.invoke(self, tex)
# self.attributes['url'] = addBaseURL(self, 'url')
# return res
class hyperlink(Command):
args = 'label:idref self'
class hypertarget(Command):
counter = 'hypertarget' # so we can link to it
args = 'label:id self'
class hypertargetname(Command):
""" Dummy class for hypertarget macro """
unicode = ''
class thehypertarget(Command):
""" Dummy class for | hypertarget macro """
unicode = ''
class phantomsection(Command):
pass
class autoref(Command):
args = 'label:idref'
class pdfstringdef(Command):
args = 'macroname:string tex:string'
class textorpdfstring(Command):
| args = 'tex:string pdf:string'
class pdfstringdefDisableCommands(Command):
args = 'tex:string'
class hypercalcbp(Command):
args = 'size:string'
# Forms
class Form(Environment):
args = '[ parameters:dict ]'
class TextField(Command):
args = '[ parameters:dict ] label'
class CheckBox(Command):
args = '[ parameters:dict ] label'
class ChoiceMenu(Command):
args = '[ parameters:dict ] label choices:list'
class PushButton(Command):
args = '[ parameters:dict ] label'
class Submit(Command):
args = '[ parameters:dict ] label'
class Reset(Command):
args = '[ parameters:dict ] label'
class LayoutTextField(Command):
args = 'label field'
class LayoutChoiceField(Command):
args = 'label field'
class LayoutCheckField(Command):
args = 'label field'
class MakeRadioField(Command):
args = 'width height'
class MakeCheckField(Command):
args = 'width height'
class MakeTextField(Command):
args = 'width height'
class MakeChoiceField(Command):
args = 'width height'
class MakeButtonField(Command):
args = 'self'
class DefaultHeightofSubmit(Command):
args = 'size:dimen'
class DefaultWidthofSubmit(Command):
args = 'size:dimen'
class DefaultHeightofReset(Command):
args = 'size:dimen'
class DefaultWidthofReset(Command):
args = 'size:dimen'
class DefaultHeightofCheckBox(Command):
args = 'size:dimen'
class DefaultWidthofCheckBox(Command):
args = 'size:dimen'
class DefaultHeightofChoiceMenu(Command):
args = 'size:dimen'
class DefaultWidthofChoiceMenu(Command):
args = 'size:dimen'
class DefaultHeightofText(Command):
args = 'size:dimen'
class DefaultWidthofText(Command):
args = 'size:dimen'
class pdfbookmark(Command):
args = '[level:number] text name'
class currentpdfbookmark(Command):
args = 'text name'
class subpdfbookmark(Command):
args = 'text name'
class belowpdfbookmark(Command):
args = 'text name'
|
bm424/churchmanager | migrations/0002_church_wide_crop.py | Python | mit | 641 | 0.00156 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2016-12-22 12:05
from __future__ import unicode_literals
|
from django.db import migrations
import image_cropping.fields
class Migration(migrations.Migration):
    # Auto-generated Django 1.10 migration: adds a 'wide_crop' ratio field
    # (django-image-cropping) tied to the existing ``church.photo`` image,
    # with a fixed 768x240 crop box.
    dependencies = [
        ('churchmanager', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='church',
            name='wide_crop',
            # NOTE(review): 'hide_im | age_field' appears to be a garbled
            # 'hide_image_field' from extraction -- confirm against the
            # original migration before running.
            field=image_cropping.fields.ImageRatioField('photo', '768x240', adapt_rotation=False, allow_fullsize=False, free_crop=False, help_text=None, hide_im | age_field=False, size_warning=False, verbose_name='wide crop'),
        ),
    ]
|
cing/ChannelAnalysis | ChannelAnalysis/CoordAnalysis/Grouping.py | Python | mit | 9,522 | 0.005146 | #!/usr/bin/python
###############################################################################
#
# This script produces state labels for all ionic states in MDAnalysis output
# based on coordination integers and groups these according to a passed
# regular expression. Though, the passed state_labels could be any numerical
# label at a time step. This script merely outputs statistics for each traj
# and then across the entire dataset.
#
# Example: For 13/26/39/...-column data with type like:
# 1.0 -0.13 -0.193 0.522 0.0 1.0 0.0 0.0 0.0 2.0 9.0 2.0 1748.0
#
# python State_Grouping.py -f f2.out f3.out -m 3 -c 13 -remove 2000
# -i "(.[^-0+]|[^-0+].)[-0][-0][-0][-0]"
# "\+\+[-0][-0][-0][-0]"
# "(.[^-+0]|[^-+0].)(.[^-+0]|[^-+0].)[-0][-0]"
# "\+\+(.[^-0+]|[^-0+].)[-0][-0]"
# -sf 5 6
#
# This would read in columns 5 and 6 for each ion at a timestep,
# produce state labels like ++0100, 101010, ++00--, 1010-- and then
# match them to the four passed regular expressions. This produces
# data like this where there is N+2 lists where N is the number of
# files passed in the -f argument above:
#
# [[2.0, 0.43, 0.53, 0.01, 0.02, 0.00, 0.48],
# [3.0, 0.13, 0.29, 0.16, 0.40, 0.00, 0.87],
# ['MEAN', 0.28, 0.41, 0.09, 0.21, 0.00, 0.67],
# ['STDERR', 0.14, 0.11, 0.07, 0.18, 0.00, 0.19]]
#
# By Chris Ing, 2013 for Python 2.7
#
###############################################################################
from argparse import ArgumentParser
from numpy import mean
from scipy.stats import sem
from collections import defaultdict
from re import match
from ChannelAnalysis.CoordAnalysis.Preprocessor import *
# This function counts state occupancy in each trajectory. It will return the
# state populations for each trajectory and then the associated stats.
def state_counter(data_floats, data_states, all_possible_states, traj_col=11):
    """Tally how often each state occurs in each trajectory.

    ``data_states`` is a list of ``(state_label, state_id)`` tuples aligned
    row-for-row with ``data_floats``; ``traj_col`` is the column of each
    float row holding the trajectory number.  Returns the per-trajectory
    populations plus MEAN/STDERR rows, via count_totals_to_percents.
    """
    # trajectory number -> state id -> number of frames observed in it
    counts = defaultdict(lambda: defaultdict(int))
    for row, (state_label, state_id) in zip(data_floats, data_states):
        counts[row[traj_col]][state_id] += 1
    # Touch every known state in every trajectory so states that never
    # occurred still register as zero -- needed so the mean across
    # trajectories later sees a full set of entries.
    for per_traj in counts.values():
        for state_id in all_possible_states:
            per_traj[state_id] += 0
    # Convert raw counts to fractions plus mean/stderr statistics.
    return count_totals_to_percents(counts)
# This is a helper function that takes the datatype generated in
# *_counter functions (trajnum dict -> state_id -> integer counts)
# and converts this to populations in a list without weighting like
# the occupancy count function.
def count_totals_to_percents(count_totals):
    """Convert raw per-trajectory state counts to occupancy fractions.

    ``count_totals`` maps trajectory id -> state id -> observation count
    (the datatype built by the *_counter functions).  Returns a pair of
    plain dicts ``(percents, indices)`` where ``percents[traj]`` lists the
    population fraction of each state in that trajectory and
    ``indices[traj]`` the matching state ids.  Two pseudo-trajectory keys,
    'MEAN' and 'STDERR', carry the across-trajectory mean and standard
    error (scipy ``sem``, ddof=1) for each state.

    Note: a trajectory whose counts sum to zero would divide by zero;
    callers are expected to supply at least one nonzero count per
    trajectory (the original behaved the same way).
    """
    # Percentages and their matching state-id indices, per trajectory.
    ion_count_percents = defaultdict(list)
    ion_count_indices = defaultdict(list)
    # .items() instead of the Python2-only .iteritems(): identical
    # iteration on 2.7, and keeps the function working on Python 3.
    for traj_id, count_dict in count_totals.items():
        traj_total_lines = float(sum(count_dict.values()))
        for ion_state, ion_count in count_dict.items():
            ion_count_percents[traj_id].append(ion_count / traj_total_lines)
            ion_count_indices[traj_id].append(ion_state)
    # Regroup the fractions by state id so we can average across
    # trajectories.
    avgs_by_state = defaultdict(list)
    for traj_id, percents in ion_count_percents.items():
        state_ids = ion_count_indices[traj_id]
        for state_id, percent in zip(state_ids, percents):
            avgs_by_state[state_id].append(percent)
    # Append the statistics rows after the regrouping pass is complete.
    for state_id, avg in avgs_by_state.items():
        ion_count_percents['MEAN'].append(mean(avg))
        ion_count_indices['MEAN'].append(state_id)
        ion_count_percents['STDERR'].append(sem(avg))
        ion_count_indices['STDERR'].append(state_id)
    return (dict(ion_count_percents), dict(ion_count_indices))
if __name__ == '__main__':
parser = ArgumentParser(
description='This script takes regex expressions for a state label\
and outputs how many of your states are classified by that label\
as well as the population of those states in the dataset')
parser.add_argument(
'-f', dest='filenames', type=str, nargs="+", required=True,
help='a filename of coordination data from MDAnalysis traje | ctory data')
parser.add_argument(
'-m', dest='max_ions', type=int, required=True,
help='the maximum number of ions in the channel to consider')
parser.add_argument(
'-c', dest='num_cols', type=int, default=13,
help='the number of columns per ion in the input')
parser.add_argument(
'-remove', dest='remove_frames', type=int, default=0,
help='this is a number o | f frames to remove from the start of the data')
parser.add_argument(
'-s', dest='sort_col', type=int, default=3,
help='a zero inclusive column number to sort your row on, typically x,y,z')
parser.add_argument(
'-sc', dest='sort_cut', type=float, default=0.0,
help='a value on the sort_col range to classify zero coordinated data')
parser.add_argument(
'-sf', dest='sf_col', type=int, nargs="+", default=[5,6],
help='the coordination integer columns that define the selectivity filter')
parser.add_argument(
'-t', dest='traj_col', type=int, default=11,
help='a zero inclusive column number that contains the run number')
parser.add_argument(
'-o', dest='outfile', type=str, default=None,
help='the file to output the sorted padding output of all input files')
parser.add_argument(
'--addtime', dest='add_time', action="store_true", default=False,
help='an optional argument to add time columns to each ion grouping')
parser.add_argument(
'-i', dest='regex', type=str, nargs="+",
help='a list of regex values in quotes')
parser.add_argument(
'-r', dest='resid_col', type=int, default=12,
help='a zero inclusive column number that contains the ion resid')
args = parser.parse_args()
data_f_padded = process_input(filenames=args.filenames,
num_cols=args.num_cols,
max_ions=args.max_ions,
remove_frames=args.remove_frames,
traj_col=args.traj_col,
sort_col=args.sort_col,
add_time=args.add_time,
padded=True)
# Here you can choose to compute regular expression occupany states
# or species occupancy states depending on your fancy.
if False:
data_f_regex = regex_columns(data_f_padded, regex_strings=args.regex,
num_cols=args.num_cols,
sort_col=args.sort_col,
sort_cut=args.sort_cut,
sf_col=args.sf_col,
max_ions=args.max_ions)
print "Regex Macrostate Occupancy"
print state_counter(data_f_padded, data_f_regex, range(len(args.regex)),
traj_col=args.traj_col)
else:
data_species = species_columns(filenames=args.filenames,
|
cleac/bool_to_algeb | lab/exceptions.py | Python | mit | 359 | 0 |
class ParsingException(Exception):
    """Raised when input text cannot be parsed into an expression.

    The first positional argument is the human-readable message; any
    extra args/kwargs are forwarded to Exception unchanged.
    """
    # Fix: the class name was garbled by extraction ('Pa | rsing');
    # restored to ParsingException so the module is importable again.
    def __init__(self, message, *args, **kargs):
        super().__init__(message, *args, **kargs)
class OperatorNotFoundError(Exception):
    """Raised when a requested operator name has no known implementation.

    The message embeds the offending operator; extra args/kwargs are
    forwarded to Exception unchanged.
    """
    # Fix: a stray ' | ' from extraction split the '**kargs' line;
    # restored so the constructor parses.
    def __init__(self, operator, *args, **kargs):
        super().__init__(
            'Operator "{}" not found'.format(operator),
            *args,
            **kargs
        )
|
mmboulhosa/lulacoin | share/qt/extract_strings_qt.py | Python | mit | 2,052 | 0.004386 | #!/usr/bin/python
'''
Extract _("...") strings for translation and convert to Qt4 stringdefs so that
they can be picked up by Qt linguist.
'''
from subprocess import Popen, PIPE
import glob
import operator
import os
OUT_CPP="src/qt/lulacoinstrings.cpp"
EMPTY=['""']
def parse_po(text):
    """
    Parse 'po' format produced by xgettext.

    Return a list of (msgid, msgstr) tuples, where each element is the
    list of quoted source lines that make up a (possibly multi-line)
    message id / message string.
    """
    # Fix: extraction garbled two tokens ('F | alse', 's | tartswith');
    # restored to False / startswith so the function runs.
    messages = []
    msgid = []
    msgstr = []
    in_msgid = False
    in_msgstr = False
    for line in text.split('\n'):
        line = line.rstrip('\r')  # tolerate CRLF line endings
        if line.startswith('msgid '):
            # A new msgid terminates the previous entry, if one is open.
            if in_msgstr:
                messages.append((msgid, msgstr))
                in_msgstr = False
            # message start
            in_msgid = True
            msgid = [line[6:]]
        elif line.startswith('msgstr '):
            in_msgid = False
            in_msgstr = True
            msgstr = [line[7:]]
        elif line.startswith('"'):
            # Continuation line: belongs to whichever part is open.
            if in_msgid:
                msgid.append(line)
            if in_msgstr:
                msgstr.append(line)
    if in_msgstr:
        # Flush the final entry (no trailing 'msgid' to trigger it).
        messages.append((msgid, msgstr))
    return messages
# Collect every source file that may contain translatable _() strings.
files = glob.glob('src/*.cpp') + glob.glob('src/*.h')
# xgettext -n --keyword=_ $FILES
# Allow overriding the xgettext binary via the XGETTEXT env var.
XGETTEXT=os.getenv('XGETTEXT', 'xgettext')
# Run xgettext over all sources, writing the PO catalogue to stdout so it
# can be parsed in-process instead of via a temp file.
child = Popen([XGETTEXT,'--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()
messages = parse_po(out)
# Emit the generated C++ translation-unit that Qt linguist will scan.
f = open(OUT_CPP, 'w')
f.write("""
// Copyright (c) 2009-2014 The Bitcoin developers
// Copyright (c) 2014-2015 The LulaCoin developers
//! This file is generated by share/qt/extract_strings_qt.py
#include <QtGlobal>
// Automatically generated by extract_strings.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *lulacoin_strings[] = {\n')
# Sort by msgid for deterministic, diff-friendly output.
messages.sort(key=operator.itemgetter(0))
for (msgid, msgstr) in messages:
    if msgid != EMPTY:
        # A msgid may span several quoted lines; re-join them verbatim.
        f.write('QT_TRANSLATE_NOOP("lulacoin-core", %s),\n' % ('\n'.join(msgid)))
f.write('};\n')
f.close()
|
mlesicko/automaticpoetry | forms/markov.py | Python | mit | 1,023 | 0.061584 | import wordtools
import random
from forms.form import Form
class MarkovForm(Form):
    """A poetry form that assembles sentences from a word-level Markov chain
    trained on validated tweets.

    ``self.data`` maps a word to a dict of follower words and their observed
    transition counts; the empty string "" is the sentinel marking both the
    start and the end of a sentence.

    Fix: two tokens garbled by extraction ('for | word', 'lastWor | d=word')
    restored to 'for word' / 'lastWord=word' so the class parses.
    """

    def __init__(self):
        self.data = {}
        self.data[""] = {}  # chain entry point: followers of the start sentinel
        self.limiter = 0    # counts build() calls, used to throttle output

    def validate(self, tweet):
        """Return the cleaned word list if the tweet is usable, else None."""
        cleaned = wordtools.clean(tweet)
        # Require at least two words so save() records >= 1 real transition.
        if wordtools.validate(cleaned) and len(cleaned) >= 2:
            return cleaned
        else:
            return None

    def save(self, a):
        """Record every adjacent word pair of ``a`` as a transition count.

        NOTE(review): mutates the caller's list in place by adding the
        ""-sentinels at both ends -- confirm callers do not reuse ``a``.
        """
        a.insert(0, "")
        a.append("")
        for i in range(0, len(a) - 1):
            if not a[i] in self.data:
                self.data[a[i]] = {}
            if a[i + 1] in self.data[a[i]]:
                self.data[a[i]][a[i + 1]] += 1
            else:
                self.data[a[i]][a[i + 1]] = 1

    def build(self):
        """Random-walk the chain from the start sentinel; return a sentence.

        Emits nothing until 1000 calls have passed, and then only on every
        300th call, so output stays infrequent relative to input volume.
        """
        self.limiter += 1
        if self.limiter < 1000 or not self.limiter % 300 == 0:
            return None
        s = ""
        lastWord = ""
        while True:
            # Weighted random choice among the followers of lastWord.
            # (Assumes lastWord has at least one follower by the time the
            # limiter allows output -- TODO confirm for sparse corpora.)
            total = 0
            for word in self.data[lastWord]:
                total += self.data[lastWord][word]
            choice = random.randint(0, total - 1)
            total = 0
            for word in self.data[lastWord]:
                total += self.data[lastWord][word]
                if total > choice:
                    lastWord = word
                    s += word + " "
                    break
            if lastWord == "":
                # Walked back to the end-of-sentence sentinel.
                break
        return s.lower()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.