repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
connectIOT/iottoolkit | iottoolkit/core/Agent.py | Python | apache-2.0 | 6,297 | 0.015245 | '''
Created on Sep 15, 2012
Agent classes. Contains references to instances of classes containing observer
handlers and code
Agent Instances are created automatically. Create a named Handler instance under the Agent,
as an instance of the desired handler class,
by create (POST) of a JSON object containing a dictionary of settings
for example Agent.create({'resourceCName': 'addHandler_1','resourceClass': 'addHandler'})
@author: mjkoster
'''
from RESTfulResource import RESTfulResource
from LinkFormatProxy import LinkFormatProxy
import subprocess
class Handler(RESTfulResource): # single base class for handlers to extend directly, contains convenience methods for linking resources
def __init__(self, parentObject=None, resourceDescriptor = {}):
RESTfulResource.__init__(self, parentObject, resourceDescriptor)
self._settings = self._resourceDescriptor # use the constructor descriptor for the initial settings
# link cache keeps endpoints hashed by pathFromBase string, only need to walk the path one time
self._linkBaseDict = self.Resources.get('baseObject').resources
self._linkCache = {}
self._init()
def _init(self):
pass
def get(self, Key=None):
if Key != None :
return self._settings[Key]
else :
return self._settings
def set(self, newSettings): # create an instance of a handler from settings dictionary
self._settings.update(newSettings)
def handleNotify(self, updateRef=None): # external method to call from Observer-Notifier
self._handleNotify(updateRef)
def _handleNotify(self, updateRef=None ): # override this for handling state changes from an observer
pass
def linkToRef(self, linkPath):
'''
takes a path string and walks the object tree from a base dictionary
returns a ref to the resource at the path endpoint
store translations in a hash cache for fast lookup after the first walk
'''
self._linkPath = linkPath
if self._linkPath in self._linkCache.keys() :
return self._linkCache[self._linkPath]
# cache miss, walk path and update cache at end
self._currentDict = self._linkBaseDict
self._path | Elements = linkPath.split('/')
for pathElement in self._pathElements[:-1] : # all but the last, which should be the endpoint
self._currentDict = self._currentDict[pathElement].resources
self._resource = self._currentDict[self._pathElements[-1] | ]
self._linkCache.update({ self._linkPath : self._resource })
return self._resource
def getByLink(self, linkPath):
return self.linkToRef(linkPath).get()
def setByLink(self, linkPath, newValue):
self.linkToRef(linkPath).set(newValue)
class addHandler(Handler): # an example appHandler that adds two values together and stores the result
# define a method for handling state changes in observed resources
def _handleNotify(self, updateRef = None ):
# get the 2 addends, add them, and set the sum location
self._addend1 = self.getByLink(self._settings['addendLink1'])
self._addend2 = self.getByLink(self._settings['addendLink2'])
self.setByLink( self._settings['sumOutLink'], self._addend1 + self._addend2 )
# simple print handler that echoes the value each time an observed resource is updated
class logPrintHandler(Handler):
def _handleNotify(self, resource) :
print resource.Properties.get('resourceName'), ' = ', resource.get()
class BLE_ColorLED_handler(Handler):
def _handleNotify(self, resource = None ):
subprocess.call([("/usr/local/bin/gatttool"),\
("--device="+self._settings['MACaddress']),\
("--addr-type="+self._settings['MACtype']),\
("--char-write"),\
("--handle="+self._settings['charHandle']),\
("--value=0x"+resource.get())])
class Agent(RESTfulResource):
# Agent is a container for Handlers and daemons, instantiated as a resource of a SmartObject
def __init__(self, parentObject=None, resourceDescriptor = {}):
RESTfulResource.__init__(self, parentObject, resourceDescriptor)
self._handlers = {}
def get(self, handlerName=None):
if handlerName == None:
return self._handlers # to get the list of names
else:
if self._handlers.has_key(handlerName) :
return self._handlers[handlerName] # to get reference to handler resources by handler name
return None
# new create takes dictionary built from JSON object POSTed to parent resource
def create(self, resourceDescriptor):
resourceName = resourceDescriptor['resourceName']
resourceClass = resourceDescriptor['resourceClass']
# import the module if it's specified in the descriptor
if resourceDescriptor.has_key('resourceClassPath') :
resourceClassPath = resourceDescriptor['resourceClassPath']
self.importByPath(resourceClassPath)
if resourceName not in self.resources:
# create new instance of the named class and add to resources directory, return the ref
self.resources.update({resourceName : globals()[resourceClass](self, resourceDescriptor)})
#pass the constructor the entire descriptor for creating the properties object
#self.resources.update({resourceName : globals()[resourceClass](self, resourceDescriptor)})
self._handlers.update({resourceName: resourceClass})
return self.resources[resourceName] # returns a reference to the created instance
# need to destroy instance of code module
# FIXME Doesn't seem to work. Need to look at this and recursive import issue, devise dynamic import system
def importByPath(self,classPath):
# separate the module path from the class,import the module, and return the class name
self._components = classPath.split('.')
self._module = __import__( '.'.join(self._components[:-1]) )
return self._module
|
coderbone/SickRage-alt | sickbeard/providers/bitcannon.py | Python | gpl-3.0 | 5,147 | 0.002137 | # coding=utf-8
# Author: Dustyn Gibson <miigotu@gmail.com>
#
# URL: https://sickchill.github.io
#
# This file is part of SickChill.
#
# SickChill is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickChill is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickChill. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, print_function, unicode_literals
# Third Party Imports
import validators
from requests.compat import urljoin
# First Party Imports
from sickbeard import logger, tvcache
from sickchill.helper.common import convert_size, try_int
from sickchill.providers.torrent.TorrentProvider import TorrentProvider
class BitCannonProvider(TorrentProvider):
def __init__(self):
TorrentProvider.__init__(self, "BitCannon")
self.minseed = None
self.minleech = None
self.custom_url = None
self.api_key = None
self.cache = tvcache.TVCache(self, search_params={"RSS": ["tv", "anime"]})
def search(self, search_strings, age=0, ep_obj=None):
results = []
url = "http://localhost:3000/"
if self.custom_url:
if not validators.url(self.custom_url):
logger.log("Invalid custom url set, please check your settings", logger.WARNING)
return results
url = self.custom_url
search_params = {}
anime = ep_obj and ep_obj.show and ep_obj.show.anime
search_params["category"] = ("tv", "anime")[bool(anime)]
if self.api_key:
search_params["apiKey"] = self.api_key
for mode in search_strings:
items = []
logger.log("Search Mode: {0}".format(mode), logger.DEBUG)
for search_string in search_strings[mode]:
search_params["q"] = search_string
if mode != "RSS":
logger.log("Search string: {0}".format
(search_string.decode('utf-8')), logger.DEBUG)
search_url = urljoin(url, "api/search")
parsed_json = self.get_url(search_url, params=search_params, returns="json")
if not parsed_json:
logger.log("No data returned from provider", logger.DEBUG)
continue
if not self._check_auth_from_data(parsed_json):
return results
for result in parsed_json.pop("torrents", {}):
try:
title = result.pop("title", "")
info_hash = result.pop("infoHash", "")
download_url = "magnet:?xt=urn:btih:" + info_hash
if not all([title, download_url, info_hash]):
continue
swarm = result.pop("swarm", None)
if swarm:
seeders = try_int(swarm.pop("seeders", 0))
leechers = try_int(swarm.pop("leechers", 0))
else:
seeders = leechers = 0
if seeders < self.minseed or leechers < self.minleech:
if mode != "RSS":
logger.log("Discarding torrent because it doesn't meet the "
"minimum seeders or leechers: {0} (S:{1} L:{2})".format
| (title, seeders, leechers), logger.DEBUG)
continue
size = convert_size(result.pop("size", -1)) or -1
item = {'title': title, 'link': download_url, 'size': size, 'seeders': seeders, 'leechers': leechers, 'hash': ''}
if mode != "RSS":
logger.log("Found result: {0} with {1} seeders and {2} leechers".format
| (title, seeders, leechers), logger.DEBUG)
items.append(item)
except (AttributeError, TypeError, KeyError, ValueError):
continue
# For each search mode sort all the items by seeders if available
items.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)
results += items
return results
@staticmethod
def _check_auth_from_data(data):
if not all([isinstance(data, dict),
data.pop("status", 200) != 401,
data.pop("message", "") != "Invalid API key"]):
logger.log("Invalid api key. Check your settings", logger.WARNING)
return False
return True
provider = BitCannonProvider()
|
astyl/wxPlotLab | mplotlab/models/projections.py | Python | mit | 920 | 0.031522 | # -*-coding:Utf-8 -*
from abcmodels import AModel
from mplotlab.utils.abctypes import FLOAT,LIST,STRING,BOOL,RegisterType
class AProjection(AModel):
parametersInfo = list(AModel.paramet | ersInfo)
parametersInfo.extend([
("plotmodels",LIST,lambda:[],"plotModels"),
("title", STRING,lambda:"title","axes title"),
("xlabel", STRING,lambda:"","axes xlabel"),
("ylabel", STRING,lambda:"","axes ylabel"),
])
class Projection2D(AProjection):
parametersInfo = list(AProjection.parametersInfo)
parametersInfo.extend([
("autolim",BOOL,lambda:True,"Auto lim axis. Won't use x/y min/max"),
("xmin", | FLOAT,lambda:0.0,"axes xmin"),
("xmax", FLOAT,lambda:1.0,"axes xmax"),
("ymin", FLOAT,lambda:0.0,"axes ymin"),
("ymax", FLOAT,lambda:1.0,"axes ymax"),
])
RegisterType(AProjection)
RegisterType(Projection2D)
|
radekp/qt | util/local_database/qlocalexml2cpp.py | Python | lgpl-2.1 | 18,286 | 0.004812 | #!/usr/bin/env python
#############################################################################
##
## Copyright (C) 2009 Nokia Corporation and/or its subsidiary(-ies).
## All rights reserved.
## Contact: Nokia Corporation (qt-info@nokia.com)
##
## This file is part of the test suite of the Qt Toolkit.
##
## $QT_BEGIN_LICENSE:LGPL$
## No Commercial Usage
## This file contains pre-release code and may not be distributed.
## You may use this file in accordance with the terms and conditions
## contained in the Technology Preview License Agreement accompanying
## this package.
##
## GNU Lesser General Public License Usage
## Alternatively, this file may be used under the terms of the GNU Lesser
## General Public License version 2.1 as published by the Free Software
## Foundation and appearing in the file LICENSE.LGPL included in the
## packaging of this file. Please review the following information to
## ensure the GNU Lesser General Public License version 2.1 requirements
## will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
##
## In addition, as a special exception, Nokia gives you certain additional
## rights. These rights are described in the Nokia Qt LGPL Exception
## version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
##
## If you have questions regarding the use of this file, please contact
## Nokia at qt-info@nokia.com.
##
##
##
##
##
##
##
##
## $QT_END_LICENSE$
##
#############################################################################
import sys
import xml.dom.minidom
def check_static_char_array_length(name, array):
# some compilers like VC6 doesn't allow static arrays more than 64K bytes size.
size = reduce(lambda x, y: x+len(escapedString(y)), array, 0)
if size > 65535:
print "\n\n\n#error Array %s is too long! " % name
sys.stderr.write("\n\n\nERROR: the content of the array '%s' is too long: %d > 65535 " % (name, size))
sys.exit(1)
def wrap_list(lst):
def split(lst, size):
for i in range(len(lst)/size+1):
yield lst[i*size:(i+1)*size]
return ",\n".join(map(lambda x: ", ".join(x), split(lst, 20)))
def firstChildElt(parent, name):
child = parent.firstChild
while child:
if child.nodeType == parent.ELEMENT_NODE \
and (not name or child.nodeName == name):
return child
child = child.nextSibling
return False
def nextSiblingElt(si | bling, name):
sib = sibling.nextSibling
while sib:
if sib.nodeType == sibling.ELEMENT_NODE \
and (not name or sib.nodeName == name):
return sib
sib = sib.nextSibling
return False
def eltText(elt):
result = ""
child = elt.f | irstChild
while child:
if child.nodeType == elt.TEXT_NODE:
if result:
result += " "
result += child.nodeValue
child = child.nextSibling
return result
def loadLanguageMap(doc):
result = {}
language_list_elt = firstChildElt(doc.documentElement, "languageList")
language_elt = firstChildElt(language_list_elt, "language")
while language_elt:
language_id = int(eltText(firstChildElt(language_elt, "id")))
language_name = eltText(firstChildElt(language_elt, "name"))
language_code = eltText(firstChildElt(language_elt, "code"))
result[language_id] = (language_name, language_code)
language_elt = nextSiblingElt(language_elt, "language")
return result
def loadCountryMap(doc):
result = {}
country_list_elt = firstChildElt(doc.documentElement, "countryList")
country_elt = firstChildElt(country_list_elt, "country")
while country_elt:
country_id = int(eltText(firstChildElt(country_elt, "id")))
country_name = eltText(firstChildElt(country_elt, "name"))
country_code = eltText(firstChildElt(country_elt, "code"))
result[country_id] = (country_name, country_code)
country_elt = nextSiblingElt(country_elt, "country")
return result
def loadDefaultMap(doc):
result = {}
list_elt = firstChildElt(doc.documentElement, "defaultCountryList")
elt = firstChildElt(list_elt, "defaultCountry")
while elt:
country = eltText(firstChildElt(elt, "country"));
language = eltText(firstChildElt(elt, "language"));
result[language] = country;
elt = nextSiblingElt(elt, "defaultCountry");
return result
def fixedCountryName(name, dupes):
if name in dupes:
return name + "Country"
return name
def fixedLanguageName(name, dupes):
if name in dupes:
return name + "Language"
return name
def findDupes(country_map, language_map):
country_set = set([ v[0] for a, v in country_map.iteritems() ])
language_set = set([ v[0] for a, v in language_map.iteritems() ])
return country_set & language_set
def languageNameToId(name, language_map):
for key in language_map.keys():
if language_map[key][0] == name:
return key
return -1
def countryNameToId(name, country_map):
for key in country_map.keys():
if country_map[key][0] == name:
return key
return -1
def convertFormat(format):
result = ""
i = 0
while i < len(format):
if format[i] == "'":
result += "'"
i += 1
while i < len(format) and format[i] != "'":
result += format[i]
i += 1
if i < len(format):
result += "'"
i += 1
else:
s = format[i:]
if s.startswith("EEEE"):
result += "dddd"
i += 4
elif s.startswith("EEE"):
result += "ddd"
i += 3
elif s.startswith("a"):
result += "AP"
i += 1
elif s.startswith("z"):
result += "t"
i += 1
elif s.startswith("v"):
i += 1
else:
result += format[i]
i += 1
return result
class Locale:
def __init__(self, elt):
self.language = eltText(firstChildElt(elt, "language"))
self.country = eltText(firstChildElt(elt, "country"))
self.decimal = int(eltText(firstChildElt(elt, "decimal")))
self.group = int(eltText(firstChildElt(elt, "group")))
self.listDelim = int(eltText(firstChildElt(elt, "list")))
self.percent = int(eltText(firstChildElt(elt, "percent")))
self.zero = int(eltText(firstChildElt(elt, "zero")))
self.minus = int(eltText(firstChildElt(elt, "minus")))
self.plus = int(eltText(firstChildElt(elt, "plus")))
self.exp = int(eltText(firstChildElt(elt, "exp")))
self.am = eltText(firstChildElt(elt, "am"))
self.pm = eltText(firstChildElt(elt, "pm"))
self.longDateFormat = convertFormat(eltText(firstChildElt(elt, "longDateFormat")))
self.shortDateFormat = convertFormat(eltText(firstChildElt(elt, "shortDateFormat")))
self.longTimeFormat = convertFormat(eltText(firstChildElt(elt, "longTimeFormat")))
self.shortTimeFormat = convertFormat(eltText(firstChildElt(elt, "shortTimeFormat")))
self.standaloneLongMonths = eltText(firstChildElt(elt, "standaloneLongMonths"))
self.standaloneShortMonths = eltText(firstChildElt(elt, "standaloneShortMonths"))
self.standaloneNarrowMonths = eltText(firstChildElt(elt, "standaloneNarrowMonths"))
self.longMonths = eltText(firstChildElt(elt, "longMonths"))
self.shortMonths = eltText(firstChildElt(elt, "shortMonths"))
self.narrowMonths = eltText(firstChildElt(elt, "narrowMonths"))
self.standaloneLongDays = eltText(firstChildElt(elt, "standaloneLongDays"))
self.standaloneShortDays = eltText(firstChildElt(elt, "standaloneShortDays"))
self.standaloneNarrowDays = eltText(firstChildElt(elt, "standaloneNarrowDays"))
self.longDays = eltText(firstChildElt(elt, "longDays"))
self.shortDays = eltText(firstChildElt(elt, "shortDays"))
self.narrowDays = eltText(firstChildElt(elt, "narrowDay |
mic4ael/indico | indico/util/roles.py | Python | mit | 3,193 | 0.003445 | # This file is part of Indico.
# Copyright (C) 2002 - 2020 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import unicode_literals
import csv
from flask import flash, session
from indico.core.errors import UserValueError
from indico.modules.events.roles.forms import ImportMembersCSVForm
from indico.modules.users import User
from indico.util.i18n import _, ngettext
from indico.util.string import to_unicode, validate_email
from indico.web.flask.templating import get_template_module
from indico.web.util import jsonify_data, jsonify_template
class ImportRoleMembersMixin(object):
"""Import members from a CSV file into a role."""
logger = None
def import_members_from_csv(self, f):
reader = csv.reader(f.read().splitlines())
emails = set()
for num_row, row in enumerate(reader, 1):
if len(row) != 1:
raise UserValueError(_('Row {}: malformed CSV data').format(num_row))
email = to_unicode(row[0]).strip().lower()
if email and not validate_email(email):
raise UserValueError(_('Row {row}: invalid email addres | s: {email}').format(row=num_row, email=email))
if email in emails:
raise UserValueError(_('Row {}: email address is not unique').format(num_row))
emails.add(email)
users = set(User.query.filter(~User.is_deleted, User.all_emails.in_(emails)))
users_emails = {user.email for user in users}
unknown_emails = emails - users_emails
| new_members = users - self.role.members
return new_members, users, unknown_emails
def _process(self):
form = ImportMembersCSVForm()
if form.validate_on_submit():
new_members, users, unknown_emails = self.import_members_from_csv(form.source_file.data)
if form.remove_existing.data:
deleted_members = self.role.members - users
for member in deleted_members:
self.logger.info('User {} removed from role {} by {}'.format(member, self.role, session.user))
self.role.members = users
else:
self.role.members |= users
for user in new_members:
self.logger.info('User {} added to role {} by {}'.format(user, self.role, session.user))
flash(ngettext("{} member has been imported.",
"{} members have been imported.",
len(users)).format(len(users)), 'success')
if unknown_emails:
flash(ngettext("There is no user with this email address: {}",
"There are no users with these email addresses: {}",
len(unknown_emails)).format(', '.join(unknown_emails)), 'warning')
tpl = get_template_module('events/roles/_roles.html')
return jsonify_data(html=tpl.render_role(self.role, collapsed=False, email_button=False))
return jsonify_template('events/roles/import_members.html', form=form, role=self.role)
|
openstack-packages/delorean | dlrn/migrations/versions/7fbd3a18502f_extra_tables_for_votes_on_aggregates.py | Python | apache-2.0 | 2,153 | 0 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Extra tables for votes on aggregates
Revision ID: 7fbd3a18502f
Revises: 00a31f1f39c0
Create Date: 2020-01-16 15:22:32.090726
"""
from alembic import op
import sqlalchemy as sa
# revision id | entifiers, used by Alembic.
revision = '7fbd3a18502f'
down_revision = '00a31f1f39c0'
branch_labels = None
depends_on = None
def upgrade():
# Since SQLite3 does not allow ALTER TABLE statements, we need to do a
# batch operation: http://alembic.zzzcomputing.com/en/latest/batch.html
with op.batch_alter_table('promotions') as batch_op:
batch_op.add_column(sa.Column('aggregate_hash', sa.String(6 | 4),
nullable=True))
op.create_table('civotes_agg',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('ref_hash', sa.String(64), nullable=False),
sa.Column('ci_name', sa.String(256), nullable=True),
sa.Column('ci_url', sa.String(1024), nullable=True),
sa.Column('ci_vote', sa.Boolean(), nullable=True),
sa.Column('ci_in_progress', sa.Boolean(), nullable=True),
sa.Column('timestamp', sa.Integer(), nullable=True),
sa.Column('notes', sa.Text(), nullable=True),
sa.Column('user', sa.String(255),
sa.ForeignKey('users.username')),
sa.PrimaryKeyConstraint('id'))
def downgrade():
op.drop_table('civotes_agg')
with op.batch_alter_table('promotions') as batch_op:
batch_op.drop_column('aggregate_hash')
|
Alan-Jairo/topgeo | doc.py | Python | mit | 103 | 0.009709 | def topgeo ('topgeo'):
"""
Esta libreria funciona para realizar ca | lculos topografico | s.
"""
|
RihanWu/vocabtool | database.py | Python | mit | 323 | 0 | # -*- coding: utf-8 -*-
"""VocabTool database module
This is the database module of 'VocabTool'.
It connects the program with external database
| """
# Standard library
import sqlite3
# TODO: Implement function:add_to_database <RihanW>
def add_to_database | (word):
"""Add the current word to local database"""
pass
|
jongyeob/swpy | swpy/dscovr/__init__.py | Python | gpl-2.0 | 102 | 0.009804 | from swpy.dscovr.clients import | DscovrC | lient, DscovrRTClient
from swpy.dscovr.graph import DscovrGraph |
neurodata/ndstore | scripts/ingest/mitra/jp2kakadu.py | Python | apache-2.0 | 1,931 | 0.016572 | # Copyright 2014 NeuroData (http://neurodata.io)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import argparse
import glob
import subprocess
import pdb
"""
This is a script to convert jp2 to png for Mitra's Data. \
We use Kakadu software for this script. Kakadu only runs on Ubuntu \
and has to have the library added to shared path.
"""
def main():
parser = argparse.ArgumentParser(description='Convert JP2 to PNG')
parser.add_argument('path', action="store", help='Directory with JP2 Files')
parser.add_argument('location', action="store", help='Directory to write to')
result = parser.parse_args()
# Reading all the jp2 files in that directory
filelist = glob.glob(result.path+'*.jp2')
for name in filelist:
print "Opening: {}".format( name )
# Identifying the subdirectory to place the data under
if name.find('F') != | -1:
subfile = 'F/'
elif name.find('IHC') != -1:
subfile = 'IHC/'
elif name.find('N') != -1:
subfile = 'N/'
# Determine the write location of the file. This was /mnt on datascopes
writelocation = result.location+subfile+name.split(result.path)[1].split('_')[3].split('.')[0]
# Call kakadu expand from the command line, specify the input and the output filenames
subprocess.call( [ './kdu_expand' ,'-i', '{}'.format(name), '-o', '{}.tiff'.format(writel | ocation) ] )
if __name__ == "__main__":
main()
|
euccas/CodingPuzzles-Python | leet/source/binarysearch/find_k_closest_elements.py | Python | mit | 4,030 | 0.005955 | class Solution():
def kClosestNumbers(A, target, k):
if A is None or len(A) == 0 or target is None or k is None or k == 0:
return []
start, end = 0, len(A) - 1
closest = 0
while (start + 1 < end):
mid = start + (end - start)//2
if target == A[mid]:
closest = mid
break
elif target < A[mid]:
end = mid
else:
start = mid
if closest != mid:
if abs(A[start] - target) <= abs(A[end] - target):
closest = start
else:
closest = end
return findK(A, target, k, closest)
class Solution1():
def findK(A, target, k, closest_pos):
k_nums = [A[closest_pos]]
left, right = closest_pos - 1, closest_pos + 1
while k - 1 > 0:
if left >= 0 and right <= len(A) - 1:
if abs(A[left] - target) <= abs(A[right] - target):
k_nums.append(A[left])
left -= 1
else:
k_nums.append(A[right])
right += 1
elif left >= 0:
k_nums.append(A[left])
left -= 1
else:
k_nums.append(A[right])
right += 1
k -= 1
return k_nums
class Solution2():
"""
Remove the len(arr) - k farthest numbers
"""
def findClosestElements(self, arr, k, x):
"""
:type arr: List[int]
:type k: int
:type x: int
:rtype: List[int]
"""
if arr is None or len(arr) == 0 or k is None or k == 0 or x is None:
return []
if len(arr) == 1:
return arr
while len(arr) > k:
if abs(arr[0] - x) > abs(arr[-1] - x):
arr.pop(0)
else:
arr.pop(-1)
return arr
class Solution3():
def kClosestNumbers_2(A, target, k):
dic = {i: abs(i - target) for i in A}
res = [j for j in sorted(dic, key=dic.get)]
return res[:k]
class Solution4():
def findClosestElements(self, arr, k, x):
"""
:type arr: List[int]
:type k: int
:type x: int
:rtype: List[int]
"""
if arr is None or len(arr) == 0 or k is None or k == 0 or x | is None:
return []
| # find the number closest to x
idx = self.find_closest_index(arr, x)
return self.find_k(arr, k, x, idx)
def find_closest_index(self, arr, x):
start, end = 0, len(arr) - 1
while start + 1 < end:
mid = start + (end - start) // 2
if arr[mid] == x:
return mid
if arr[mid] > x:
end = mid
else:
start = mid
if abs(arr[start] - x) <= abs(arr[end] - x):
return start
return end
def find_k(self, arr, k, x, pivot_idx):
found = [arr[pivot_idx]]
left, right = pivot_idx - 1, pivot_idx + 1
while len(found) < k and (left >= 0 or right <= len(arr) - 1):
if left >= 0 and right <= len(arr) - 1:
if abs(arr[left] - x) > abs(arr[right] - x):
found.append(arr[right])
right += 1
else:
found.append(arr[left])
left -= 1
elif left >= 0:
found.append(arr[left])
left -= 1
elif right <= len(arr) - 1:
found.append(arr[right])
right += 1
return sorted(found)
if __name__ == "__main__":
sln = Solution4()
res = sln.findClosestElements([1,2,3,4,5], 4, 3)
print(res)
expected_result = [1,2,3,4]
res = sln.findClosestElements([1,4,6,8], 3, 3)
print(res)
expected_result = [4,1,6]
res = sln.findClosestElements([1,4,6,10,20], 21, 4)
print(res)
expected_result = [20,10,6,4]
|
etherkit/OpenBeacon2 | macos/venv/lib/python3.8/site-packages/_pyinstaller_hooks_contrib/tests/conftest.py | Python | gpl-3.0 | 524 | 0 | # ------------------------------------------------------------------
# Copyright (c) 2020 PyInstaller Development Team.
#
# This file i | s distributed under the terms of the GNU General Public
# License (version 2.0 or later).
#
# The full license is available in LICENSE.GPL.txt, distributed with
# this software.
#
# SPDX-License-Identifier: GPL-2.0-or-later
# ------------------------------------------------------------------
# Import all fixtures from PyInstaller into the tests.
from PyInst | aller.utils.conftest import *
|
zhengfish/examples | python3/ftp/ftpmirror.py | Python | gpl-2.0 | 13,096 | 0.0042 | #! /usr/bin/env python
#
# @refer
# http://svn.python.org/projects/python/trunk/Tools/scripts/ftpmirror.py
# @note
#
"""
Mirror a remote ftp subtree into a local directory tree.
usage: ftpmirror [-v] [-q] [-i] [-m] [-n] [-r] [-s pat]
[-l username [-p passwd [-a account]]]
hostname[:port] [remotedir [localdir]]
-v: verbose
-q: quiet
-i: interactive mode
-m: macintosh server (NCSA telnet 2.4) (implies -n -s '*.o')
-n: don't log in
-r: remove local files/directories no longer pertinent
-l username [-p passwd [-a account]]: login info (default .netrc or anonymous)
-s pat: skip files matching pattern
hostname: remote host w/ optional port separated by ':'
remotedir: remote directory (default initial)
localdir: local directory (default current)
"""
import os
import sys
import time
import getopt
import ftplib
import netrc
from fnmatch import fnmatch
# Print usage message and exit
def usage(*args):
sys.stdout = sys.stderr
for msg in args: print(msg)
print(__doc__)
sys.e | xit(2)
verbose = 1 # 0 for -q, 2 for -v
interactive = 0
mac = 0
rmok = 0
nologin = 0
skippats = ['.', '..', '.mirrorinfo']
# Main program: parse command line and start processing
def main():
global verbose, interactive, mac, rmok, nologin
try:
opts, args = getopt.getopt(sys.argv[1:], 'a:bil:mnp:qrs:v')
except getopt.error as msg:
usage(msg)
login = ''
passwd = ''
accoun | t = ''
if not args: usage('hostname missing')
host = args[0]
port = 0
if ':' in host:
host, port = host.split(':', 1)
port = int(port)
try:
auth = netrc.netrc().authenticators(host)
if auth is not None:
login, account, passwd = auth
except (netrc.NetrcParseError, IOError):
pass
for o, a in opts:
if o == '-l': login = a
if o == '-p': passwd = a
if o == '-a': account = a
if o == '-v': verbose = verbose + 1
if o == '-q': verbose = 0
if o == '-i': interactive = 1
if o == '-m': mac = 1; nologin = 1; skippats.append('*.o')
if o == '-n': nologin = 1
if o == '-r': rmok = 1
if o == '-s': skippats.append(a)
remotedir = ''
localdir = ''
if args[1:]:
remotedir = args[1]
if args[2:]:
localdir = args[2]
if args[3:]: usage('too many arguments')
#
f = ftplib.FTP()
if verbose: print("Connecting to '%s%s'..." % (host,
(port and ":%d"%port or "")))
f.connect(host,port)
if not nologin:
if verbose:
print('Logging in as %r...' % (login or 'anonymous'))
f.login(login, passwd, account)
if verbose: print('OK.')
pwd = f.pwd()
if verbose > 1: print('PWD =', repr(pwd))
if remotedir:
if verbose > 1: print('cwd(%s)' % repr(remotedir))
f.cwd(remotedir)
if verbose > 1: print('OK.')
pwd = f.pwd()
if verbose > 1: print('PWD =', repr(pwd))
#
mirrorsubdir(f, localdir)
# Core logic: mirror one subdirectory (recursively)
def mirrorsubdir(f, localdir):
pwd = f.pwd()
if localdir and not os.path.isdir(localdir):
if verbose: print('Creating local directory', repr(localdir))
try:
makedir(localdir)
except os.error as msg:
print("Failed to establish local directory", repr(localdir))
return
infofilename = os.path.join(localdir, '.mirrorinfo')
try:
text = open(infofilename, 'r').read()
except IOError as msg:
text = '{}'
try:
info = eval(text)
except (SyntaxError, NameError):
print('Bad mirror info in', repr(infofilename))
info = {}
subdirs = []
listing = []
if verbose: print('Listing remote directory %r...' % (pwd,))
f.retrlines('LIST', listing.append)
filesfound = []
for line in listing:
if verbose > 1: print('-->', repr(line))
if mac:
# Mac listing has just filenames;
# trailing / means subdirectory
filename = line.strip()
mode = '-'
if filename[-1:] == '/':
filename = filename[:-1]
mode = 'd'
infostuff = ''
else:
# Parse, assuming a UNIX listing
words = line.split(None, 8)
if len(words) < 6:
if verbose > 1: print('Skipping short line')
continue
filename = words[-1].lstrip()
i = filename.find(" -> ")
if i >= 0:
# words[0] had better start with 'l'...
if verbose > 1:
print('Found symbolic link %r' % (filename,))
linkto = filename[i+4:]
filename = filename[:i]
infostuff = words[-5:-1]
mode = words[0]
skip = 0
for pat in skippats:
if fnmatch(filename, pat):
if verbose > 1:
print('Skip pattern', repr(pat), end=' ')
print('matches', repr(filename))
skip = 1
break
if skip:
continue
if mode[0] == 'd':
if verbose > 1:
print('Remembering subdirectory', repr(filename))
subdirs.append(filename)
continue
filesfound.append(filename)
if filename in info and info[filename] == infostuff:
if verbose > 1:
print('Already have this version of',repr(filename))
continue
fullname = os.path.join(localdir, filename)
tempname = os.path.join(localdir, '@'+filename)
if interactive:
doit = askabout('file', filename, pwd)
if not doit:
if filename not in info:
info[filename] = 'Not retrieved'
continue
try:
os.unlink(tempname)
except os.error:
pass
if mode[0] == 'l':
if verbose:
print("Creating symlink %r -> %r" % (filename, linkto))
try:
os.symlink(linkto, tempname)
except IOError as msg:
print("Can't create %r: %s" % (tempname, msg))
continue
else:
try:
fp = open(tempname, 'wb')
except IOError as msg:
print("Can't create %r: %s" % (tempname, msg))
continue
if verbose:
print('Retrieving %r from %r as %r...' % (filename, pwd, fullname))
if verbose:
fp1 = LoggingFile(fp, 1024, sys.stdout)
else:
fp1 = fp
t0 = time.time()
try:
f.retrbinary('RETR ' + filename,
fp1.write, 8*1024)
except ftplib.error_perm as msg:
print(msg)
t1 = time.time()
bytes = fp.tell()
fp.close()
if fp1 != fp:
fp1.close()
try:
os.unlink(fullname)
except os.error:
pass # Ignore the error
try:
os.rename(tempname, fullname)
except os.error as msg:
print("Can't rename %r to %r: %s" % (tempname, fullname, msg))
continue
info[filename] = infostuff
writedict(info, infofilename)
if verbose and mode[0] != 'l':
dt = t1 - t0
kbytes = bytes / 1024.0
print(int(round(kbytes)), end=' ')
print('Kbytes in', end=' ')
print(int(round(dt)), end=' ')
print('seconds', end=' ')
if t1 > t0:
print('(~%d Kbytes/sec)' % \
int(round(kbytes/dt),))
print()
#
# Remove files from info that are no longer remote
deletions = 0
for filename in list(info.keys()):
if filename not in filesfound:
if verbose:
print("Removing obsolete info entry for", end=' ')
print(repr( |
kostyll/usb-flash-network-monitor | server/utils.py | Python | mit | 246 | 0.012195 | # -*- coding:utf-8 -*-
# from __future__ import unicode_literals
def clean_up_double_spaces(line):
    """Collapse every run of two or more spaces in *line* into one space.

    Equivalent to repeatedly replacing '  ' with ' ' until none remain
    (the original loop), but done in a single regex pass.  Tabs and other
    whitespace are deliberately left untouched.
    """
    import re  # local import keeps this a drop-in for a module with no imports
    return re.sub(' {2,}', ' ', line)
# in future will be gettext function
def _(x):
    """Identity placeholder for a gettext-style translation function."""
    return x
|
sychsergiy/DocFlow | doc_flow/settings/base.py | Python | bsd-3-clause | 3,695 | 0.001624 | import os
import sys
# PATH vars
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

def root(*x):
    """Join path components *x* onto the project base directory.

    A named ``def`` instead of a lambda bound to a name (PEP 8 E731):
    same behavior, but better tracebacks and introspection.
    """
    return os.path.join(BASE_DIR, *x)

sys.path.insert(0, root('apps'))  # make the apps/ directory importable
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'CHANGE THIS!!!'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
IN_TESTING = sys.argv[1:2] == ['test']
ALLOWED_HOSTS = []
# Application definition
# Django contrib apps plus third-party apps; PROJECT_APPS are appended below.
# (Repairs a corrupted 'djangoformsetj | s' entry found in the source.)
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.sites',
    'viewflow',
    'djangoformsetjs',
]
SITE_ID = 1
PROJECT_APPS = [
'accounts',
'filters',
'documents',
]
INSTALLED_APPS += PROJECT_APPS
# Middleware order matters: security first, auth before the project's
# LoginRequiredMiddleware.  (Repairs a corrupted
# SessionAuthenticationMiddleware entry found in the source.)
MIDDLEWARE_CLASSES = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'doc_flow.apps.accounts.middleware.LoginRequiredMiddleware',
]
ROOT_URLCONF = 'doc_flow.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'doc_flow.wsgi.application'
# Database
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'doc_flow',
'USER': 'postgres',
'PASSWORD': '',
'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '', # Set to empty string for default.
}
}
# Internationalization
LANGUAGE_CODE = 'en-gb'
TIME_ZONE = 'UTC'
USE_I18N = False
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
root('assets'),
)
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
'DIRS': [
root('templates'),
],
'OPTIONS': {
'debug': DEBUG,
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.request',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
],
},
}
]
# Password validation
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
LOGIN_REDIRECT_URL = '/filters/home/'
LOGIN_URL = '/accounts/login/'
# .local.py overrides all the common settings.
try:
from .local import * # noqa
except ImportError:
pass
# importing test settings file if necessary
if IN_TESTING:
from .testing import * # noqa
|
andredalton/bcc | 2014/MAC0242/miniep3/miniep3.py | Python | apache-2.0 | 5,396 | 0.004872 | #! /usr/bin/env python3
import sys
import re
def uso():
    """Print the program's usage instructions (in Portuguese)."""
    # NOTE(review): the local variable shadows the function name 'uso';
    # harmless here, but a name like 'texto' would be clearer.
    uso = """
    Este programa gera a ordem correta de inclusão em um banco de dados.
    Passe o nome dos arquivos na linha de comando. Caso queira imprimir
    ocorrências de referência circular ou inexistente utilize a opção -v.
    """
    print(uso)
def imprime(dic):
    """Print a dict of collections, one 'key: v1, v2, ...' line per entry."""
    for chave, valores in dic.items():
        print(chave + ": " + ', '.join(valores))
def procura(dic, ordem, verb):
    """Topologically sort the tables in ``dic``, appending them to ``ordem``.

    ``dic`` maps each table name to the collection of tables it still
    references.  Tables with no pending references are emitted first, then
    removed from every remaining reference collection.  If no table can be
    emitted while ``dic`` is non-empty, the leftovers have circular or
    nonexistent references; they are reported when ``verb`` is true and the
    partial order is returned.

    Returns ``ordem`` (which is also mutated in place).  Fixes a defect in
    the recursive original: the recursive call's result was discarded, so
    the function returned None in all non-trivial cases.
    """
    while dic:
        # Tables whose every reference has already been processed.
        prontas = {t for t, refs in dic.items() if not refs}
        if not prontas:
            # Only circular or nonexistent references remain.
            if verb:
                print("\nAs tabelas a seguir possuem referência circular ou inexistente:")
                imprime(dic)
                print("\nO resultado obtido foi:")
            break
        for tabela in prontas:
            ordem.append(tabela)
            del dic[tabela]
        # Drop the emitted tables from every remaining reference collection.
        for refs in dic.values():
            for tabela in prontas:
                if tabela in refs:
                    refs.remove(tabela)
    return ordem
def procedencia(lst, verb):
    """Build, from a whitespace-tokenized SQL dump, a dict mapping each
    created table to the set of tables it references via FOREIGN KEY.

    ``lst`` is the dump split on whitespace; ``verb`` enables a warning
    when a table is created twice.  The scan is a small state machine
    (syntax errors are not diagnosed):

    0: looking for CREATE (or DELIMITER)
    1: expecting TABLE
    2: expecting the name of the table being created
    3: looking for FOREIGN
    4: expecting KEY
    5: looking for REFERENCES
    6: expecting the referenced table name
    7: the next word becomes the new statement delimiter

    Any token containing the current delimiter ends the statement and
    falls back to state 0 (or 1 if the token also ends with CREATE).
    """
    delimitador = ";"
    status = 0
    proc = {}      # table name -> set of referenced table names
    tabela = ""    # table currently being scanned
    # re.escape keeps the pattern valid for arbitrary delimiters; the
    # original escaped only in the DELIMITER branch below, so a regex
    # metacharacter as the initial delimiter would have misbehaved.
    fim = re.compile(".*" + re.escape(delimitador) + ".*")
    create = re.compile(".*[cC][rR][eE][aA][tT][eE]$")
    delim = re.compile(".*[dD][eE][lL][iI][mM][iI][tT][eE][rR]$")
    for p in lst:
        if status == 0 and create.match(p):
            status = 1
        elif status == 0 and delim.match(p):
            status = 7
        elif status == 1:
            status = 2 if p.lower() == "table" else 0
        elif status == 2 and p != delimitador and len(p.replace("`", "")) > 0:
            tabela = p.replace("`", "")
            if tabela in proc and verb:
                print("TABELA " + tabela + " RECRIADA")
            proc[tabela] = set()
            status = 3
        elif status == 3 and p.lower() == "foreign":
            status = 4
        elif status == 4:
            status = 5 if p.lower() == "key" else 0
        elif status == 5 and p.lower() == "references":
            status = 6
        elif status == 6 and p != delimitador and len(p.replace("`", "")) > 0:
            proc[tabela].add(p.replace("`", ""))
            status = 3
        elif status == 7:
            delimitador = p
            fim = re.compile(".*" + re.escape(delimitador) + ".*")
            status = 0
        elif fim.match(p):
            # End of statement: stop scanning the current table.
            status = 1 if create.match(p) else 0
            tabela = ""
    return proc
def main(argv):
    """Handle the command line: for each .sql dump named in ``argv``,
    print the order in which the files must be restored.

    The ``-v`` flag enables verbose mode (reporting circular/missing
    references and recreated tables).  With no file arguments the usage
    text is printed.

    Fixes two defects of the original: the function "docstring" and the
    variable comment were stray string-literal expressions in the middle
    of the body, and the per-file header condition used ``len(argv)``,
    which counted the -v flag and so mislabelled single-file runs.
    """
    verboso = "-v" in argv
    arquivos = [a for a in argv if a != "-v"]
    if not arquivos:
        uso()
        return
    for arquivo in arquivos:
        # Header only when more than one *file* was given.
        if len(arquivos) > 1:
            print("\nARQUIVO: " + arquivo)
        with open(arquivo, "r") as myfile:
            texto = myfile.read().split()
        dic = procedencia(texto, verboso)
        ordem = []  # restoration order for this file
        procura(dic, ordem, verboso)
        print('.sql\n'.join(ordem), end=".sql\n")
if __name__ == "__main__":
main(sys.argv[1:]) |
nop33/indico | indico/modules/events/contributions/operations.py | Python | gpl-3.0 | 9,065 | 0.002978 | # This file is part of Indico.
# Copyright (C) 2002 - 2017 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from datetime import timedelta
from flask import session
from indico.core import signals
from indico.core.db import db
from indico.core.db.sqlalchemy.util.session import no_autoflush
from indico.modules.events.contributions import logger
from indico.modules.events.contributions.models.contributions import Contribution
from indico.modules.events.contributions.models.persons import ContributionPersonLink
from indico.modules.events.contributions.models.subcontributions import SubContribution
from indico.modules.events.logs.models.entries import EventLogKind, EventLogRealm
from indico.modules.events.timetable.operations import (delete_timetable_entry, schedule_contribution,
update_timetable_entry)
from indico.modules.events.util import set_custom_fields
def _ensure_consistency(contrib):
    """Unschedule contribution if not consistent with timetable

    A contribution that has no session assigned, may not be scheduled
    inside a session. A contribution that has a session assigned may
    only be scheduled inside a session block associated with that
    session, and that session block must match the session block of
    the contribution.

    :param contrib: the `Contribution` to check
    :return: A bool indicating whether the contribution has been
             unscheduled to preserve consistency.
    """
    entry = contrib.timetable_entry
    if entry is None:
        # Not scheduled at all, so nothing can be inconsistent.
        return False
    if entry.parent_id is None and (contrib.session is not None or contrib.session_block is not None):
        # Top-level entry but we have a session/block set
        delete_timetable_entry(entry, log=False)
        return True
    elif entry.parent_id is not None:
        parent = entry.parent
        # Nested entry but no or a different session/block set
        if parent.session_block.session != contrib.session or parent.session_block != contrib.session_block:
            delete_timetable_entry(entry, log=False)
            return True
    return False
def create_contribution(event, contrib_data, custom_fields_data=None, session_block=None, extend_parent=False):
    """Create a new contribution in *event* and optionally schedule it.

    :param event: the `Event` the contribution belongs to
    :param contrib_data: dict of attribute values; a ``start_dt`` key, if
                         present, is popped and used to schedule the entry
    :param custom_fields_data: dict of values for custom contribution fields
    :param session_block: session block to schedule inside (used only when
                          ``start_dt`` is given)
    :param extend_parent: whether scheduling may extend the parent's bounds
    :return: the newly created `Contribution`
    """
    # start_dt is not a Contribution column; it is handled via the timetable.
    start_dt = contrib_data.pop('start_dt', None)
    contrib = Contribution(event=event)
    contrib.populate_from_dict(contrib_data)
    if start_dt is not None:
        schedule_contribution(contrib, start_dt=start_dt, session_block=session_block, extend_parent=extend_parent)
    if custom_fields_data:
        set_custom_fields(contrib, custom_fields_data)
    db.session.flush()
    signals.event.contribution_created.send(contrib)
    logger.info('Contribution %s created by %s', contrib, session.user)
    contrib.event.log(EventLogRealm.management, EventLogKind.positive, 'Contributions',
                      'Contribution "{}" has been created'.format(contrib.title), session.user)
    return contrib
@no_autoflush
def update_contribution(contrib, contrib_data, custom_fields_data=None):
    """Update a contribution

    :param contrib: The `Contribution` to update
    :param contrib_data: A dict containing the data to update
    :param custom_fields_data: A dict containing the data for custom
                               fields.
    :return: A dictionary containing information related to the
             update. `unscheduled` will be true if the modification
             resulted in the contribution being unscheduled. In this
             case `undo_unschedule` contains the necessary data to
             re-schedule it (undoing the session change causing it to
             be unscheduled)
    """
    rv = {'unscheduled': False, 'undo_unschedule': None}
    # Remember the block before the update so the undo data can restore it.
    current_session_block = contrib.session_block
    start_dt = contrib_data.pop('start_dt', None)
    if start_dt is not None:
        # start_dt lives on the timetable entry, not on the contribution.
        update_timetable_entry(contrib.timetable_entry, {'start_dt': start_dt})
    changes = contrib.populate_from_dict(contrib_data)
    if custom_fields_data:
        changes.update(set_custom_fields(contrib, custom_fields_data))
    if 'session' in contrib_data:
        # A session change may leave the timetable entry in an invalid place;
        # if so it is unscheduled and we return the data needed to undo that.
        timetable_entry = contrib.timetable_entry
        if timetable_entry is not None and _ensure_consistency(contrib):
            rv['unscheduled'] = True
            rv['undo_unschedule'] = {'start_dt': timetable_entry.start_dt.isoformat(),
                                     'contribution_id': contrib.id,
                                     'session_block_id': current_session_block.id if current_session_block else None,
                                     'force': True}
    db.session.flush()
    if changes:
        signals.event.contribution_updated.send(contrib, changes=changes)
        logger.info('Contribution %s updated by %s', contrib, session.user)
        contrib.event.log(EventLogRealm.management, EventLogKind.change, 'Contributions',
                          'Contribution "{}" has been updated'.format(contrib.title), session.user)
    return rv
def delete_contribution(contrib):
    """Soft-delete *contrib* (flag it) and remove it from the timetable.

    Emits the ``contribution_deleted`` signal and writes an event log entry.
    """
    contrib.is_deleted = True
    if contrib.timetable_entry is not None:
        delete_timetable_entry(contrib.timetable_entry, log=False)
    db.session.flush()
    signals.event.contribution_deleted.send(contrib)
    logger.info('Contribution %s deleted by %s', contrib, session.user)
    contrib.event.log(EventLogRealm.management, EventLogKind.negative, 'Contributions',
                      'Contribution "{}" has been deleted'.format(contrib.title), session.user)
def create_subcontribution(contrib, data):
    """Create a new subcontribution under *contrib* from a dict of attributes.

    :param contrib: the parent `Contribution`
    :param data: dict of attribute values for the new subcontribution
    :return: the newly created `SubContribution`
    """
    subcontrib = SubContribution()
    subcontrib.populate_from_dict(data)
    contrib.subcontributions.append(subcontrib)
    db.session.flush()
    signals.event.subcontribution_created.send(subcontrib)
    logger.info('Subcontribution %s created by %s', subcontrib, session.user)
    subcontrib.event.log(EventLogRealm.management, EventLogKind.positive, 'Subcontributions',
                         'Subcontribution "{}" has been created'.format(subcontrib.title), session.user)
    return subcontrib
def update_subcontribution(subcontrib, data):
    """Update *subcontrib* from a dict of attributes and log the change.

    :param subcontrib: the `SubContribution` to update
    :param data: dict of attribute values to apply
    """
    subcontrib.populate_from_dict(data)
    db.session.flush()
    signals.event.subcontribution_updated.send(subcontrib)
    logger.info('Subcontribution %s updated by %s', subcontrib, session.user)
    subcontrib.event.log(EventLogRealm.management, EventLogKind.change, 'Subcontributions',
                         'Subcontribution "{}" has been updated'.format(subcontrib.title), session.user)
def delete_subcontribution(subcontrib):
    """Soft-delete *subcontrib* (flag it) and emit the matching signal/log.

    Subcontributions are flagged as deleted rather than removed from the
    database, mirroring delete_contribution above.  (This body also repairs
    stray ' | ' delimiter tokens that corrupted two lines in the source.)
    """
    subcontrib.is_deleted = True
    db.session.flush()
    signals.event.subcontribution_deleted.send(subcontrib)
    logger.info('Subcontribution %s deleted by %s', subcontrib, session.user)
    subcontrib.event.log(EventLogRealm.management, EventLogKind.negative, 'Subcontributions',
                         'Subcontribution "{}" has been deleted'.format(subcontrib.title), session.user)
@no_autoflush
def create_contribution_from_abstract(abstract, contrib_session=None):
event = abstract.event
contrib_person_links = set()
person_link_attrs = {'_title', 'address', 'affiliation', 'first_name', 'last_name', 'phone', 'author_type',
'is_speaker', 'display_order'}
for abstract_person_link in abstract.person_links:
link = ContributionPersonLink(person=abstract_person_link.person)
link.populate_from_attrs(abstract_person_link, person_link_attrs)
contrib_person_links.add(link)
duration = contrib_session.default_contribution_duration |
valmynd/MediaFetcher | src/main.py | Python | gpl-3.0 | 9,141 | 0.021879 | #!/usr/bin/env python3
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from models.settingsmodel import SettingsModel
from views.clipboardview import ClipBoardView
from views.downloadview import DownloadView
from views.settingsview import SettingsDialog, SettingsToolBar, ConfigurableToolBar
import re
from plugins import * # dont remove this line!
__author__ = "C. Wilhelm"
___license___ = "GPL v3"
class TrayIcon(QSystemTrayIcon):
    """System-tray icon with a context menu (minimize/maximize/restore/quit).

    Shown only when the platform reports a system tray; double-clicking the
    icon toggles the main window's visibility.
    """
    def __init__(self, main_window):
        QSystemTrayIcon.__init__(self, main_window)
        self.mainWindow = main_window
        self.activated.connect(self.trayActivated)
        self.setIcon(QIcon("../img/icon.png"))
        self.minimizeAction = QAction("Mi&nimize", self, triggered=main_window.hide)
        self.maximizeAction = QAction("Ma&ximize", self, triggered=main_window.showMaximized)
        self.restoreAction = QAction("&Restore", self, triggered=main_window.showNormal)
        self.quitAction = QAction("&Quit", self, shortcut="Ctrl+Q", triggered=main_window.close)
        self.quitAction.setIcon(QIcon.fromTheme("application-exit"))
        menu = QMenu(main_window)
        menu.addAction(self.minimizeAction)
        menu.addAction(self.maximizeAction)
        menu.addAction(self.restoreAction)
        menu.addSeparator()
        menu.addAction(self.quitAction)
        self.setContextMenu(menu)
        if QSystemTrayIcon.isSystemTrayAvailable():
            self.show()

    def trayActivated(self, reason):
        # Double-click toggles the main window between hidden and shown.
        if reason == QSystemTrayIcon.ActivationReason.DoubleClick:
            self.mainWindow.showNormal() if self.mainWindow.isHidden() else self.mainWindow.hide()
class SearchBar(QLineEdit):
    """Line edit with an embedded clear button and optional submit callback.

    The clear button is shown only while there is text; pressing Return
    invokes *callback* with the current text (if a callback was given).
    """
    def __init__(self, callback=None):
        QLineEdit.__init__(self)
        self.button = QToolButton(self)
        self.button.setIcon(QIcon("../img/edit-clear.png"))
        self.button.setCursor(Qt.ArrowCursor)
        self.button.clicked.connect(self.clear)
        self.button.hide()
        self.textChanged.connect(self.toggleButton)
        if callback is not None:
            # Submit the current text when Return is pressed.
            self.returnPressed.connect(lambda: callback(self.text()))
        # self.setFixedHeight(28)
        self.setPlaceholderText(" < Search Term or URL >")

    def resizeEvent(self, event):
        # Keep the clear button glued to the right edge of the line edit.
        self.button.setStyleSheet("QToolButton {margin: 0 0 0 0; border: 0px;}")
        x = self.size().width() - self.button.sizeHint().width() - 2
        y = (self.size().height() + 1 - self.button.sizeHint().height()) / 2
        self.button.move(x, y)

    def toggleButton(self):
        # Show the clear button only while there is text to clear.
        self.button.setVisible(bool(self.text()))
class MainToolBar(ConfigurableToolBar):
    """Main toolbar: open container file, search bar, start/pause, settings.

    The start/pause control is a QToolButton whose default action is swapped
    between startAction and pauseAction by togglePause().  (This body also
    repairs a stray ' | ' delimiter token that corrupted one line.)
    """
    def __init__(self, main_window):
        ConfigurableToolBar.__init__(self, "Toolbar", main_window)
        self.mainWindow = main_window
        self.searchBar = SearchBar(callback=main_window.search)
        self.openAction = QAction("&Open Container File", self, triggered=main_window.open)
        self.startAction = QAction("&Start Downloads", self, triggered=self.togglePause)
        self.pauseAction = QAction("&Pause Downloads", self, triggered=self.togglePause)
        self.settingsAction = QAction("Prefere&nces", self, triggered=main_window.showSettings)
        self.searchAction = QAction("Search Button", self, triggered=lambda: self.searchBar.returnPressed.emit())
        self.openAction.setIcon(QIcon.fromTheme("folder-open"))
        self.startAction.setIcon(QIcon.fromTheme("media-playback-start"))
        self.pauseAction.setIcon(QIcon.fromTheme("media-playback-pause"))
        self.settingsAction.setIcon(QIcon.fromTheme("emblem-system"))
        self.searchAction.setIcon(QIcon.fromTheme("system-search"))
        self.searchBarAction = QWidgetAction(self)
        self.searchBarAction.setText("Search Bar")  # make it checkable in the menu of visible actions
        self.searchBarAction.setDefaultWidget(self.searchBar)
        self.startButton = QToolButton(self)
        self.startButton.setDefaultAction(self.startAction)
        self.startButtonAction = QWidgetAction(self)
        self.startButtonAction.setText("Start/Pause Downloads")
        self.startButtonAction.setDefaultWidget(self.startButton)
        self.addAction(self.openAction)
        self.addAction(self.searchBarAction)
        self.addAction(self.searchAction)
        self.addAction(self.startButtonAction)
        self.addAction(self.settingsAction)

    def togglePause(self):
        """Swap the start/pause button state and (un)pause the download model."""
        if self.startButton.defaultAction() == self.pauseAction:
            # Currently pausable: switch back to "start" and pause the model.
            self.startButton.removeAction(self.pauseAction)
            self.startButton.setDefaultAction(self.startAction)
            self.startAction.setDisabled(False)
            self.pauseAction.setDisabled(True)
            self.mainWindow.downloadView.model().pause()
        else:
            # Currently startable: switch to "pause", focus downloads, start.
            self.startButton.removeAction(self.startAction)
            self.startButton.setDefaultAction(self.pauseAction)
            self.pauseAction.setDisabled(False)
            self.startAction.setDisabled(True)
            self.mainWindow.tabBar.setCurrentWidget(self.mainWindow.downloadView)
            self.mainWindow.downloadView.model().start()
class MainWindow(QMainWindow):
aboutToQuit = pyqtSignal()
def __init__(self):
QMainWindow.__init__(self)
self.setWindowTitle("Media Fetcher")
self.setWindowIcon(QIcon("../img/icon.png"))
self.tray = TrayIcon(self)
self.settings = QSettings(QSettings.IniFormat, QSettings.UserScope, "MediaFetcher", "MediaFetcher")
self.settingsPath = QFileInfo(self.settings.fileName()).abs | olutePath()
self.settingsModel = SettingsModel(self.settings)
self.settingsDialog = SettingsDialog(self, self.settingsModel)
self.statusBar = SettingsToolBar(self, self.settingsModel)
self.addToolBar(Qt.BottomToolBarArea, self.statusBar)
self.toolBar = MainToolBar(self)
self.addToolBar(Qt.TopToolBarArea, self.toolBar)
self.initMenus()
self.initTabs()
self.loadSettings()
self.aboutToQuit.connect(self.writeSettings)
# monitor Clipboard
QApplication.clipboard().dataChanged.connect(self.clipBoardChanged)
self.clipboardView.addURL("https://www.youtube.com/watch?v=IsBOoY2zvC0")
def closeEvent(self, event):
# http://qt-project.org/doc/qt-5.0/qtwidgets/qwidget.html#closeEvent
# http://qt-project.org/doc/qt-5.0/qtcore/qcoreapplication.html#aboutToQuit
self.aboutToQuit.emit()
def loadSettings(self):
self.resize(600, 400)
def writeSettings(self):
pass
def showSettings(self):
self.settingsDialog.open()
def initMenus(self):
# toolbar actions may be set to invisible (exceptions: start, pause), so the main menu can't use these!
self.openAction = QAction("&Open Container File", self, shortcut=QKeySequence.Open, triggered=self.open)
self.settingsAction = QAction("Prefere&nces", self, triggered=self.showSettings)
self.openAction.setIcon(QIcon.fromTheme("folder-open"))
self.settingsAction.setIcon(QIcon.fromTheme("emblem-system"))
self.fileMenu = QMenu("&File", self)
self.fileMenu.addAction(self.openAction)
self.fileMenu.addSeparator()
self.fileMenu.addAction(self.toolBar.startAction)
self.fileMenu.addAction(self.toolBar.pauseAction)
self.fileMenu.addSeparator()
self.fileMenu.addAction(self.tray.quitAction)
self.editMenu = QMenu("&Edit", self)
self.editMenu.addAction(self.settingsAction)
self.viewMenu = QMenu("&View", self)
self.viewMenu.addAction(self.toolBar.toggleViewAction())
self.viewMenu.addAction(self.statusBar.toggleViewAction())
self.helpMenu = QMenu("&Help", self)
self.helpMenu.addAction(QAction("About", self, triggered=self.about))
self.menuBar().addMenu(self.fileMenu)
self.menuBar().addMenu(self.editMenu)
self.menuBar().addMenu(self.viewMenu)
self.menuBar().addMenu(self.helpMenu)
def addTab(self, widget, label, closable=True):
i = self.tabBar.count()
self.tabBar.addTab(widget, " %s " % label if not closable else label)
button = self.tabBar.tabBar().tabButton(i, QTabBar.RightSide)
button.setStyleSheet("QToolButton {margin: 0; padding: 0;}")
if not closable:
button.setFixedWidth(0)
self.tabBar.setCurrentIndex(i)
def initTabs(self):
self.tabBar = QTabWidget()
self.setCentralWidget(self.tabBar)
self.tabBar.setTabsClosable(True)
appropriate_height = QLineEdit().sizeHint().height()
self.tabBar.setStyleSheet("QTabBar::tab {height: %spx;}" % appropriate_height)
self.tabBar.tabCloseRequested.connect(lambda i: self.tabBar.removeTab(i))
# Downloads Tab
self.downloadView = DownloadView(self, self.settings)
self.addTab(self.downloadView, "Downloads", closable=False)
# Clipboard Tab
self.clipboardView = ClipBoardView(self, self.settings, self.downloadView)
sel |
bgmerrell/desmod | tests/test_queue.py | Python | mit | 1,789 | 0 | from desmod.queue import Queue, PriorityQueue
def test_mq(env):
    """Messages are delivered FIFO: each consumer gets the item that was
    put in the matching order, across staggered simulation times."""
    queue = Queue(env, capacity=2)
    def producer(msg, wait):
        # Wait `wait` time units, then enqueue the message.
        yield env.timeout(wait)
        yield queue.put(msg)
    def consumer(expected_msg, wait):
        # Wait, then dequeue and verify arrival order.
        yield env.timeout(wait)
        msg = yield queue.get()
        assert msg == expected_msg
    env.process(producer('1st', 0))
    env.process(producer('2nd', 1))
    env.process(consumer('1st', 0))
    env.process(consumer('2nd', 1))
    env.run()
def test_mq_when_full(env):
    """when_full()/when_any() events fire when the queue reaches the
    corresponding occupancy, and the queue ends up full again at the end."""
    queue = Queue(env, capacity=2)
    result = []
    def producer(env):
        yield env.timeout(1)
        for i in range(5):
            yield queue.put(i)
            yield env.timeout(1)
    def consumer(env):
        # Starts late so the queue fills up first; drains three items.
        yield env.timeout(5)
        for i in range(3):
            msg = yield queue.get()
            assert msg == i
    def full_waiter(env):
        # Triggered once, when the queue first becomes full.
        yield queue.when_full()
        result.append('full')
    def any_waiter(env):
        # Triggered as soon as the first item arrives (t == 1).
        yield queue.when_any()
        assert env.now == 1
        result.append('any')
    env.process(producer(env))
    env.process(consumer(env))
    env.process(full_waiter(env))
    env.process(any_waiter(env))
    env.process(any_waiter(env))
    env.run()
    assert queue.items
    assert queue.is_full
    assert 'full' in result
    assert result.count('any') == 2
def test_priority_mq(env):
    """Items put in descending priority order (4..0) are retrieved in
    ascending order: a PriorityQueue always yields the smallest item."""
    queue = PriorityQueue(env)
    def producer(env):
        for i in reversed(range(5)):
            yield queue.put(i)
            yield env.timeout(1)
    def consumer(env):
        # Starts after all puts, so ordering reflects priority, not timing.
        yield env.timeout(5)
        for i in range(5):
            msg = yield queue.get()
            yield env.timeout(1)
            assert msg == i
    env.process(producer(env))
    env.process(consumer(env))
    env.run()
|
xuewei4d/scikit-learn | asv_benchmarks/benchmarks/ensemble.py | Python | bsd-3-clause | 3,520 | 0 | from sklearn.experimental import enable_hist_gradient_boosting # noqa
from sklearn.ensemble import (RandomForestClassifier,
GradientBoostingClassifier,
HistGradientBoostingClassifier)
from .common import Benchmark, Estimator, Predictor
from .datasets import (_20newsgroups_highdim_dataset,
_20newsgroups_lowdim_dataset,
_synth_classification_dataset)
from .utils import make_gen_classif_scorers
class RandomForestClassifierBenchmark(Predictor, Estimator, Benchmark):
    """
    Benchmarks for RandomForestClassifier.

    Benchmarked axes: dataset representation (dense/sparse) and n_jobs.
    (This body repairs stray ' | ' delimiter tokens that corrupted the
    param_names/params lines in the source.)
    """
    param_names = ['representation', 'n_jobs']
    params = (['dense', 'sparse'], Benchmark.n_jobs_vals)

    def setup_cache(self):
        # Delegate to the shared benchmark cache setup.
        super().setup_cache()

    def make_data(self, params):
        representation, n_jobs = params
        # Sparse uses the high-dimensional text features, dense the low-dim ones.
        if representation == 'sparse':
            data = _20newsgroups_highdim_dataset()
        else:
            data = _20newsgroups_lowdim_dataset()
        return data

    def make_estimator(self, params):
        representation, n_jobs = params
        # Larger forest when benchmarking the 'large' data size.
        n_estimators = 500 if Benchmark.data_size == 'large' else 100
        estimator = RandomForestClassifier(n_estimators=n_estimators,
                                           min_samples_split=10,
                                           max_features='log2',
                                           n_jobs=n_jobs,
                                           random_state=0)
        return estimator

    def make_scorers(self):
        make_gen_classif_scorers(self)
class GradientBoostingClassifierBenchmark(Predictor, Estimator, Benchmark):
    """
    Benchmarks for GradientBoostingClassifier.
    """
    # Only the dataset representation (dense/sparse) is benchmarked here.
    param_names = ['representation']
    params = (['dense', 'sparse'],)

    def setup_cache(self):
        # Delegate to the shared benchmark cache setup.
        super().setup_cache()

    def make_data(self, params):
        representation, = params
        # Sparse uses the high-dimensional text features, dense the low-dim ones.
        if representation == 'sparse':
            data = _20newsgroups_highdim_dataset()
        else:
            data = _20newsgroups_lowdim_dataset()
        return data

    def make_estimator(self, params):
        representation, = params
        # More boosting stages when benchmarking the 'large' data size.
        n_estimators = 100 if Benchmark.data_size == 'large' else 10
        estimator = GradientBoostingClassifier(n_estimators=n_estimators,
                                               max_features='log2',
                                               subsample=0.5,
                                               random_state=0)
        return estimator

    def make_scorers(self):
        make_gen_classif_scorers(self)
class HistGradientBoostingClassifierBenchmark(Predictor, Estimator, Benchmark):
    """
    Benchmarks for HistGradientBoostingClassifier.
    """
    # No parameter axes: a single synthetic-data configuration is timed.
    param_names = []
    params = ()

    def setup_cache(self):
        # Delegate to the shared benchmark cache setup.
        super().setup_cache()

    def make_data(self, params):
        data = _synth_classification_dataset(n_samples=10000,
                                             n_features=100,
                                             n_classes=5)
        return data

    def make_estimator(self, params):
        # early_stopping disabled so every run does the same amount of work.
        estimator = HistGradientBoostingClassifier(max_iter=100,
                                                   max_leaf_nodes=15,
                                                   early_stopping=False,
                                                   random_state=0)
        return estimator

    def make_scorers(self):
        make_gen_classif_scorers(self)
|
SergeySatskiy/ttkiosk | src/ui/1440x900/Logo.py | Python | gpl-2.0 | 3,995 | 0.009512 | #
# ttkiosk - table tennis club touch screen based kiosk.
# Copyright (C) 2009 Sergey Satskiy
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301, USA.
#
# $Id$
#
# Generated by PyQt4 UI code generator 4.5.4 and then customized
#
from PyQt4 import QtCore, QtGui
from PyQt4.QtCore import SIGNAL
import ui
from utils import debugMsg, GlobalData
class ClickableLogoLabel( QtGui.QLabel ):
    """ Club logo label; clicking it navigates back to the home form.

    (This body repairs stray ' | ' delimiter tokens that corrupted the
    __init__ definition and the GlobalData call in the source.)
    """
    def __init__( self, firstLabel ):
        QtGui.QLabel.__init__( self, firstLabel )

    def mouseReleaseEvent( self, event ):
        """ go home; admins additionally get the admin top bar """
        ui.navigateHome()
        if GlobalData().isAdmin:
            ui.hideForm( 'TopBar' )
            ui.showForm( 'AdminTopBar' )
        return
class Ui_Logo(ui.FormBaseClass):
    # Generated-then-customized form class that lays out the club logo
    # picture inside a grid with spacers on all four sides.
    def setupUi(self, Logo, path):
        # Build the widget hierarchy; `path` points at the image directory.
        ui.FormBaseClass.__init__(self)
        Logo.setObjectName("Logo")
        Logo.resize(400, 300)
        self.gridLayoutWidget = QtGui.QWidget(Logo)
        self.gridLayoutWidget.setGeometry(QtCore.QRect(10, 10, 351, 171))
        self.gridLayoutWidget.setObjectName("gridLayoutWidget")
        self.gridLayout = QtGui.QGridLayout(self.gridLayoutWidget)
        self.gridLayout.setObjectName("gridLayout")
        # Custom clickable label replaces the generated plain QLabel.
        #self.logoPicture = QtGui.QLabel(self.gridLayoutWidget)
        self.logoPicture = ClickableLogoLabel(self.gridLayoutWidget)
        self.updatePicture( path )
        self.logoPicture.setObjectName("logoPicture")
        self.gridLayout.addWidget(self.logoPicture, 1, 1, 1, 1)
        # Fixed spacers pad the picture; the expanding spacer pushes it left.
        spacerItem = QtGui.QSpacerItem(5, 20, QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Minimum)
        self.gridLayout.addItem(spacerItem, 1, 0, 1, 1)
        spacerItem1 = QtGui.QSpacerItem(20, 5, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
        self.gridLayout.addItem(spacerItem1, 0, 1, 1, 1)
        spacerItem2 = QtGui.QSpacerItem(20, 5, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
        self.gridLayout.addItem(spacerItem2, 2, 1, 1, 1)
        spacerItem3 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.gridLayout.addItem(spacerItem3, 1, 2, 1, 1)
        self.retranslateUi(Logo)
        QtCore.QMetaObject.connectSlotsByName(Logo)
    def retranslateUi(self, Logo):
        Logo.setWindowTitle(QtGui.QApplication.translate("Logo", "Form", None, QtGui.QApplication.UnicodeUTF8))
    def updatePicture( self, path ):
        # Show the club logo when connected, a warning icon otherwise.
        pictureName = ""
        if GlobalData().isConnected:
            pictureName = path + 'logo.gif'
        else:
            pictureName = path + 'warning.png'
        self.logoPicture.setPixmap(QtGui.QPixmap(pictureName))
        return
class Logo(QtGui.QWidget, Ui_Logo):
    # Top-level logo widget; re-renders its picture on connection changes.
    # NOTE(review): the default `f=QtCore.Qt.WindowFlags()` is evaluated once
    # at definition time and shared between calls — confirm this is intended.
    def __init__(self, path, parent=None, f=QtCore.Qt.WindowFlags()):
        QtGui.QWidget.__init__(self, parent, f)
        self.path = path
        self.setupUi(self, path)
        # React to the application-wide connection status signal.
        self.connect( GlobalData().application,
                      SIGNAL( "connectionStatus" ),
                      self.onConnectionChanged )
        return
    def setLayoutGeometry( self, width, height ):
        """ updates the whole form layout size """
        self.gridLayoutWidget.setGeometry( QtCore.QRect( 0, 0, width, height ) )
        return
    def onConnectionChanged( self ):
        # Swap between logo and warning images depending on connectivity.
        self.updatePicture( self.path )
        return
|
rmaceissoft/django-photologue | photologue/views.py | Python | bsd-3-clause | 5,802 | 0.003792 | import warnings
from django.views.generic.dates import ArchiveIndexView, DateDetailView, DayArchiveView, MonthArchiveView, \
YearArchiveView
from django.views.generic.detail import DetailView
from django.views.generic.list import ListView
from django.views.generic.base import RedirectView
from django.core.urlresolvers import reverse
from .models import Photo, Gallery
# Gallery views.
class GalleryListView(ListView):
    # Paginated list of public galleries on the current site.
    queryset = Gallery.objects.on_site().is_public()
    paginate_by = 20
class GalleryDetailView(DetailView):
    # Detail page for a single public gallery on the current site.
    queryset = Gallery.objects.on_site().is_public()
class GalleryDateView(object):
    # Shared configuration mixin for the gallery date-based views below.
    queryset = Gallery.objects.on_site().is_public()
    date_field = 'date_added'
    allow_empty = True
class GalleryDateDetailView(GalleryDateView, DateDetailView):
    # Date-scoped gallery detail; all behavior comes from the bases.
    pass
class GalleryArchiveIndexView(GalleryDateView, ArchiveIndexView):
    # Archive index of public galleries; configured by GalleryDateView.
    pass
class GalleryDayArchiveView(GalleryDateView, DayArchiveView):
    # Per-day gallery archive; configured by GalleryDateView.
    pass
class GalleryMonthArchiveView(GalleryDateView, MonthArchiveView):
    # Per-month gallery archive; configured by GalleryDateView.
    pass
class GalleryYearArchiveView(GalleryDateView, YearArchiveView):
    # Per-year gallery archive; also lists the galleries themselves.
    make_object_list = True
# Photo views.
class PhotoListView(ListView):
    # Paginated list of public photos on the current site.
    queryset = Photo.objects.on_site().is_public()
    paginate_by = 20
class PhotoDetailView(DetailView):
    # Detail page for a single public photo on the current site.
    queryset = Photo.objects.on_site().is_public()
class PhotoDateView(object):
    # Shared configuration mixin for the photo date-based views below.
    queryset = Photo.objects.on_site().is_public()
    date_field = 'date_added'
    allow_empty = True
class PhotoDateDetailView(PhotoDateView, DateDetailView):
    # Date-scoped photo detail; all behavior comes from the bases.
    pass
class PhotoArchiveIndexView(PhotoDateView, ArchiveIndexView):
    # Archive index of public photos; configured by PhotoDateView.
    pass
class PhotoDayArchiveView(PhotoDateView, DayArchiveView):
    # Per-day photo archive; configured by PhotoDateView.
    pass
class PhotoMonthArchiveView(PhotoDateView, MonthArchiveView):
    # Per-month photo archive; configured by PhotoDateView.
    pass
class PhotoYearArchiveView(PhotoDateView, YearArchiveView):
    # Per-year photo archive; also lists the photos themselves.
    make_object_list = True
# Deprecated views.
class DeprecatedMonthMixin(object):
    """Representation of months in urls has changed from an alpha representation
    ('jan' for January) to a numeric representation ('01' for January).
    Properly deprecate the previous urls."""
    query_string = True
    # Legacy 3-letter month token -> 2-digit month used by the new url patterns.
    month_names = {'jan': '01',
                   'feb': '02',
                   'mar': '03',
                   'apr': '04',
                   'may': '05',
                   'jun': '06',
                   'jul': '07',
                   'aug': '08',
                   'sep': '09',
                   'oct': '10',
                   'nov': '11',
                   'dec': '12', }
    def get_redirect_url(self, *args, **kwargs):
        # Emit the deprecation warning; subclasses build and return the
        # replacement (numeric-month) url. Stray debug print removed.
        warnings.warn(
            DeprecationWarning('Months are now represented in urls by numbers rather than by '
                               'their first 3 letters. The old style will be removed in Photologue 3.4.'))
class GalleryDateDetailOldView(DeprecatedMonthMixin, RedirectView):
    # Permanent redirect from the legacy alpha-month gallery-detail url.
    permanent = True
    def get_redirect_url(self, *args, **kwargs):
        # Warn (mixin), then map the legacy month token to its numeric form.
        super(GalleryDateDetailOldView, self).get_redirect_url(*args, **kwargs)
        return reverse('photologue:gallery-detail', kwargs={'year': kwargs['year'],
                                                            'month': self.month_names[kwargs['month']],
                                                            'day': kwargs['day'],
                                                            'slug': kwargs['slug']})
class GalleryDayArchiveOldView(DeprecatedMonthMixin, RedirectView):
    # Permanent redirect from the legacy alpha-month gallery day archive url.
    permanent = True
    def get_redirect_url(self, *args, **kwargs):
        super(GalleryDayArchiveOldView, self).get_redirect_url(*args, **kwargs)
        return reverse('photologue:gallery-archive-day', kwargs={'year': kwargs['year'],
                                                                 'month': self.month_names[kwargs['month']],
                                                                 'day': kwargs['day']})
class GalleryMonthArchiveOldView(DeprecatedMonthMixin, RedirectView):
    # Permanent redirect from the legacy alpha-month gallery month archive url.
    permanent = True
    def get_redirect_url(self, *args, **kwargs):
        super(GalleryMonthArchiveOldView, self).get_redirect_url(*args, **kwargs)
        return reverse('photologue:gallery-archive-month', kwargs={'year': kwargs['year'],
                                                                   'month': self.month_names[kwargs['month']]})
class PhotoDateDetailOldView(DeprecatedMonthMixin, RedirectView):
    # Permanent redirect from the legacy alpha-month photo-detail url.
    permanent = True
    def get_redirect_url(self, *args, **kwargs):
        # Warn (mixin), then map the legacy month token to its numeric form.
        super(PhotoDateDetailOldView, self).get_redirect_url(*args, **kwargs)
        return reverse('photologue:photo-detail', kwargs={'year': kwargs['year'],
                                                          'month': self.month_names[kwargs['month']],
                                                          'day': kwargs['day'],
                                                          'slug': kwargs['slug']})
class PhotoDayArchiveOldView(DeprecatedMonthMixin, RedirectView):
    # Permanent redirect from the legacy alpha-month photo day archive url.
    permanent = True
    def get_redirect_url(self, *args, **kwargs):
        super(PhotoDayArchiveOldView, self).get_redirect_url(*args, **kwargs)
        return reverse('photologue:photo-archive-day', kwargs={'year': kwargs['year'],
                                                               'month': self.month_names[kwargs['month']],
                                                               'day': kwargs['day']})
class PhotoMonthArchiveOldView(DeprecatedMonthMixin, RedirectView):
    # Permanent redirect from the legacy alpha-month photo month archive url.
    permanent = True
    def get_redirect_url(self, *args, **kwargs):
        super(PhotoMonthArchiveOldView, self).get_redirect_url(*args, **kwargs)
        return reverse('photologue:photo-archive-month', kwargs={'year': kwargs['year'],
                                                                 'month': self.month_names[kwargs['month']]})
|
fs714/drfilter | tools/novasetup.py | Python | apache-2.0 | 995 | 0 | # Still some problems...
import time
import shutil
from configobj import ConfigObj

NOVA_API_CONF = "/etc/nova/api-paste.ini"
OS_API_SEC = "composite:openstack_compute_api_v2"
DR_FILTER_TARGET_KEY = "keystone_nolimit"
DR_FILTER_TARGET_KEY_VALUE = "compute_req_id faultwrap sizelimit " \
                             "authtoken keystonecontext drfilter " \
                             "osapi_compute_app_v2"
DR_SEC = "filter:drfilter"
DR_KEY = "paste.filter_factory"
DR_KEY_VALUE = "drfilter.urlforwarding:url_forwarding_factory"

# Backup /etc/nova/api-paste.ini with a timestamped .bak suffix first.
now = time.strftime('%Y%m%d%H%M%S')
target = NOVA_API_CONF + "." + now + ".bak"
shutil.copyfile(NOVA_API_CONF, target)

# Update /etc/nova/api-paste.ini: inject drfilter into the v2 pipeline
# and declare its paste filter factory section.
conf = ConfigObj(NOVA_API_CONF)
conf[OS_API_SEC][DR_FILTER_TARGET_KEY] = DR_FILTER_TARGET_KEY_VALUE
conf[DR_SEC] = {}
conf[DR_SEC][DR_KEY] = DR_KEY_VALUE
conf.write()

# Echo the resulting configuration for operator inspection.
for sec in conf:
    print(sec)
    for key in conf[sec]:
        print("\t" + key + " = " + conf[sec][key])
|
rfhk/awo-custom | sale_line_quant_extended/models/__init__.py | Python | lgpl-3.0 | 208 | 0 | # -*- coding: utf-8 -*-
# Register all model modules of the sale_line_quant_extended addon.
from . import sale_order
from . import sale_order_line
from . import procurement
from . import stock_quant
from . import stock_move
from . import stock_picking
from . import purchase
|
ksamuel/smit | vessels/crawler/wamp_client.py | Python | mit | 4,286 | 0.001633 | import os
import sys
import asyncio
from pathlib import Path
import pendulum
sys.path.append(str(Path(__file__).absolute().parent.parent.parent))
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "project.settings")
from django.core.wsgi import get_wsgi_application # noqa
application = get_wsgi_application()
from autobahn.asyncio.wamp import ApplicationSession, ApplicationRunner # noqa
import devpy.develop as log
# Boost the log max file size to 1Gb
log.handlers[0].maxBytes *= 1000
from vessels.models import VesselActivity # noqa
from vessels.crawler.ftp_client import crawl_csv, save_csv # noqa
from vessels.crawler.nh_client import process_xml, crawl_xml # noqa
class WampClient(ApplicationSession):
    """WAMP session exposing activity/vessel update RPCs and publishing
    CSV (FTP) and XML (NH) crawler results to subscribed clients."""

    async def onJoin(self, details):
        loop = asyncio.get_event_loop()

        async def update_activity_status(id, status):
            """ Update ship status for the given activity """
            def _(id, value):
                log.info(f'Update activity "{id}" status to "{status}"')
                activity = VesselActivity.objects.get(id=id)
                activity.status = status or None
                activity.save()
                return activity.to_dict(
                    timezone="Europe/Paris", include_vessel=True
                )
            # Run the blocking ORM work in the default thread executor.
            activity = await loop.run_in_executor(None, _, id, status)
            activity['timestamp'] = pendulum.utcnow().timestamp()
            log.info(f'Update status info for activity {activity!r}')
            self.publish('smit.activity.update', activity)
            return activity

        self.register(update_activity_status, 'smit.activity.update.status')

        async def update_vessel_helico(id, helico):
            """ Update helicopter approval for the vessel of this activity """
            def _(id, value):
                activity = VesselActivity.objects.get(id=id)
                vessel = activity.vessel
                log.info(f'Update vessel "{vessel.id}" helico to "{helico}"')
                vessel.helico = helico or None
                vessel.save()
                return activity.to_dict(
                    timezone="Europe/Paris", include_vessel=True
                )
            activity = await loop.run_in_executor(None, _, id, helico)
            activity['timestamp'] = pendulum.utcnow().timestamp()
            log.info(f'Update helico info for activity {activity!r}')
            self.publish('smit.activity.update', activity)
            return activity

        self.register(update_vessel_helico, 'smit.vessel.update.helico')

        async def update_vessel_helico_obs(id, obs):
            """ Update helicopter obs for the vessel of this activity """
            def _(id, value):
                activity = VesselActivity.objects.get(id=id)
                vessel = activity.vessel
                log.info(f'Update vessel "{vessel.id}" helico to "{obs}"')
                vessel.helico_observation = obs or None
                vessel.save()
                return activity.to_dict(
                    timezone="Europe/Paris", include_vessel=True
                )
            activity = await loop.run_in_executor(None, _, id, obs)
            activity['timestamp'] = pendulum.utcnow().timestamp()
            log.info(f'Update helico obs for activity {activity!r}')
            self.publish('smit.activity.update', activity)
            return activity

        self.register(update_vessel_helico_obs, 'smit.vessel.update.helico_obs')

        async def publish_csv_update(stream):
            # Persist the crawled CSV, then broadcast the saved activities.
            activities = await save_csv(stream)
            self.publish('smit.sirene.csv.update', activities)

        coro = crawl_csv(
            host="localhost",
            login="user",
            pwd="password",
            port=2121,
            path="fixture.csv",
            csv_callback=publish_csv_update,
            tick=3
        )
        asyncio.ensure_future(coro)

        async def publish_xml_update(stream):
            # Parse the NH XML feed, then broadcast the computed distances.
            distances = await process_xml(stream)
            self.publish('smit.nh.xml.update', distances)

        asyncio.ensure_future(crawl_xml(xml_callback=publish_xml_update))
# Script entry point: connect the WAMP client to the local router.
if __name__ == '__main__':
    runner = ApplicationRunner("ws://127.0.0.1:3333/ws", "realm1")
    runner.run(WampClient)
|
amirgeva/py2d | tools/combine_tiles.py | Python | bsd-2-clause | 1,330 | 0 | import sys
import glob
from PIL import Image
import cv2
import numpy as np
import cmdline as cmd

# Defaults for the command-line flags; mutated by set_gap()/set_output().
gap = 0
output = 'tiles.png'
def set_gap(param):
    # Command-line callback: parse the inter-tile gap (pixels) from its
    # string value supplied by the 'gap' flag.
    global gap
    gap = int(param)
def set_output(param):
    # Command-line callback: remember the output image filename ('out' flag).
    global output
    output = param
def main():
    """Pack all images matching the command-line globs onto one RGBA sheet."""
    cmd.flag('gap', True, set_gap)
    cmd.flag('out', True, set_output)
    cmd.parse()
    print(f"Combining images with gap {gap}")
    # Current placement cursor, starting one gap in from the top-left.
    x = gap
    y = gap
    rects = {}
    for arg in cmd.arguments:
        filenames = glob.glob(arg)
        for filename in filenames:
            pimg = Image.open(filename).convert('RGBA')
            img = np.array(pimg)
            r = img[:, :, 0]
            g = img[:, :, 1]
            b = img[:, :, 2]
            a = img[:, :, 3]
            # Reorder channels RGBA -> BGRA for OpenCV's imwrite convention.
            img = cv2.merge((b, g, r, a))
            h, w = img.shape[0], img.shape[1]
            r = (x, y, x + w, y + h)
            rects[r] = img
            # Advance the cursor; wrap to a new row past x == 200.
            # NOTE(review): the output canvas is fixed at 300 px wide, so tiles
            # wider than that would overflow — confirm input tile sizes.
            x = x + w + gap
            if x > 200:
                x = gap
                y = y + h + gap
    if len(rects) == 0:
        print("No input images specified")
    else:
        # h still holds the last tile's height; size the canvas to fit all rows.
        outimg = np.zeros((y + h + gap, 300, 4), dtype=np.uint8)
        for r in rects.keys():
            img = rects.get(r)
            outimg[r[1]:r[3], r[0]:r[2], :] = img
        cv2.imwrite(output, outimg)
# Script entry point.
if __name__ == '__main__':
    main()
|
Captain-Coder/tribler | Tribler/Core/Utilities/install_dir.py | Python | lgpl-3.0 | 1,960 | 0.002041 | """
install_dir.
Author(s): Elric Milon
"""
from __future__ import absolute_import
import os.path
import sys
from six import text_type
import Tribler
from Tribler.Core.osutils import is_android
def is_frozen():
    """
    Return whether we are running in a frozen (PyInstaller) environment.

    PyInstaller unpacks the bundle into a temp folder and exposes its path
    as ``sys._MEIPASS``; the attribute's presence is the frozen marker.
    """
    # Equivalent to the old try/except around `sys._MEIPASS`, without the
    # unused local and the over-broad `except Exception`.
    return hasattr(sys, '_MEIPASS')
def get_base_path():
    """Get absolute path to resource, works for dev and for PyInstaller."""
    # PyInstaller creates a temp folder and stores its path in sys._MEIPASS.
    frozen_path = getattr(sys, '_MEIPASS', None)
    if frozen_path is not None:
        return frozen_path
    # Development / installed mode: the parent of the Tribler package.
    return os.path.join(os.path.dirname(Tribler.__file__), '..')
def get_lib_path():
    """Return the directory containing the Tribler python package."""
    base = get_base_path()
    # Frozen bundles ship the sources under an extra 'tribler_source' folder.
    subdirs = ('tribler_source', 'Tribler') if is_frozen() else ('Tribler',)
    return os.path.join(base, *subdirs)
# This function is used from tribler.py too, but can't be there as tribler.py gets frozen into an exe on windows.
def determine_install_dir():
    """Return the directory Tribler is installed in, per platform."""
    # Niels, 2011-03-03: Working dir sometimes set to a browsers working dir
    # only seen on windows
    # apply trick to obtain the executable location
    # see http://www.py2exe.org/index.cgi/WhereAmI
    # Niels, 2012-01-31: py2exe should only apply to windows
    # TODO(emilon): tribler_main.py is not frozen, so I think the special
    # treatment for windows could be removed (Needs to be tested)
    if sys.platform == 'win32':
        return get_base_path()
    elif sys.platform == 'darwin':
        return get_base_path()
    elif is_android():
        return os.path.abspath(os.path.join(text_type(os.environ['ANDROID_PRIVATE']), u'lib/python2.7/site-packages'))
    # Linux: walk up from this module to the repository/installation root.
    this_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))
    return '/usr/share/tribler' if this_dir.startswith('/usr/lib') else this_dir
|
dana-i2cat/felix | modules/resource/manager/stitching-entity/src/delegate/geni/v3/models/sdn_resource_table.py | Python | apache-2.0 | 1,820 | 0.004396 | import pymongo
class Ports(object):
    """Accumulates switch-port descriptors as ``{'name': ..., 'num': ...}`` dicts."""
    def __init__(self):
        self.__ports = []
    def get(self):
        """Return the list of port descriptors collected so far."""
        return self.__ports
    def add(self, name, num):
        """Append a descriptor for the port called ``name`` with number ``num``."""
        self.__ports.append({'name': name, 'num': num})
class SDNDatapathResourceTable(object):
    """MongoDB-backed table of SDN datapaths reserved per resource manager."""
    def __init__(self):
        self.table = pymongo.MongoClient().felix_ro.SDNDatapathResourceTable
    def insert(self, rm_uuid, component_id, component_manager_id, dpid,
               ports=None):
        """Insert a datapath row and return the ID of the new entry.

        ``ports`` defaults to a fresh empty list; the previous
        ``Ports().get()`` default was evaluated once at class-creation time
        and shared (mutable) across all calls.
        """
        row = {'rm_uuid': rm_uuid,
               'component_id': component_id,
               'component_manager_id': component_manager_id,
               'dpid': dpid,
               'ports': [] if ports is None else ports}
        # Return the ID of the new entry
        return self.table.insert(row)
    def delete(self, rm_uuid, component_id, component_manager_id, dpid):
        """Remove the row matching all four identifying fields."""
        row = {'rm_uuid': rm_uuid,
               'component_id': component_id,
               'component_manager_id': component_manager_id,
               'dpid': dpid}
        self.table.remove(row)
class SDNLinkResourceTable(object):
    # MongoDB-backed table of SDN links (src/dst datapath+port pairs) per RM.
    def __init__(self):
        self.table = pymongo.MongoClient().felix_ro.SDNLinkResourceTable
    def insert(self, rm_uuid, dstDPID, dstPort, srcDPID, srcPort):
        # Insert one link row keyed by the owning RM and both endpoints.
        row = {'rm_uuid': rm_uuid,
               'dstDPID': dstDPID,
               'dstPort': dstPort,
               'srcDPID': srcDPID,
               'srcPort': srcPort}
        # Return the ID of the new entry
        return self.table.insert(row)
    def delete(self, rm_uuid, dstDPID, dstPort, srcDPID, srcPort):
        # Remove the row matching all identifying fields.
        row = {'rm_uuid': rm_uuid,
               'dstDPID': dstDPID,
               'dstPort': dstPort,
               'srcDPID': srcDPID,
               'srcPort': srcPort}
        self.table.remove(row)
|
sony/nnabla | python/test/function/test_where.py | Python | apache-2.0 | 1,821 | 0 | # Copyright 2019,2020,2021 Sony Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import numpy as np
import nnabla.functions as F
from nbla_test_utils import list_context

# Backend/device contexts against which the Where tests are parametrized.
ctxs = list_context('Where')
@pytest.mark.parametrize("ctx, func_name", ctxs)
@pytest.mark.parametrize("seed", [313])
def test_where_forward_backward(seed, ctx, func_name):
    # Check F.where against np.where as the reference implementation.
    from nbla_test_utils import function_tester
    rng = np.random.RandomState(seed)
    inshape = (2, 3, 4)
    # Inputs: boolean condition (as float mask), then the two branch tensors.
    inputs = [
        (rng.rand(*inshape) > 0.5).astype(np.float32),
        rng.randn(*inshape),
        rng.randn(*inshape),
    ]
    # The condition input is not differentiable, hence backward=[False, ...].
    function_tester(rng, F.where, np.where, inputs,
                    backward=[False, True, True],
                    ctx=ctx, func_name=func_name)
@pytest.mark.parametrize("ctx, func_name", ctxs)
@pytest.mark.parametrize("seed", [313])
def test_where_double_backward(seed, ctx, func_name):
    # Check second-order gradients of F.where on a small input.
    from nbla_test_utils import backward_function_tester
    rng = np.random.RandomState(seed)
    inshape = (2, 2)
    inputs = [
        (rng.rand(*inshape) > 0.5).astype(np.float32),
        rng.randn(*inshape),
        rng.randn(*inshape),
    ]
    # Condition input is not differentiable, hence backward=[False, ...].
    backward_function_tester(rng, F.where, inputs,
                             backward=[False, True, True], dstep=1e3,
                             ctx=ctx)
|
mrbox/django | django/test/utils.py | Python | bsd-3-clause | 21,134 | 0.000284 | import logging
import re
import sys
import time
import warnings
from contextlib import contextmanager
from functools import wraps
from unittest import skipIf, skipUnless
from xml.dom.minidom import Node, parseString
from django.apps import apps
from django.conf import UserSettingsHolder, settings
from django.core import mail
from django.core.signals import request_started
from django.db import reset_queries
from django.http import request
from django.template import Template
from django.test.signals import setting_changed, template_rendered
from django.urls import get_script_prefix, set_script_prefix
from django.utils import six
from django.utils.decorators import ContextDecorator
from django.utils.encoding import force_str
from django.utils.translation import deactivate
try:
import jinja2
except ImportError:
jinja2 = None
__all__ = (
'Approximate', 'ContextList', 'isolate_lru_cache', 'get_runner',
'modify_settings', 'override_settings',
'requires_tz_support',
'setup_test_environment', 'teardown_test_environment',
)
TZ_SUPPORT = hasattr(time, 'tzset')
class Approximate(object):
    """Wrapper comparing equal to any value within ``places`` decimal places.

    Useful for assertions on floating point results.
    """
    def __init__(self, val, places=7):
        self.val = val
        self.places = places
    def __repr__(self):
        return repr(self.val)
    def __eq__(self, other):
        # Exact match short-circuits; otherwise compare the rounded delta.
        if self.val == other:
            return True
        delta = abs(self.val - other)
        return round(delta, self.places) == 0
class ContextList(list):
    """A wrapper that provides direct key access to context items contained
    in a list of context objects.
    """
    def __getitem__(self, key):
        if isinstance(key, six.string_types):
            # String key: search each subcontext in order; first hit wins.
            for subcontext in self:
                if key in subcontext:
                    return subcontext[key]
            raise KeyError(key)
        else:
            # Integer/slice access keeps normal list behaviour.
            return super(ContextList, self).__getitem__(key)
    def __contains__(self, key):
        try:
            self[key]
        except KeyError:
            return False
        return True
    def keys(self):
        """
        Flattened keys of subcontexts.
        """
        keys = set()
        for subcontext in self:
            # Each subcontext iterates as a stack of dicts; avoid shadowing
            # the builtin `dict` with the loop variable.
            for d in subcontext:
                keys |= set(d.keys())
        return keys
def instrumented_test_render(self, context):
    """
    An instrumented Template render method, providing a signal
    that can be intercepted by the test system Client
    """
    # Fire the signal first so listeners see the template/context pair,
    # then render exactly as Template._render would.
    template_rendered.send(sender=self, template=self, context=context)
    return self.nodelist.render(context)
def setup_test_environment():
    """Perform any global pre-test setup. This involves:
        - Installing the instrumented test renderer
        - Set the email backend to the locmem email backend.
        - Setting the active locale to match the LANGUAGE_CODE setting.
    """
    Template._original_render = Template._render
    Template._render = instrumented_test_render
    # Storing previous values in the settings module itself is problematic.
    # Store them in arbitrary (but related) modules instead. See #20636.
    mail._original_email_backend = settings.EMAIL_BACKEND
    settings.EMAIL_BACKEND = 'django.core.mail.backends.locmem.EmailBackend'
    request._original_allowed_hosts = settings.ALLOWED_HOSTS
    settings.ALLOWED_HOSTS = ['*']
    mail.outbox = []
    deactivate()
def teardown_test_environment():
    """Perform any global post-test teardown. This involves:
        - Restoring the original test renderer
        - Restoring the email sending functions
    """
    # Mirror image of setup_test_environment(): restore and clean up the
    # values stashed on Template, mail and request.
    Template._render = Template._original_render
    del Template._original_render
    settings.EMAIL_BACKEND = mail._original_email_backend
    del mail._original_email_backend
    settings.ALLOWED_HOSTS = request._original_allowed_hosts
    del request._original_allowed_hosts
    del mail.outbox
def get_runner(settings, test_runner_class=None):
    """Resolve and return the test-runner class named by the dotted path
    ``test_runner_class`` (defaults to ``settings.TEST_RUNNER``)."""
    if not test_runner_class:
        test_runner_class = settings.TEST_RUNNER
    test_path = test_runner_class.split('.')
    # Allow for Python 2.5 relative paths
    if len(test_path) > 1:
        test_module_name = '.'.join(test_path[:-1])
    else:
        test_module_name = '.'
    test_module = __import__(test_module_name, {}, {}, force_str(test_path[-1]))
    test_runner = getattr(test_module, test_path[-1])
    return test_runner
class override_settings(object):
    """
    Acts as either a decorator, or a context manager. If it's a decorator it
    takes a function and returns a wrapped function. If it's a contextmanager
    it's used with the ``with`` statement. In either event entering/exiting
    are called before and after, respectively, the function/block is executed.
    """
    def __init__(self, **kwargs):
        self.options = kwargs
    def __enter__(self):
        self.enable()
    def __exit__(self, exc_type, exc_value, traceback):
        self.disable()
    def __call__(self, test_func):
        # Decorator usage: classes get the options recorded for later,
        # plain functions are wrapped in the context manager.
        from django.test import SimpleTestCase
        if isinstance(test_func, type):
            if not issubclass(test_func, SimpleTestCase):
                raise Exception(
                    "Only subclasses of Django SimpleTestCase can be decorated "
                    "with override_settings")
            self.save_options(test_func)
            return test_func
        else:
            @wraps(test_func)
            def inner(*args, **kwargs):
                with self:
                    return test_func(*args, **kwargs)
            return inner
    def save_options(self, test_func):
        # Record the overrides on the test class for SimpleTestCase to apply.
        if test_func._overridden_settings is None:
            test_func._overridden_settings = self.options
        else:
            # Duplicate dict to prevent subclasses from altering their parent.
            test_func._overridden_settings = dict(
                test_func._overridden_settings, **self.options)
    def enable(self):
        # Keep this code at the beginning to leave the settings unchanged
        # in case it raises an exception because INSTALLED_APPS is invalid.
        if 'INSTALLED_APPS' in self.options:
            try:
                apps.set_installed_apps(self.options['INSTALLED_APPS'])
            except Exception:
                apps.unset_installed_apps()
                raise
        # Swap in a UserSettingsHolder carrying the overridden values and
        # notify listeners of every changed setting.
        override = UserSettingsHolder(settings._wrapped)
        for key, new_value in self.options.items():
            setattr(override, key, new_value)
        self.wrapped = settings._wrapped
        settings._wrapped = override
        for key, new_value in self.options.items():
            setting_changed.send(sender=settings._wrapped.__class__,
                                 setting=key, value=new_value, enter=True)
    def disable(self):
        # Restore the previous settings holder and signal the rollback.
        if 'INSTALLED_APPS' in self.options:
            apps.unset_installed_apps()
        settings._wrapped = self.wrapped
        del self.wrapped
        for key in self.options:
            new_value = getattr(settings, key, None)
            setting_changed.send(sender=settings._wrapped.__class__,
                                 setting=key, value=new_value, enter=False)
class modify_settings(override_settings):
"""
Like override_settings, but makes it possible to append, prepend or remove
items instead of redefining the entire list.
"""
    def __init__(self, *args, **kwargs):
        # Either a single positional operations list (internal use) or
        # keyword operations like LIST_NAME={'append': [...]}.
        if args:
            # Hack used when instantiating from SimpleTestCase.setUpClass.
            assert not kwargs
            self.operations = args[0]
        else:
            assert not args
            self.operations = list(kwargs.items())
    def save_options(self, test_func):
        # Record the operations on the test class for later application.
        if test_func._modified_settings is None:
            test_func._modified_settings = self.operations
        else:
            # Duplicate list to prevent subclasses from altering their parent.
            test_func._modified_settings = list(
                test_func._modified_settings) + self.operations
def enable(self):
self.options = {}
for name, operations in self.operations:
try:
# When called from SimpleTestCase.setUpClass, values may be
# overridden several times; cumulate changes.
|
sputnick-dev/weboob | modules/bred/bred/browser.py | Python | agpl-3.0 | 5,970 | 0.002345 | # -*- coding: utf-8 -*-
# Copyright(C) 2014 Romain Bignon
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from datetime import date
from decimal import Decimal
from weboob.capabilities.bank import Account, Transaction
from weboob.exceptions import BrowserIncorrectPassword, BrowserHTTPError, BrowserUnavailable, ParseError
from weboob.browser import DomainBrowser
__all__ = ['BredBrowser']
class BredBrowser(DomainBrowser):
BASEURL = 'https://www.bred.fr'
    def __init__(self, accnum, login, password, *args, **kwargs):
        # accnum '00000000000' means "all accounts" (see get_accounts_list).
        super(BredBrowser, self).__init__(*args, **kwargs)
        self.login = login
        self.password = password
        self.accnum = accnum
    def do_login(self, login, password):
        # Authenticate against the bank; error pages are detected from the
        # redirect URL rather than from status codes.
        r = self.open('/transactionnel/Authentication', data={'identifiant': login, 'password': password})
        if 'gestion-des-erreurs/erreur-pwd' in r.url:
            raise BrowserIncorrectPassword('Bad login/password.')
        if 'gestion-des-erreurs/opposition' in r.url:
            raise BrowserIncorrectPassword('Your account is disabled')
        if '/pages-gestion-des-erreurs/erreur-technique' in r.url:
            raise BrowserUnavailable('A technical error occured')
ACCOUNT_TYPES = {'000': Account.TYPE_CHECKING,
'999': Account.TYPE_MARKET,
'011': Account.TYPE_CARD,
'023': Account.TYPE_SAVINGS,
'078': Account.TYPE_SAVINGS,
'080': Account.TYPE_SAVINGS,
'027': Account.TYPE_SAVINGS,
'037': Account.TYPE_SAVINGS,
'730': Account.TYPE_DEPOSIT,
}
def api_open(self, *args, **kwargs):
try:
return super(BredBrowser, self).open(*args, **kwargs | )
except BrowserHTTPError:
self.do_login(self.login, self.password)
retu | rn super(BredBrowser, self).open(*args, **kwargs)
    def get_accounts_list(self):
        # Yield one Account per "poste" of each account returned by the API,
        # optionally filtered to self.accnum ('00000000000' = all accounts).
        r = self.api_open('/transactionnel/services/rest/Account/accounts')
        for content in r.json()['content']:
            if self.accnum != '00000000000' and content['numero'] != self.accnum:
                continue
            for poste in content['postes']:
                a = Account()
                a._number = content['numeroLong']
                a._nature = poste['codeNature']
                a._consultable = poste['consultable']
                a.id = '%s.%s' % (a._number, a._nature)
                a.type = self.ACCOUNT_TYPES.get(poste['codeNature'], Account.TYPE_UNKNOWN)
                if 'numeroDossier' in poste and poste['numeroDossier']:
                    a._file_number = poste['numeroDossier']
                    a.id += '.%s' % a._file_number
                # NOTE(review): a portfolio poste yields the same Account
                # object twice (once as 'Portefeuille Titres', once with the
                # regular label) — confirm this is intended.
                if poste['postePortefeuille']:
                    a.label = u'Portefeuille Titres'
                    a.balance = Decimal(str(poste['montantTitres']['valeur']))
                    a.currency = poste['montantTitres']['monnaie']['code'].strip()
                    yield a
                if 'libelle' not in poste:
                    continue
                a.label = ' '.join([content['intitule'].strip(), poste['libelle'].strip()])
                a.balance = Decimal(str(poste['solde']['valeur']))
                a.currency = poste['solde']['monnaie']['code'].strip()
                yield a
    def get_history(self, account):
        # Yield Transactions for `account`, paging through the API 50 at a
        # time until a page comes back empty.
        if not account._consultable:
            raise NotImplementedError()
        offset = 0
        next_page = True
        seen = set()
        while next_page:
            r = self.api_open('/transactionnel/services/applications/operations/get/%(number)s/%(nature)s/00/%(currency)s/%(startDate)s/%(endDate)s/%(offset)s/%(limit)s' %
                              {'number': account._number,
                               'nature': account._nature,
                               'currency': account.currency,
                               'startDate': '2000-01-01',
                               'endDate': date.today().strftime('%Y-%m-%d'),
                               'offset': offset,
                               'limit': 50
                               })
            next_page = False
            offset += 50
            transactions = []
            for op in reversed(r.json()['content']['operations']):
                next_page = True
                t = Transaction()
                # Duplicate IDs across pages indicate the server is looping.
                if op['id'] in seen:
                    raise ParseError('There are several transactions with the same ID, probably an infinite loop')
                t.id = op['id']
                seen.add(t.id)
                t.amount = Decimal(str(op['montant']))
                # Timestamps are in milliseconds; fall back across the
                # debit/operation/value dates when one is missing.
                t.date = date.fromtimestamp(op.get('dateDebit', op.get('dateOperation'))/1000)
                t.rdate = date.fromtimestamp(op.get('dateOperation', op.get('dateDebit'))/1000)
                t.vdate = date.fromtimestamp(op.get('dateValeur', op.get('dateDebit', op.get('dateOperation')))/1000)
                if 'categorie' in op:
                    t.category = op['categorie']
                t.label = op['libelle']
                t.raw = ' '.join([op['libelle']] + op['details'])
                transactions.append(t)
            # Transactions are unsorted
            for t in sorted(transactions, key=lambda t: t.rdate, reverse=True):
                yield t
|
jstarc/nli_generation | hierarchical_softmax.py | Python | mit | 2,566 | 0.010912 | from keras import backend as K
from keras import initializations
from keras.backend.common import _EPSILON
from keras.engine.topology import Layer
from keras.engine import InputSpec
from theano.tensor.nnet import h_softmax
import theano.tensor as T
class HierarchicalSoftmax(Layer):
    """Two-level hierarchical softmax output layer (Theano backend only).

    Factorizes a softmax over ``output_dim`` classes into a class-group
    prediction followed by a within-group prediction, reducing the cost
    per step from O(output_dim) to roughly O(sqrt(output_dim)).
    """
    def __init__(self, output_dim, init='glorot_uniform', **kwargs):
        self.init = initializations.get(init)
        self.output_dim = output_dim
        def hshape(n):
            # Choose a near-square factorization: n_classes x n_outputs_per_class.
            from math import sqrt, ceil
            l1 = ceil(sqrt(n))
            l2 = ceil(n / l1)
            return int(l1), int(l2)
        self.n_classes, self.n_outputs_per_class = hshape(output_dim)
        super(HierarchicalSoftmax, self).__init__(**kwargs)
    def build(self, input_shape):
        self.input_spec = [InputSpec(shape=shape) for shape in input_shape]
        input_dim = self.input_spec[0].shape[-1]
        # W1/b1 predict the class group; W2/b2 predict within the group.
        self.W1 = self.init((input_dim, self.n_classes), name='{}_W1'.format(self.name))
        self.b1 = K.zeros((self.n_classes,), name='{}_b1'.format(self.name))
        self.W2 = self.init((self.n_classes, input_dim, self.n_outputs_per_class), name='{}_W2'.format(self.name))
        self.b2 = K.zeros((self.n_classes, self.n_outputs_per_class), name='{}_b2'.format(self.name))
        self.trainable_weights = [self.W1, self.b1, self.W2, self.b2]
    def get_output_shape_for(self, input_shape):
        # Last dimension is 1 (target probabilities) in training and
        # output_dim at inference, hence None here.
        return (input_shape[0][0], input_shape[0][1], None)
    def call(self, X, mask=None):
        # X = [features, targets]; flatten timesteps into the batch axis.
        input_shape = self.input_spec[0].shape
        x = K.reshape(X[0], (-1, input_shape[2]))
        # With a target, h_softmax returns only the target probabilities.
        target = X[1].flatten() if self.trainable else None
        Y = h_softmax(x, K.shape(x)[0], self.output_dim,
                      self.n_classes, self.n_outputs_per_class,
                      self.W1, self.b1, self.W2, self.b2, target)
        output_dim = 1 if self.trainable else self.output_dim
        input_length = K.shape(X[0])[1]
        y = K.reshape(Y, (-1, input_length, output_dim))
        return y
    def get_config(self):
        config = {'output_dim': self.output_dim,
                  'init': self.init.__name__}
        base_config = super(HierarchicalSoftmax, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
def hs_categorical_crossentropy(y_true, y_pred):
    # Clip predictions away from 0/1 to keep log() finite, then apply the
    # standard categorical cross-entropy (note Theano's (pred, true) order).
    y_pred = T.clip(y_pred, _EPSILON, 1.0 - _EPSILON)
    return T.nnet.categorical_crossentropy(y_pred, y_true)
|
indevgr/django | django/core/management/commands/migrate.py | Python | bsd-3-clause | 13,610 | 0.002425 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import time
from collections import OrderedDict
from importlib import import_module
from django.apps import apps
from django.core.checks import Tags, run_checks
from django.core.management.base import BaseCommand, CommandError
from django.core.management.sql import (
emit_post_migrate_signal, emit_pre_migrate_signal,
)
from django.db import DEFAULT_DB_ALIAS, connections, router, transaction
from django.db.migrations.autodetector import MigrationAutodetector
from django.db.migrations.executor import MigrationExecutor
from django.db.migrations.loader import AmbiguityError
from django.db.migrations.state import ProjectState
from django.utils.module_loading import module_has_submodule
class Command(BaseCommand):
help = "Updates database schema. Manages both apps with migrations and those without."
def add_arguments(self, parser):
parser.add_argument(
'app_label', nargs='?',
help='App label of an application to synchronize the state.',
)
parser.add_argument(
'migration_name', nargs='?',
help='Database state will be brought to the state after that '
'migration. Use the name "zero" to unapply all migrations.',
)
parser.add_argument(
'--noinput', '--no-input',
action='store_false', dest='interactive', default=True,
help='Tells Django to NOT prompt the user for input of any kind.',
)
parser.add_argument(
'--database', action='store', dest='database',
default=DEFAULT_DB_ALIAS,
help='Nominates a database to synchronize. Defaults to the "default" database.',
)
parser.add_argument(
'--fake', action='store_true', dest='fake', default=False,
help='Mark migrations as run without actually running them.',
)
parser.add_argument(
'--fake-initial', action='store_true', dest='fake_initial', default=False,
help='Detect if tables already exist and fake-apply initial migrations if so. Make sure '
'that the current database schema matches your initial migration before using this '
'flag. Django will only check for an existing table name.',
)
parser.add_argument(
'--run-syncdb', action='store_true', dest='run_syncdb',
help='Creates tables for apps without migrations.',
)
def _run_checks(self, **kwargs):
issues = run_checks(tags=[Tags.database])
issues.extend(super(Command, self).check(**kwargs))
return issues
def handle(self, *args, **options):
self.verbosity = options['verbosity']
self.interactive = options['interactive']
# Import the 'management' module within each installed app, to register
# dispatcher events.
for app_config in apps.get_app_configs():
if module_has_submodule(app_config.module, "management"):
import_module('.management', app_config.name)
# Get the database we're operating from
db = options['database']
connection = connections[db]
# Hook for backends needing any database preparation
connection.prepare_database()
# Work out which apps have migrations and which do not
executor = MigrationExecutor(connection, self.migration_progress_callback)
# Raise an error if any migrations are applied before their dependencies.
executor.loader.check_consistent_history(connection)
# Before anything else, see if there's conflicting apps and drop out
# hard if there are any
conflicts = executor.loader.detect_conflicts()
if conflicts:
name_str = "; ".join(
"%s in %s" % (", ".join(names), app)
for app, names in conflicts.items()
| )
raise CommandError(
"Conflicting migrations detected; multiple leaf nodes in the "
"migration graph: (%s).\nTo fix them run "
"'python manage.py makemigrations --merge'" % name_str
)
# If they supplied command line arguments, work out what they mean.
target_app_labels_only = True
if options['app_label'] and options['migration_name']:
app_label, migration_name = options['app_label'], o | ptions['migration_name']
if app_label not in executor.loader.migrated_apps:
raise CommandError(
"App '%s' does not have migrations." % app_label
)
if migration_name == "zero":
targets = [(app_label, None)]
else:
try:
migration = executor.loader.get_migration_by_prefix(app_label, migration_name)
except AmbiguityError:
raise CommandError(
"More than one migration matches '%s' in app '%s'. "
"Please be more specific." %
(migration_name, app_label)
)
except KeyError:
raise CommandError("Cannot find a migration matching '%s' from app '%s'." % (
migration_name, app_label))
targets = [(app_label, migration.name)]
target_app_labels_only = False
elif options['app_label']:
app_label = options['app_label']
if app_label not in executor.loader.migrated_apps:
raise CommandError(
"App '%s' does not have migrations." % app_label
)
targets = [key for key in executor.loader.graph.leaf_nodes() if key[0] == app_label]
else:
targets = executor.loader.graph.leaf_nodes()
plan = executor.migration_plan(targets)
run_syncdb = options['run_syncdb'] and executor.loader.unmigrated_apps
# Print some useful info
if self.verbosity >= 1:
self.stdout.write(self.style.MIGRATE_HEADING("Operations to perform:"))
if run_syncdb:
self.stdout.write(
self.style.MIGRATE_LABEL(" Synchronize unmigrated apps: ") +
(", ".join(sorted(executor.loader.unmigrated_apps)))
)
if target_app_labels_only:
self.stdout.write(
self.style.MIGRATE_LABEL(" Apply all migrations: ") +
(", ".join(sorted(set(a for a, n in targets))) or "(none)")
)
else:
if targets[0][1] is None:
self.stdout.write(self.style.MIGRATE_LABEL(
" Unapply all migrations: ") + "%s" % (targets[0][0], )
)
else:
self.stdout.write(self.style.MIGRATE_LABEL(
" Target specific migration: ") + "%s, from %s"
% (targets[0][1], targets[0][0])
)
emit_pre_migrate_signal(self.verbosity, self.interactive, connection.alias)
# Run the syncdb phase.
if run_syncdb:
if self.verbosity >= 1:
self.stdout.write(self.style.MIGRATE_HEADING("Synchronizing apps without migrations:"))
self.sync_apps(connection, executor.loader.unmigrated_apps)
# Migrate!
if self.verbosity >= 1:
self.stdout.write(self.style.MIGRATE_HEADING("Running migrations:"))
if not plan:
executor.check_replacements()
if self.verbosity >= 1:
self.stdout.write(" No migrations to apply.")
# If there's changes that aren't in migrations yet, tell them how to fix it.
autodetector = MigrationAutodetector(
executor.loader.project_state(),
ProjectState.from_apps(apps),
)
changes = autodetector.changes(graph=executor.loader.graph)
if changes:
self.stdout.write( |
google-research/google-research | alx/batching_utils.py | Python | apache-2.0 | 19,361 | 0.009245 | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains utilities for common batching needs across datasets."""
import collections
import dataclasses
import random
import typing
from typing import Any
from typing import List
import jax
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
@dataclasses.dataclass
class Batch():
"""Holds a densely packed batch of examples.
This data structure will be used as an intermediate state before converting to
TF Example format.
Examples:
1. Densely pack a single example:
row_id=r0, history=(h0, ..., hm), ground_truth=(g0, ..., gn)
Both history and ground truth will be packed into a 2D-tensor of size
[batch_size, seq_len] and [ground_truth_batch_size, seq_len] respectively.
If s = seq_len, batch may look like
[h0, ..., h(s-1)
hs, ..., h(2*s-1)
.
.
.
-1, ..., -1]
and ground_truth_batch may look like
[g0, ..., g(s-1)
gs, ..., g(2*s-1)
.
.
.
-1, ..., -1]
2. Densely pack multiple examples in a single batch:
In this case, we would pack the history and ground truth as before but we
would also have to keep track of which rows in batch (ground_truth_batch)
correspond to the same example in the pre-batched set.
We keep track of this in batch_ids (ground_truth_batch_ids) tensor.
If batch_ids = [0, 1, 1, 2, 2, 2, -1], then first row in batch tensor
corresponds to a single distinct example before dense batching. Rows 1 and 2
jointly corresponds to another distinct example and so forth.
We use -1 as padding value to make sure we can keep the shapes of all
batched tensors constant across the whole dataset.
"""
is_test: bool
seq_len: int
num_rows_per_batch: int
# Densely packed history.
row_ids: List[int] # (num_rows_per_batch)
item_lengths: List[int] # (num_rows_per_batch)
batch_size: int
batch_ids: List[int] # (batch_size)
batch: List[List[int]] # (batch_size, seq_len)
# Densely packed ground truth. Non empty only if is_test=True.
ground_truth_batch_size: int
ground_truth_batch_ids: List[int] # (ground_truth_batch_size)
ground_truth_batch: List[
List[int]] # (ground_truth_batch_size, seq_len)
def batch_with_batch_size(xs,
batch_size,
num_rows_per_batch,
seq_len = 16,
is_test = False,
ground_truth_batch_size = 0,
shuffle = False):
"""Create batches from a list of examples.
Splits long user | histories and ground truth into multiple examples and
pads any extra space with -1.
This function assumes that no user history is large enou | gh to overfill the
space in a single batch i.e. all user histories <= batch_size * seq_len.
Args:
xs: list of examples. (row_id, history, ground_truth) if is_test if True,
(row_id, history) otherwise.
batch_size: size of the batch.
num_rows_per_batch: fixed number of rows per batch. We keep this static to
not change the shapes of tensors every iteration.
seq_len: sequence length of the user history.
is_test: if this the test set, we process batch ground_truth as well.
ground_truth_batch_size: size of ground truth batch.
shuffle: is True, we shuffle xs before batching.
Returns:
Batches for the whole dataset.
"""
batches = []
if shuffle:
# We first check if its a multi-process TPU setup, each process will end up
# having a different order and dataset.shard will NOT work correctly.
if jax.process_count() > 1:
raise ValueError(
"Shuffling is not allowed in multi-process setup since dataset.shard"
"will stop working correctly later on."
)
random.shuffle(xs)
batch = Batch(
is_test=is_test,
batch_size=batch_size,
seq_len=seq_len,
num_rows_per_batch=num_rows_per_batch,
batch=[],
batch_ids=[],
row_ids=[],
item_lengths=[],
ground_truth_batch_size=ground_truth_batch_size,
ground_truth_batch_ids=[],
ground_truth_batch=[])
current_batch_id = 0
for x in xs:
row_id = x[0]
history = x[1]
if batch_size * seq_len < len(history):
raise ValueError(
"Increase batch size or seq len so that we can fit the largest "
"example in a single batch."
)
is_over_ground_truth_batch_size = False
if is_test:
ground_truth = x[2]
if ground_truth_batch_size * seq_len < len(ground_truth):
raise ValueError(
"Increase ground_truth_batch_size or seq len so that we can fit the"
"largest ground truth in a single batch."
)
ground_truth_capacity_needed = (len(ground_truth) // seq_len) + 1
is_over_ground_truth_batch_size = len(
batch.ground_truth_batch
) + ground_truth_capacity_needed > ground_truth_batch_size
capacity_needed = (len(history) // seq_len) + 1
is_over_batch_size = len(batch.batch) + capacity_needed > batch_size
is_over_num_users_per_batch = current_batch_id + 1 > num_rows_per_batch
if (is_over_batch_size or is_over_num_users_per_batch or
is_over_ground_truth_batch_size):
# Append null user histrories to last user and make sure number of users
# stay constant.
num_remaining = batch_size - len(batch.batch)
batch.batch.extend([[-1] * seq_len] * num_remaining)
batch.batch_ids.extend([current_batch_id - 1] * num_remaining)
batch.row_ids.extend([-1] * (num_rows_per_batch - current_batch_id))
batch.item_lengths.extend([0] * (num_rows_per_batch - current_batch_id))
if is_test:
ground_truth_num_remaining = ground_truth_batch_size - len(
batch.ground_truth_batch)
batch.ground_truth_batch.extend([[-1] * seq_len] *
ground_truth_num_remaining)
batch.ground_truth_batch_ids.extend([current_batch_id - 1] *
ground_truth_num_remaining)
batches.append(batch)
# Reset.
batch = Batch(
is_test=is_test,
batch_size=batch_size,
seq_len=seq_len,
num_rows_per_batch=num_rows_per_batch,
batch=[],
batch_ids=[],
row_ids=[],
item_lengths=[],
ground_truth_batch_size=ground_truth_batch_size,
ground_truth_batch_ids=[],
ground_truth_batch=[])
current_batch_id = 0
batch.row_ids.append(row_id)
batch.item_lengths.append(len(history))
start_index = 0
while start_index < len(history):
end_index = min(len(history), start_index + seq_len)
history_to_be_appended = history[start_index:end_index]
history_to_be_appended += [-1] * (
seq_len - len(history_to_be_appended))
batch.batch.append(history_to_be_appended)
batch.batch_ids.append(current_batch_id)
start_index += seq_len
if is_test:
ground_truth_start_index = 0
while ground_truth_start_index < len(ground_truth):
ground_truth_end_index = min(
len(ground_truth), ground_truth_start_index + seq_len)
ground_truth_to_be_appended = ground_truth[
ground_truth_start_index:ground_truth_end_index]
ground_truth_to_be_appended += [-1] * (
seq_len - len(ground_truth_to_be_appended))
batch.ground_truth_batch.append(ground_truth_to_be_appended)
batch.ground_truth_batch_ids.append(current_batch_id)
ground_truth_start_index += seq_len
current_batch_i |
trehn/teamvault | teamvault/apps/secrets/migrations/0020_auto_20180220_1356.py | Python | gpl-3.0 | 752 | 0 | # Generated by Django 2.0 on 2018-02-20 13:56
from django.db import migrations
def copy_owner_data(apps, schema_editor):
Secret = apps.get_model('secrets', 'Secret')
for secret in Secret.objects.all():
secret.allowed_groups.add(*list(secret.owner_groups.all()))
secret.allowed_users.add(*list(secret.owner_users.all()))
secret.notify_on_access_request.add(*list(secret.owner_users.all()))
for group in secret.owner_groups.all | ():
secret.notify_on_access_request.add(*list(group.user_set.all()))
class Migration(migrations.Migration):
dependencies = [
('secrets', '0019_secret_notify_on_access_request'),
]
operations = [
migrat | ions.RunPython(copy_owner_data),
]
|
Parisson/TimeSide | timeside/server/management/commands/timeside-save-items.py | Python | agpl-3.0 | 346 | 0 | from django.core.management.base import BaseCommand
from timeside.server | .models import Item
class Command(BaseCommand):
help = """
Save all Items to populate ext | ernal id, sha1,
mime type, samplerate and audio duration
"""
def handle(self, *args, **options):
for item in Item.objects.all():
item.save()
|
yongwen/makahiki | makahiki/apps/managers/challenge_mgr/tests.py | Python | mit | 2,099 | 0.002382 | """Tests the challenge_mgr module."""
import datetime
from django.contrib.auth.models import User
from django.test import TransactionTestCase
from django.core.urlresolvers import reverse
from apps.managers.challenge_mgr import challenge_mgr
from apps.managers.challenge_mgr.models import RoundSetting
from apps.utils import test_utils
class ContextProcessorFunctionalTestCase(TransactionTestCase):
"""Tests that the proper variables are loaded into a page."""
def testRoundInfo(self):
"""Tests that round info is available for the page to process."""
challenge_mgr.init()
test_utils.set_competition_round()
current_round = challenge_mgr.get_round_name()
User.objects.create_user("user", "user@test.com", password="changeme")
self.client.login(username="user", password="changeme")
challenge_mgr.register_page_widget("home", "home")
response = self.client.get(reverse("home_index"))
# Response context should have round info corresponding to the past days.
self.assertEqual(response.context["CURRENT_ROUND_INFO"]["name"], current_round,
"Expected %s but got %s" % (
current_round, response.context["CURRENT_ROUND_INFO"]["name"]))
class BaseUnitTestCase(TransactionTestCase):
"""basic setting test"""
def testCurrentRound(self):
"""Tests that the current round retrieval is correct."""
current_round = "Round 1"
| test_utils.set_competition_round()
current = challenge_mgr.get_round_name()
self.assertEqual(current, current_round,
"Test that the current round is returned.")
start = datetime.datetime.today() + datetime.timedelta(days=1)
end = start + datetime.timedelta(days=7)
rounds = RoundSetting.objects.get(name="Round 1")
rounds.sta | rt = start
rounds.end = end
rounds.save()
challenge_mgr.init()
current_round = challenge_mgr.get_round_name()
self.assertTrue(current_round is None,
"Test that there is no current round.")
|
antoinecarme/pyaf | tests/model_control/detailed/transf_None/model_control_one_enabled_None_MovingMedian_Seasonal_DayOfMonth_SVR.py | Python | bsd-3-clause | 160 | 0.05 | import tests. | model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['None'] , ['Mov | ingMedian'] , ['Seasonal_DayOfMonth'] , ['SVR'] ); |
alpatania/MapperTools | setup.py | Python | gpl-3.0 | 791 | 0.003793 | from setuptools import setup
def readme():
with open('README.rst') as f:
return f.read()
setup(name='MapperTools',
packages=['MapperTools'],
version='0.1',
description='A | python 2.7 implementation of Mapper algorithm for Topological Data Analysis',
keywords='mapper TDA python',
long_description=readme(),
url='http://github.com/alpatania',
author='Alice Patania',
author_email='alice.patania@gmail.com',
license='MIT',
classifiers=['Development Status :: 3 - Alpha',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7'],
install_requires=['hdbscan', 'sklearn', | 'pandas', 'collections'],
include_package_data=True,
zip_safe=False) |
labsland/labmanager | labmanager/rlms/ext/rest.py | Python | bsd-2-clause | 9,234 | 0.010613 | # -*-*- encoding: utf-8 -*-*-
#
# gateway4labs is free software: you can redistribute it and/or modify
# it under the terms of the BSD 2-Clause License
# gateway4labs is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY | or FITNESS FOR A PARTICULAR PURPOSE.
import sys
import json
import requests
import traceback
from flask import current_app
from flask.ext.wtf import TextField, Required, URL, PasswordField, SelectField
from labmanager.forms import AddForm, RetrospectiveForm, GenericPermissionForm
from labmanager.rlms import register, Laboratory, BaseRLMS, B | aseFormCreator, Versions, Capabilities
def get_module(version):
return sys.modules[__name__]
class HttpAddForm(AddForm):
base_url = TextField("Base URL", validators = [Required(), URL(False) ])
login = TextField("Login", validators = [Required() ])
password = PasswordField("Password", validators = [])
extension = TextField("Extension", validators = [], description = "If required, provide an extension (e.g., .php) to the HTTP API")
mode = SelectField("Mode", choices=[('json', 'Pure JSON requests and responses'), ('json+form', 'JSON for responses, HTML forms for requests')], default = "json")
def __init__(self, add_or_edit, *args, **kwargs):
super(HttpAddForm, self).__init__(*args, **kwargs)
@staticmethod
def process_configuration(old_configuration, new_configuration):
return new_configuration
class HttpPermissionForm(RetrospectiveForm):
pass
class HttpLmsPermissionForm(HttpPermissionForm, GenericPermissionForm):
pass
class HttpFormCreator(BaseFormCreator):
def get_add_form(self):
return HttpAddForm
def get_permission_form(self):
return HttpPermissionForm
def get_lms_permission_form(self):
return HttpLmsPermissionForm
FORM_CREATOR = HttpFormCreator()
class RLMS(BaseRLMS):
def __init__(self, configuration):
self.configuration = configuration
config = json.loads(configuration or '{}')
self.base_url = config.get('base_url')
if self.base_url.endswith('/'):
self.base_url = self.base_url[:-1]
self.login = config.get('login')
self.password = config.get('password')
self.extension = config.get('extension', '')
self.context_id = str(config.get('context_id', ''))
self.mode = config.get('mode', 'json')
if not self.base_url or not self.login or not self.password:
raise Exception("Laboratory misconfigured: fields missing" )
def _inject_extension(self, remaining):
method_and_get_query = remaining.split('?',1)
if len(method_and_get_query) == 1:
return method_and_get_query[0] + self.extension
else: # 2
method, get_query = method_and_get_query
return method + self.extension + '?' + get_query
def _request(self, remaining, headers = {}):
remaining = self._inject_extension(remaining)
if '?' in remaining:
context_remaining = remaining + '&context_id=' + self.context_id
else:
context_remaining = remaining + '?context_id=' + self.context_id
url = '%s%s' % (self.base_url, context_remaining)
r = HTTP_PLUGIN.cached_session.get(url, auth = (self.login, self.password), headers = headers)
r.raise_for_status()
try:
return r.json()
except ValueError:
raise
def _request_post(self, remaining, data, headers = None):
remaining = self._inject_extension(remaining)
if headers is None:
headers = {}
if '?' in remaining:
context_remaining = remaining + '&context_id=' + self.context_id
else:
context_remaining = remaining + '?context_id=' + self.context_id
headers['Content-Type'] = 'application/json'
if self.mode == 'json':
data = json.dumps(data)
elif self.mode == 'json+form':
data = data
else:
raise Exception("Misconfigured mode: %s" % self.mode)
# Cached session will not cache anything in a post. But if the connection already exists to the server, we still use it, becoming faster
r = HTTP_PLUGIN.cached_session.post('%s%s' % (self.base_url, context_remaining), data = data, auth = (self.login, self.password), headers = headers)
return r.json()
def get_version(self):
return Versions.VERSION_1
def get_capabilities(self):
capabilities = HTTP_PLUGIN.rlms_cache.get('capabilities')
if capabilities is not None:
return capabilities
capabilities = self._request('/capabilities')
HTTP_PLUGIN.rlms_cache['capabilities'] = capabilities['capabilities']
return capabilities['capabilities']
def setup(self, back_url):
setup_url = self._request('/setup?back_url=%s' % back_url)
return setup_url['url']
def test(self):
response = self._request('/test_plugin')
valid = response.get('valid', False)
if not valid:
return response.get('error_messages', ['Invalid error message'])
def get_laboratories(self, **kwargs):
labs = HTTP_PLUGIN.rlms_cache.get('labs')
if labs is not None:
return labs
labs = self._request('/labs')['labs']
laboratories = []
for lab in labs:
laboratory = Laboratory(name = lab['name'], laboratory_id = lab['laboratory_id'], description = lab.get('description'), autoload = lab.get('autoload'))
laboratories.append(laboratory)
HTTP_PLUGIN.rlms_cache['labs'] = laboratories
return laboratories
def get_translations(self, laboratory_id, **kwargs):
cache_key = 'translations-%s' % laboratory_id
translations = HTTP_PLUGIN.rlms_cache.get(cache_key)
if translations is not None:
return translations
try:
translations_json = self._request('/translations?laboratory_id=%s' % requests.utils.quote(laboratory_id, ''))
except:
traceback.print_exc()
# Dont store in cache if error
return {'translations': {}, 'mails':[]}
for lang, lang_data in translations_json['translations'].items():
for key, data_value in lang_data.items():
data_value.pop('namespace', None)
HTTP_PLUGIN.rlms_cache[cache_key] = translations_json
return translations_json
def reserve(self, laboratory_id, username, institution, general_configuration_str, particular_configurations, request_payload, user_properties, *args, **kwargs):
request = {
'laboratory_id' : laboratory_id,
'username' : username,
'institution' : institution,
'general_configuration_str' : general_configuration_str,
'particular_configurations' : particular_configurations,
'request_payload' : request_payload,
'user_properties' : user_properties,
}
request.update(kwargs)
debug_mode = kwargs.get('debug', False) and current_app.debug
if debug_mode:
open('last_request.txt','w').write(json.dumps(request, indent = 4))
try:
response = self._request_post('/reserve', request)
except:
if debug_mode:
exc_info = traceback.format_exc()
open('last_request.txt','a').write(exc_info)
raise
else:
if debug_mode:
open('last_request.txt','a').write(json.dumps(response, indent = 4))
return {
'reservation_id' : response['reservation_id'],
'load_url' : response['load_url']
}
def load_widget(self, reservation_id, widget_name, **kwargs):
response = self._request('/widget?widget_name=%s' % widget_name, headers = { 'X-G4L-reservation-id' : reservation_id })
return {
'url' : response['url']
}
def get_check_urls(self, laboratory_id):
retur |
akosiaris/transliterate | src/transliterate/contrib/languages/hi/translit_language_pack.py | Python | gpl-2.0 | 1,416 | 0.005376 | # - | *- coding: utf-8 -*-
__title__ = 'transliterate.contrib.languages.hi.translit_language_pack'
__author__ = 'Artur Barseghyan'
__copyright__ = 'Copyright (c) 2013 Artur Barseghyan'
__license__ = 'GPL 2.0/LGPL 2.1'
__all__ = ('HindiLanguagePack',)
from transliterate.base import TranslitLanguagePack, registry
class HindiLan | guagePack(TranslitLanguagePack):
"""
Language pack for Hindi language. See
`http://en.wikipedia.org/wiki/Hindi` for details.
"""
language_code = "hi"
language_name = "Hindi"
character_ranges = ((0x0900, 0x097f),) # Fill this in
mapping = (
u"aeof", #AEOF
u"अइओफ",
# ae of
)
#reversed_specific_mapping = (
# u"θΘ",
# u"uU"
#)
pre_processor_mapping = {
u"b": u"बी",
u"g": u"जी",
u"d": u"डी",
u"z": u"जड़",
u"h": u"एच",
u"i": u"आई",
u"l": u"अल",
u"m": u"ऍम",
u"n": u"अन",
u"x": u"अक्स",
u"k": u"के",
u"p": u"पी",
u"r": u"आर",
u"s": u"एस",
u"t": u"टी",
u"y": u"वाय",
u"w": u"डब्लू",
u"u": u"यू",
u"c": u"सी",
u"j": u"जे",
u"q": u"क्यू",
u"z": u"जड़",
}
detectable = True
#registry.register(HindiLanguagePack)
|
Ajapaik/ajapaik-web | ajapaik/ajapaik/management/commands/refresh_albums.py | Python | gpl-3.0 | 1,895 | 0.003694 | from random import randint
import time
from django.contrib.gis.geos import Point
from django.core.management.base import BaseCommand
from ajapaik.ajapaik.models import Album
class Command(BaseComm | and):
help = 'Refresh albums'
def handle(self, *args, **options):
albums = Album.objects.exclude(atype__in=[Album.AUTO, Album.FAVORITES])
for a in albums:
historic_photo_qs = a.get_historic_photos_queryset_with_subalbums()
if not historic_photo_qs.exists():
continue
geotagged_ph | oto_qs = a.get_geotagged_historic_photo_queryset_with_subalbums()
a.photo_count_with_subalbums = historic_photo_qs.count()
a.geotagged_photo_count_with_subalbums = geotagged_photo_qs.count()
a.rephoto_count_with_subalbums = a.get_rephotos_queryset_with_subalbums().count()
a.comments_count_with_subalbums = a.get_comment_count_with_subalbums()
a.similar_photo_count_with_subalbums = a.get_similar_photo_count_with_subalbums()
a.confirmed_similar_photo_count_with_subalbums = a.get_confirmed_similar_photo_count_with_subalbums()
if not a.lat and not a.lon and a.geotagged_photo_count_with_subalbums:
random_index = randint(0, a.geotagged_photo_count_with_subalbums - 1)
random_photo = geotagged_photo_qs[random_index]
a.lat = random_photo.lat
a.lon = random_photo.lon
a.geography = Point(x=float(a.lon), y=float(a.lat), srid=4326)
else:
random_index = randint(0, historic_photo_qs.count() - 1)
random_photo = historic_photo_qs[random_index]
a.cover_photo = random_photo
if random_photo.flip:
a.cover_photo_flipped = random_photo.flip
a.light_save()
time.sleep(0.2)
|
hkpeprah/git-achievements | app/services/views.py | Python | gpl-2.0 | 2,967 | 0.002359 | import json
import re
from django.conf import settings
from django.http import HttpResponse
from django.template import RequestContext
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_http_methods
from django.shortcuts import render_to_response
from django.contrib.auth.decorators import user_passes_test, login_required
from app.services.models import Event
from app.services.hooks import GithubHook
from app.achievement.hooks import check_for_unlocked_achievements
from app.services.utils import json_response, initialize_webhook_addresses, get_client_ip
@login_required
@require_http_methods(['GET'])
@user_passes_test(lambda u: u.is_superuser)
def service_events(request, service):
"""
Renders a page displaying the various supported events and payloads
for the the specified service.
@param request: HttpRequest object
@param service: String, the name of the service of whose events to grab
@return: HttpResponse
"""
events = [event for event in Event.objects.filter(service=service.title())]
for index, event in enumerate(events[:]):
name = event.name
title = ''.join(substr.title() for substr in re.split('[_\-]', event.name))
event = json.dumps(event.payload, indent=4,
separators=(",", ":"))
events[index] = {
'name': name,
'title': title,
'payload': event.lstrip().rstrip()
}
return render_to_response("services/events/index.html",
context_instance=RequestContext(request, {
'service': service.title(),
'events': events
})
)
@csrf_exempt
@require_http_methods(['GET', 'POST'])
def web_hook(request):
"""
Processes a Web Hook service request.
@param request: HttpRequest
@return: HttpResponse
"""
if request.method == "GET":
return HttpResponse("Hello World!", status=200)
headers, request_address = request.META, get_client_ip(request)
response = json_response({})
# Initialize web hook addresses if not already
initialize_webhook_addresses()
if request_address in settings.GITHUB_IP_ADDRESSES:
if settings.DEBUG:
print "Received webhook event from: %s" % request_address
event = headers.get('HTTP_X_GITHUB_EVENT', '')
payload = request.POST.get('payload', None)
if not payload:
payload = json.loads(request.body)
response = GithubHook.process_event(event, payload)
return response
@csrf_exempt
@require_http_methods(['POST'])
def web_local_hook(request):
"""
Processes a request from a loc | al web service request.
@param: HttpRequest
@return: HttpResponse
"""
data = json.loads(request.body)
data = check_for_unlocked_achievements(data.get('event'), data.get('payload'))
return HttpResponse(json.dumps(data), status=200,
content_type="application/js | on")
|
krafczyk/spack | var/spack/repos/builtin/packages/lammps/package.py | Python | lgpl-2.1 | 5,249 | 0.000572 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
#
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
import datetime as dt
class Lammps(CMakePackage):
    """LAMMPS stands for Large-scale Atomic/Molecular Massively
    Parallel Simulator. This package uses patch releases, not
    stable release.
    See https://github.com/spack/spack/pull/5342 for a detailed
    discussion.
    """
    homepage = "http://lammps.sandia.gov/"
    url = "https://github.com/lammps/lammps/archive/patch_1Sep2017.tar.gz"
    git = "https://github.com/lammps/lammps.git"

    tags = ['ecp', 'ecp-apps']

    version('develop', branch='master')
    version('20180629', '6d5941863ee25ad2227ff3b7577d5e7c')
    version('20180316', '25bad35679583e0dd8cb8753665bb84b')
    version('20180222', '4d0513e3183bd57721814d217fdaf957')
    version('20170922', '4306071f919ec7e759bda195c26cfd9a')
    version('20170901', '767e7f07289663f033474dfe974974e7')

    def url_for_version(self, version):
        """Map a YYYYMMDD version to its patch_<day><Mon><year> tarball URL.

        strftime('%d') zero-pads the day, but upstream tags use e.g.
        'patch_1Sep2017', so the leading zero is stripped.
        """
        vdate = dt.datetime.strptime(str(version), "%Y%m%d")
        return "https://github.com/lammps/lammps/archive/patch_{0}.tar.gz".format(
            vdate.strftime("%d%b%Y").lstrip('0'))

    # LAMMPS optional packages that can be toggled as variants.
    supported_packages = ['asphere', 'body', 'class2', 'colloid', 'compress',
                          'coreshell', 'dipole', 'granular', 'kspace', 'latte',
                          'manybody', 'mc', 'meam', 'misc', 'molecule',
                          'mpiio', 'peri', 'poems', 'python', 'qeq', 'reax',
                          'replica', 'rigid', 'shock', 'snap', 'srd',
                          'user-atc', 'user-h5md', 'user-lb', 'user-misc',
                          'user-netcdf', 'user-omp', 'voronoi']

    for pkg in supported_packages:
        variant(pkg, default=False,
                description='Activate the {0} package'.format(pkg))
    variant('lib', default=True,
            description='Build the liblammps in addition to the executable')
    variant('mpi', default=True,
            description='Build with mpi')

    depends_on('mpi', when='+mpi')
    depends_on('mpi', when='+mpiio')
    depends_on('fftw', when='+kspace')
    depends_on('voropp', when='+voronoi')
    depends_on('netcdf+mpi', when='+user-netcdf')
    depends_on('blas', when='+user-atc')
    depends_on('lapack', when='+user-atc')
    depends_on('latte@1.0.1', when='@:20180222+latte')
    depends_on('latte@1.1.1:', when='@20180316:20180628+latte')
    depends_on('latte@1.2.1:', when='@20180629:+latte')
    depends_on('blas', when='+latte')
    depends_on('lapack', when='+latte')
    depends_on('python', when='+python')
    depends_on('mpi', when='+user-lb')
    depends_on('mpi', when='+user-h5md')
    depends_on('hdf5', when='+user-h5md')

    conflicts('+body', when='+poems@:20180628')
    conflicts('+latte', when='@:20170921')
    conflicts('+python', when='~lib')
    conflicts('+qeq', when='~manybody')
    conflicts('+user-atc', when='~manybody')
    conflicts('+user-misc', when='~manybody')
    conflicts('+user-phonon', when='~kspace')

    patch("lib.patch", when="@20170901")
    patch("660.patch", when="@20170922")

    root_cmakelists_dir = 'cmake'

    def cmake_args(self):
        """Translate the spec's variants into CMake options.

        The option prefixes changed in the 20180629 release:
        ENABLE_* became BUILD_MPI / PKG_<NAME>.
        """
        spec = self.spec

        mpi_prefix = 'ENABLE'
        pkg_prefix = 'ENABLE'
        if spec.satisfies('@20180629:'):
            mpi_prefix = 'BUILD'
            pkg_prefix = 'PKG'

        args = [
            '-DBUILD_SHARED_LIBS={0}'.format(
                'ON' if '+lib' in spec else 'OFF'),
            '-D{0}_MPI={1}'.format(
                mpi_prefix,
                'ON' if '+mpi' in spec else 'OFF')
        ]

        if spec.satisfies('@20180629:+lib'):
            args.append('-DBUILD_LIB=ON')

        for pkg in self.supported_packages:
            opt = '-D{0}_{1}'.format(pkg_prefix, pkg.upper())
            if '+{0}'.format(pkg) in spec:
                args.append('{0}=ON'.format(opt))
            else:
                args.append('{0}=OFF'.format(opt))

        if '+kspace' in spec:
            args.append('-DFFT=FFTW3')

        return args
|
vollov/lotad | content/apps.py | Python | mit | 182 | 0 | from __future__ import unicode_literals
from django.apps import AppConfig
class ContentConfig(AppConfig):
    """
    Django AppConfig for the 'content' app, which manages i18n contents.
    """
    name = 'content'
|
nemanja-d/odoo_project_extensions | project_task_forecasts/models/milestone_template.py | Python | gpl-3.0 | 1,778 | 0.003937 | # -*- coding: utf-8 -*-
import logging
from openerp import models, fields, api, exceptions as ex
# Module-level logger named after this module so log lines are attributable.
_logger = logging.getLogger(__name__)
class MilestoneTemplate(models.Model):
    """Reusable set of milestone definitions attached to a project.

    A template can be marked as the project's default (at most one per
    project) and re-applied in bulk to every task that references it.
    """
    _name = 'project.milestone.template'
    _inherit = ['mail.thread', 'ir.needaction_mixin']

    name = fields.Char('Name', required=True, track_visibility='onchange')
    line_ids = fields.One2many('project.milestone.template.line', 'milestone_template_id', string='Milestones')
    project_id = fields.Many2one('project.project', 'Project', required=True, track_visibility='onchange')
    is_default = fields.Boolean('Is default', track_visibility='onchange')
    info = fields.Text('Description', track_visibility='onchange')

    @api.constrains('is_default', 'project_id')
    def default_project_template_constrains(self):
        """Reject a second default template within the same project."""
        if self.is_default is True and self.project_id:
            if self.search([('is_default', '=', True), ('project_id', '=', self.project_id.id), ('id', '!=', self.id)], count=True) > 0:
                raise ex.ValidationError('Only one default template can be in a project.')

    @api.one
    def update_milestones(self):
        """Re-apply this template to every task that uses it.

        Errors per task are collected and raised in one UserError at the
        end so one bad task does not abort the whole batch.
        """
        err_msgs = []
        task_list = self.env['project.task'].search([('milestone_template_id', '=', self.id)])
        task_counter = len(task_list)
        _logger.info('Apply template action started for %s tasks...' % (task_counter))
        for task in task_list:
            try:
                task.apply_milestone_template()
                task_counter -= 1
                _logger.info('Template applied on task %s. Remaining %s.' % (task.name, task_counter))
            except Exception as e:
                err_msgs.append(str(e))
        if len(err_msgs) > 0:
            raise ex.UserError('\n\n\n'.join(err_msgs))
|
jcoady9/youtube-dl | youtube_dl/extractor/twitter.py | Python | unlicense | 15,024 | 0.002067 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
determine_ext,
float_or_none,
xpath_text,
remove_end,
int_or_none,
ExtractorError,
)
class TwitterBaseIE(InfoExtractor):
    def _get_vmap_video_url(self, vmap_url, video_id):
        # Fetch the VMAP document and return its (whitespace-padded)
        # MediaFile URL, stripped.
        vmap_doc = self._download_xml(vmap_url, video_id)
        media_url = xpath_text(vmap_doc, './/MediaFile')
        return media_url.strip()
class TwitterCardIE(TwitterBaseIE):
    """Extractor for twitter.com/i/cards/... and /i/videos/tweet/... players."""
    IE_NAME = 'twitter:card'
    _VALID_URL = r'https?://(?:www\.)?twitter\.com/i/(?:cards/tfw/v1|videos/tweet)/(?P<id>\d+)'
    _TESTS = [
        {
            'url': 'https://twitter.com/i/cards/tfw/v1/560070183650213889',
            # MD5 checksums are different in different places
            'info_dict': {
                'id': '560070183650213889',
                'ext': 'mp4',
                'title': 'Twitter Card',
                'thumbnail': 're:^https?://.*\.jpg$',
                'duration': 30.033,
            }
        },
        {
            'url': 'https://twitter.com/i/cards/tfw/v1/623160978427936768',
            'md5': '7ee2a553b63d1bccba97fbed97d9e1c8',
            'info_dict': {
                'id': '623160978427936768',
                'ext': 'mp4',
                'title': 'Twitter Card',
                'thumbnail': 're:^https?://.*\.jpg',
                'duration': 80.155,
            },
        },
        {
            'url': 'https://twitter.com/i/cards/tfw/v1/654001591733886977',
            'md5': 'd4724ffe6d2437886d004fa5de1043b3',
            'info_dict': {
                'id': 'dq4Oj5quskI',
                'ext': 'mp4',
                'title': 'Ubuntu 11.10 Overview',
                'description': 'Take a quick peek at what\'s new and improved in Ubuntu 11.10.\n\nOnce installed take a look at 10 Things to Do After Installing: http://www.omgubuntu.co.uk/2011/10/10...',
                'upload_date': '20111013',
                'uploader': 'OMG! Ubuntu!',
                'uploader_id': 'omgubuntu',
            },
            'add_ie': ['Youtube'],
        },
        {
            'url': 'https://twitter.com/i/cards/tfw/v1/665289828897005568',
            'md5': 'ab2745d0b0ce53319a534fccaa986439',
            'info_dict': {
                'id': 'iBb2x00UVlv',
                'ext': 'mp4',
                'upload_date': '20151113',
                'uploader_id': '1189339351084113920',
                'uploader': 'ArsenalTerje',
                'title': 'Vine by ArsenalTerje',
            },
            'add_ie': ['Vine'],
        }, {
            'url': 'https://twitter.com/i/videos/tweet/705235433198714880',
            'md5': '3846d0a07109b5ab622425449b59049d',
            'info_dict': {
                'id': '705235433198714880',
                'ext': 'mp4',
                'title': 'Twitter web player',
                'thumbnail': 're:^https?://.*\.jpg',
            },
        },
    ]

    def _real_extract(self, url):
        video_id = self._match_id(url)

        config = None
        formats = []
        duration = None

        webpage = self._download_webpage(url, video_id)

        # Cards may just embed a YouTube/Vine iframe; delegate if so.
        iframe_url = self._html_search_regex(
            r'<iframe[^>]+src="((?:https?:)?//(?:www.youtube.com/embed/[^"]+|(?:www\.)?vine\.co/v/\w+/card))"',
            webpage, 'video iframe', default=None)
        if iframe_url:
            return self.url_result(iframe_url)

        config = self._parse_json(self._html_search_regex(
            r'data-(?:player-)?config="([^"]+)"', webpage, 'data player config'),
            video_id)
        if config.get('source_type') == 'vine':
            return self.url_result(config['player_url'], 'Vine')

        def _search_dimensions_in_video_url(a_format, video_url):
            # URLs often embed "<width>x<height>" as a path component.
            m = re.search(r'/(?P<width>\d+)x(?P<height>\d+)/', video_url)
            if m:
                a_format.update({
                    'width': int(m.group('width')),
                    'height': int(m.group('height')),
                })

        video_url = config.get('video_url') or config.get('playlist', [{}])[0].get('source')

        if video_url:
            if determine_ext(video_url) == 'm3u8':
                formats.extend(self._extract_m3u8_formats(video_url, video_id, ext='mp4', m3u8_id='hls'))
            else:
                f = {
                    'url': video_url,
                }

                _search_dimensions_in_video_url(f, video_url)

                formats.append(f)

        vmap_url = config.get('vmapUrl') or config.get('vmap_url')
        if vmap_url:
            formats.append({
                'url': self._get_vmap_video_url(vmap_url, video_id),
            })

        media_info = None

        for entity in config.get('status', {}).get('entities', []):
            if 'mediaInfo' in entity:
                media_info = entity['mediaInfo']

        if media_info:
            for media_variant in media_info['variants']:
                media_url = media_variant['url']
                if media_url.endswith('.m3u8'):
                    formats.extend(self._extract_m3u8_formats(media_url, video_id, ext='mp4', m3u8_id='hls'))
                elif media_url.endswith('.mpd'):
                    formats.extend(self._extract_mpd_formats(media_url, video_id, mpd_id='dash'))
                else:
                    vbr = int_or_none(media_variant.get('bitRate'), scale=1000)
                    a_format = {
                        'url': media_url,
                        'format_id': 'http-%d' % vbr if vbr else 'http',
                        'vbr': vbr,
                    }
                    # Reported bitRate may be zero
                    if not a_format['vbr']:
                        del a_format['vbr']

                    _search_dimensions_in_video_url(a_format, media_url)

                    formats.append(a_format)

            duration = float_or_none(media_info.get('duration', {}).get('nanos'), scale=1e9)

        self._sort_formats(formats)

        title = self._search_regex(r'<title>([^<]+)</title>', webpage, 'title')
        thumbnail = config.get('posterImageUrl') or config.get('image_src')
        duration = float_or_none(config.get('duration')) or duration

        return {
            'id': video_id,
            'title': title,
            'thumbnail': thumbnail,
            'duration': duration,
            'formats': formats,
        }
class TwitterIE(InfoExtractor):
IE_NAME = 'twitter'
_VALID_URL = r'https?://(?:www\.|m\.|mobile\.)?twitter\.com/(?P<user_id>[^/]+)/status/(?P<id>\d+)'
_TEMPLATE_URL = 'https://twitter.com/%s/status/%s'
_TESTS = [{
'url': 'https://twitter.com/freethenipple/status/643211948184596480',
'info_dict': {
'id': '643211948184596480',
'ext': 'mp4',
'title': 'FREE THE NIPPLE - FTN supporters on Hollywood Blvd today!',
'thumbnail': 're:^https?://.*\.jpg',
'description': 'FREE THE NIPPLE on Twitter: "FTN supporters on Hollywood Blvd today! http://t.co/c7jHH749xJ"',
'uploader': 'FREE THE NIPPLE',
'uploader_id': 'freethenipple',
},
'params': {
'skip_download': True, # requires ffmpeg
},
}, {
'url': 'https://twitter.com/giphz/status/657991469417025536/photo/1',
'md5': 'f36dcd5fb92bf7057f155e7d927eeb42',
'info_dict': {
'id': '657991469417025536',
'ext': 'mp4',
'title': 'Gifs - tu vai cai tu vai cai tu nao eh capaz disso tu vai cai',
'description': 'Gifs on Twitter: "tu vai cai tu vai cai tu nao eh capaz disso tu vai cai https://t.co/tM46VHFlO5"',
'thumbnail': 're:^https?://.*\.png',
'uploader': 'Gifs',
'uploader_id': 'giphz',
},
'expected_warnings': ['height', 'width'],
'skip': 'Account suspended',
}, {
'url': 'https://twitter.com/starwars/status/665052190608723968',
'md5': '39b7199856dee6cd4432e72c74bc69d4',
'info_dict': {
'id': '665052190608723968',
'ext': 'mp4',
'title': 'Star Wars - |
codehacken/Athena | src/vision_opencv/cv_bridge/python/cv_bridge/__init__.py | Python | gpl-2.0 | 42 | 0 | from .core import CvBridge, CvBridgeError
|
AthenaYe/UFLDL_Tutorial | Chap1_Supervised_Learning_and_Optimization/logistic_regression.py | Python | gpl-2.0 | 1,696 | 0.004127 | #!/usr/bin/env python2.7
# -*- coding:utf-8 -*-
import sklearn.datasets as skds
import numpy as np
import random
import theano.tensor as T
import theano
import matplotlib.pyplot as plt
import math
#I don't know what the jesus 'housing.data' means so I used self-generated dataset
x = np.arange(-50., 50., 1)
y = np.array(map(lambda tmp: 1.0/(1 + math.exp(-3 * tmp + 5.0)), x))
noise = np.random.uniform(-0.1, .1, size=len(x))
y += noise
print x
print y
#declarations
theta = theano.shared(np.random.uniform(-0.1, 0.1))
omega = theano.shared(np.random.uniform(-0.1, 0.1))  # weight (slope), small random init
X = T.dscalar('X')
Y = T.dscalar('Y')
#functions
prediction = 1/(1 + T.exp(-omega * X + theta))
loss1 = -Y * T.log(prediction)
loss2 = 1/2.0 * (prediction - Y) ** 2
predict = theano.function([X], prediction)  # compiled forward pass: X -> sigmoid(omega*X - theta)
calculate_loss = theano.function([X, Y], loss2)
print predict(1.0)
#derivatives
dX = T.grad(loss2, X)
dtheta = T.grad(loss2, theta)
domega = T.grad(loss2, omega)
epsilon = .01
#gradient function
gradient_step = theano.function(
[X, Y],
updates=((omega, omega - epsilon * domega),
(theta, theta - epsilon * dtheta)))
#optimization
for i in range(100):
loss = 0
for j in range(len(x)):
gradient_step(x[j], y[j])
loss += calculate_loss(x[j], y[j])
print 'loss after' + str(i) + 'iterations.' + str(loss)
print x
print y
mul = 1 - 1/len(x)
plt.xlim(x.min() * mul, x.max() * mul)
plt.ylim(y.min() * mul, y.max() * mul)
plt.xlabel('x')
plt.ylabel('y')
plt.title('lr test')
plt.plot(x, y, 'ro')
xx = np.arange(x.min(), x.max(), 0.1)
yy = map(lambda abc: predict(abc), xx)
plt.plot(xx, yy, 'b')
plt.show()
# vim: ts=4 sw=4 sts=4 expandtab
|
RazvanRotari/iaP | services/uploaders/nytimes.py | Python | mit | 1,016 | 0.001969 | #!/usr/bin/env python3
import requests
import xmltodict
import upload
import model
RSS_ENDPOINT = "http://rss.nytimes.com/services/xml/rss/nyt/HomePage.xml"
def get_rss():
    """Download the NYT homepage RSS feed and return its parsed <rss> element."""
    feed_xml = requests.get(RSS_ENDPOINT).text
    return xmltodict.parse(feed_xml)["rss"]
def main():
    """Fetch the NYT RSS feed and upload every item as a NewsItem model."""
    data = get_rss()
    for item in data["channel"]["item"]:
        newsItem = model.NewsItem(item["guid"]["#text"])
        newsItem.title = item["title"]
        newsItem.provenance = "http://rss.nytimes.com"
        newsItem.author = item["dc:creator"]
        newsItem.timestamp = item["pubDate"]
        if "category" in item:
            print("\n\n\n")
            print(item["category"])
            # xmltodict yields a list for repeated <category> tags
            # and a single dict when there is only one.
            if isinstance(item["category"], list):
                newsItem.categories = [x["#text"] for x in item["category"]]
            else:
                newsItem.categories = [item["category"]["#text"]]
        newsItem.description = item["description"]
        upload.upload_model(newsItem)
if __name__ == "__main__":
main()
|
danmichaelo/UKBot | ukbot/revision.py | Python | mit | 7,012 | 0.002567 | # encoding=utf-8
# vim: fenc=utf-8 et sw=4 ts=4 sts=4 ai
import weakref
import re
import urllib
from collections import OrderedDict
from datetime import datetime
import logging
import pytz
from mwtemplates import TemplateEditor
from mwtextextractor import get_body_text
from .common import _
logger = logging.getLogger(__name__)
class Revision(object):
    def __init__(self, article, revid, **kwargs):
        """
        A revision is uniquely identified by its revision id and its site

        Arguments:
        - article: (Article) article object reference
        - revid: (int) revision id
        """
        self.article = weakref.ref(article)
        self.errors = []
        self.revid = revid
        self.size = -1
        self.text = ''
        self.point_deductions = []

        self.parentid = 0
        self.parentsize = 0
        self.parenttext = ''
        self.username = ''
        self.parsedcomment = None

        self.saved = False  # Saved in local DB
        self.dirty = False  #

        self._te_text = None  # Loaded as needed
        self._te_parenttext = None  # Loaded as needed

        for k, v in kwargs.items():
            if k == 'timestamp':
                self.timestamp = int(v)
            elif k == 'parentid':
                self.parentid = int(v)
            elif k == 'size':
                self.size = int(v)
            elif k == 'parentsize':
                self.parentsize = int(v)
            elif k == 'username':
                # MediaWiki usernames always have an upper-cased first letter.
                self.username = v[0].upper() + v[1:]
            elif k == 'parsedcomment':
                self.parsedcomment = v
            elif k == 'text':
                if v is not None:
                    self.text = v
            elif k == 'parenttext':
                if v is not None:
                    self.parenttext = v
            else:
                raise Exception('add_revision got unknown argument %s' % k)

        # Apply any pre-registered point deductions that target this revision.
        for pd in self.article().user().point_deductions:
            if pd['revid'] == self.revid and self.article().site().match_prefix(pd['site']):
                self.add_point_deduction(pd['points'], pd['reason'])

    def __repr__(self):
        return "Revision(%s of %s:%s)" % (self.revid, self.article().site().key, self.article().name)

    def __str__(self):
        return self.__repr__()

    def __hash__(self):
        return hash(self.__repr__())

    @property
    def utc(self):
        return pytz.utc.localize(datetime.fromtimestamp(self.timestamp))

    @property
    def wiki_tz(self):
        return self.utc.astimezone(self.article().user().contest().wiki_tz)

    def te_text(self):
        # Lazily parsed template editor for the revision text.
        if self._te_text is None:
            self._te_text = TemplateEditor(re.sub('<nowiki ?/>', '', self.text))
        return self._te_text

    def te_parenttext(self):
        # Lazily parsed template editor for the parent revision text.
        if self._te_parenttext is None:
            self._te_parenttext = TemplateEditor(re.sub('<nowiki ?/>', '', self.parenttext))
        return self._te_parenttext

    @property
    def bytes(self):
        return self.size - self.parentsize

    @property
    def words(self):
        """Word count delta against the parent revision (cached after first use)."""
        if self.article().site().host == 'www.wikidata.org':
            # Don't do wordcount for wikidata
            return 0

        try:
            return self._wordcount
        except AttributeError:
            pass  # not computed yet; fall through

        mt1 = get_body_text(re.sub('<nowiki ?/>', '', self.text))
        mt0 = get_body_text(re.sub('<nowiki ?/>', '', self.parenttext))
        # CJK wikis have no space-separated words; approximate by character count.
        if self.article().site().key == 'ja.wikipedia.org':
            words1 = len(mt1) / 3.0
            words0 = len(mt0) / 3.0
        elif self.article().site().key == 'zh.wikipedia.org':
            words1 = len(mt1) / 2.0
            words0 = len(mt0) / 2.0
        else:
            words1 = len(mt1.split())
            words0 = len(mt0.split())
        charcount = len(mt1) - len(mt0)
        self._wordcount = words1 - words0
        logger.debug('Wordcount: Revision %s@%s: %+d bytes, %+d characters, %+d words',
                     self.revid, self.article().site().key, self.bytes, charcount, self._wordcount)

        if not self.new and words0 == 0 and self._wordcount > 1:
            w = _('Revision [//%(host)s/w/index.php?diff=prev&oldid=%(revid)s %(revid)s]: The word count difference might be wrong, because no words were found in the parent revision (%(parentid)s) of size %(size)d, possibly due to unclosed tags or templates in that revision.') % {
                'host': self.article().site().host,
                'revid': self.revid,
                'parentid': self.parentid,
                'size': len(self.parenttext)
            }
            logger.warning(w)
            self.errors.append(w)
        elif self._wordcount > 10 and self._wordcount > self.bytes:
            w = _('Revision [//%(host)s/w/index.php?diff=prev&oldid=%(revid)s %(revid)s]: The word count difference might be wrong, because the word count increase (%(words)d) is larger than the byte increase (%(bytes)d). Wrong word counts can occur for invalid wiki text.') % {
                'host': self.article().site().host,
                'revid': self.revid,
                'words': self._wordcount,
                'bytes': self.bytes
            }
            logger.warning(w)
            self.errors.append(w)
            #s = _('A problem encountered with revision %(revid)d may have influenced the word count for this revision: <nowiki>%(problems)s</nowiki> ')
            #s = _('Et problem med revisjon %d kan ha påvirket ordtellingen for denne: <nowiki>%s</nowiki> ')

        del mt1
        del mt0

        # except DanmicholoParseError as e:
        #     log("!!!>> FAIL: %s @ %d" % (self.article().name, self.revid))
        #     self._wordcount = 0
        #     #raise
        return self._wordcount

    @property
    def new(self):
        return self.parentid == 0 or (self.parentredirect and not self.redirect)

    @property
    def redirect(self):
        return bool(self.article().site().redirect_regexp.match(self.text))

    @property
    def parentredirect(self):
        return bool(self.article().site().redirect_regexp.match(self.parenttext))

    def get_link(self, homesite):
        """ returns a link to revision """
        q = OrderedDict([('oldid', self.revid)])
        if not self.new:
            q['diff'] = 'prev'
        if self.article().site().host == homesite.host:
            host_prefix = ''
        else:
            host_prefix = '//' + self.article().site().host
        return host_prefix + self.article().site().site['script'] + '?' + urllib.parse.urlencode(q)

    def get_parent_link(self):
        """ returns a link to parent revision """
        q = OrderedDict([('oldid', self.parentid)])
        return '//' + self.article().site().host + self.article().site().site['script'] + '?' + urllib.parse.urlencode(q)

    def add_point_deduction(self, points, reason):
        logger.info('Revision %s: Removing %d points for reason: %s', self.revid, points, reason)
        self.point_deductions.append([points, reason])
|
tehmaze/parser | parser/symbol.py | Python | mit | 977 | 0.002047 | # Crude symbol based top down operator presedence parser, as originally
# implemented by Vaughan Pratt[1] and Douglas Crockford[2].
#
# [1]: http://doi.acm.org/10.1145/512927.512931
# [2]: http://javascript.crockford.com/tdop/tdop.html
import re
class SymbolBase(object):
    """Base symbol for a Pratt (top-down operator precedence) parser."""
    ident = None   # token identifier, set by subclasses
    value = None   # literal value carried by the token, if any
    first = None

    def __init__(self, parser, value=None):
        self.parser = parser
        self.value = value
        # Alias parser methods onto the symbol for easy access.
        for method in ['advance', 'expression']:
            setattr(self, method, getattr(self.parser, method))

    def nud(self):
        '''
        Null declaration, is used when a token appears at the beginning
        of a language construct.
        '''
        raise SyntaxError('Syntax error %r' % (self.ident,))

    def led(self, left):
        '''
        Left denotation, is used when it appears inside the construct.
        '''
        raise SyntaxError('Unknown operator %r' % (self.ident,))
|
lebabouin/CouchPotatoServer-develop | couchpotato/core/providers/trailer/vftrailers/youtube_dl/extractor/brightcove.py | Python | gpl-3.0 | 5,375 | 0.003539 | # encoding: latin-1
import re
import json
import xml.etree.ElementTree
from .common import InfoExtractor
from ..utils import (
compat_urllib_parse,
find_xpath_attr,
compat_urlparse,
ExtractorError,
)
class BrightcoveIE(InfoExtractor):
    """Extractor for Brightcove federated player URLs and playlists."""
    _VALID_URL = r'https?://.*brightcove\.com/(services|viewer).*\?(?P<query>.*)'
    _FEDERATED_URL_TEMPLATE = 'http://c.brightcove.com/services/viewer/htmlFederated?%s'
    _PLAYLIST_URL_TEMPLATE = 'http://c.brightcove.com/services/json/experience/runtime/?command=get_programming_for_experience&playerKey=%s'

    _TESTS = [
        {
            # From http://www.8tv.cat/8aldia/videos/xavier-sala-i-martin-aquesta-tarda-a-8-al-dia/
            u'url': u'http://c.brightcove.com/services/viewer/htmlFederated?playerID=1654948606001&flashID=myExperience&%40videoPlayer=2371591881001',
            u'file': u'2371591881001.mp4',
            u'md5': u'9e80619e0a94663f0bdc849b4566af19',
            u'note': u'Test Brightcove downloads and detection in GenericIE',
            u'info_dict': {
                u'title': u'Xavier Sala i Martín: “Un banc que no presta és un banc zombi que no serveix per a res”',
                u'uploader': u'8TV',
                u'description': u'md5:a950cc4285c43e44d763d036710cd9cd',
            }
        },
        {
            # From http://medianetwork.oracle.com/video/player/1785452137001
            u'url': u'http://c.brightcove.com/services/viewer/htmlFederated?playerID=1217746023001&flashID=myPlayer&%40videoPlayer=1785452137001',
            u'file': u'1785452137001.flv',
            u'info_dict': {
                u'title': u'JVMLS 2012: Arrays 2.0 - Opportunities and Challenges',
                u'description': u'John Rose speaks at the JVM Language Summit, August 1, 2012.',
                u'uploader': u'Oracle',
            },
        },
    ]

    @classmethod
    def _build_brighcove_url(cls, object_str):
        """
        Build a Brightcove url from a xml string containing
        <object class="BrightcoveExperience">{params}</object>

        NOTE: the method name's typo ('brighcove') is kept for backward
        compatibility with external callers.
        """
        object_doc = xml.etree.ElementTree.fromstring(object_str)
        assert u'BrightcoveExperience' in object_doc.attrib['class']
        params = {'flashID': object_doc.attrib['id'],
                  'playerID': find_xpath_attr(object_doc, './param', 'name', 'playerID').attrib['value'],
                  }
        playerKey = find_xpath_attr(object_doc, './param', 'name', 'playerKey')
        # Not all pages define this value
        if playerKey is not None:
            params['playerKey'] = playerKey.attrib['value']
        videoPlayer = find_xpath_attr(object_doc, './param', 'name', '@videoPlayer')
        if videoPlayer is not None:
            params['@videoPlayer'] = videoPlayer.attrib['value']
        data = compat_urllib_parse.urlencode(params)
        return cls._FEDERATED_URL_TEMPLATE % data

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        query_str = mobj.group('query')
        query = compat_urlparse.parse_qs(query_str)

        # A '@videoPlayer' parameter selects a single video; a 'playerKey'
        # alone points at a whole playlist.
        videoPlayer = query.get('@videoPlayer')
        if videoPlayer:
            return self._get_video_info(videoPlayer[0], query_str)
        else:
            player_key = query['playerKey']
            return self._get_playlist_info(player_key[0])

    def _get_video_info(self, video_id, query):
        request_url = self._FEDERATED_URL_TEMPLATE % query
        webpage = self._download_webpage(request_url, video_id)

        self.report_extraction(video_id)
        info = self._search_regex(r'var experienceJSON = ({.*?});', webpage, 'json')
        info = json.loads(info)['data']
        video_info = info['programmedContent']['videoPlayer']['mediaDTO']

        return self._extract_video_info(video_info)

    def _get_playlist_info(self, player_key):
        playlist_info = self._download_webpage(self._PLAYLIST_URL_TEMPLATE % player_key,
                                               player_key, u'Downloading playlist information')

        playlist_info = json.loads(playlist_info)['videoList']
        videos = [self._extract_video_info(video_info) for video_info in playlist_info['mediaCollectionDTO']['videoDTOs']]

        return self.playlist_result(videos, playlist_id=playlist_info['id'],
                                    playlist_title=playlist_info['mediaCollectionDTO']['displayName'])

    def _extract_video_info(self, video_info):
        info = {
            'id': video_info['id'],
            'title': video_info['displayName'],
            'description': video_info.get('shortDescription'),
            'thumbnail': video_info.get('videoStillURL') or video_info.get('thumbnailURL'),
            'uploader': video_info.get('publisherName'),
        }

        renditions = video_info.get('renditions')
        if renditions:
            # Pick the largest rendition as the download target.
            renditions = sorted(renditions, key=lambda r: r['size'])
            best_format = renditions[-1]
            info.update({
                'url': best_format['defaultURL'],
                'ext': 'mp4',
            })
        elif video_info.get('FLVFullLengthURL') is not None:
            info.update({
                'url': video_info['FLVFullLengthURL'],
                'ext': 'flv',
            })
        else:
            raise ExtractorError(u'Unable to extract video url for %s' % info['id'])
        return info
|
wangjiezhe/FetchNovels | novel/sources/sto.py | Python | gpl-3.0 | 2,128 | 0.004008 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
from pyquery import PyQuery
from novel import serial, utils
BASE_URL = 'http://www.sto.cc/{}-1/'
PAGE_URL = 'http://www.sto.cc/{}-{}/'
class StoTool(utils.Tool):
    def __init__(self):
        super().__init__()
        # Watermark sentences the site scatters through the text. The 's'
        # placeholders between the CJK characters are replaced below with a
        # pattern that matches any decorative separator (or any single char).
        word_list = (
            's思s兔s網s文s檔s下s載s與s在s線s閱s讀s',
            's本s作s品s由s思s兔s網s提s供s下s載s與s在s線s閱s讀s',
            's本s作s品s由s思s兔s在s線s閱s讀s網s友s整s理s上s傳s',
            's思s兔s在s線s閱s讀s',
            's思s兔s文s檔s共s享s與s在s線s閱s讀s',
        )
        symbol_list = (
            '\^_\^', ':-\)', '\^o\^', '-_-!',
            '││', '//', '\$\$',
        )
        # '(.|sym1|sym2|...)' -- any one character or a known symbol.
        symbols = '|'.join(symbol_list).join(('(.|', ')'))
        pats = (symbols.join(w.split('s')) for w in word_list)
        symbol_extras = ('',)
        self.remove_extras.extend(
            (re.compile(pat) for pat in pats)
        )
        self.remove_extras.extend(
            (re.compile(pat) for pat in symbol_extras)
        )
class Sto(serial.SerialNovel):
    """Novel hosted on sto.cc; each paginated view of the text is one 'chapter'."""

    def __init__(self, tid):
        # tid: numeric title id used to build per-page URLs.
        super().__init__(utils.base_to_url(BASE_URL, tid), '#BookContent',
                         tid=tid)
        self.tool = StoTool

    def get_title_and_author(self):
        """Parse (title, author) out of the page's 'keywords' meta tag."""
        st = self.doc('meta').filter(
            lambda i, e: PyQuery(e).attr('name') == 'keywords'
        ).attr('content')
        return re.match(r'(.*?),(.*?),.*', st).groups()

    @property
    def chapter_list(self):
        """Return (index, page_url, label) tuples derived from the pager script."""
        st = re.search(
            r'ANP_goToPage\("Page_select",(\d+),(\d+),1\);', self.doc.html())
        # assumes the call's arguments are (select_id, tid, page_count, current)
        # -- TODO confirm against the site's pager JS
        if st.group(1) == self.tid:
            page_num = int(st.group(2))
        else:
            raise Exception('Something strange may happened.')
        return [(i + 1, PAGE_URL.format(self.tid, i + 1), '第{:d}頁'.format(i + 1))
                for i in range(page_num)]

    def get_intro(self):
        """Return the synopsis from the 'description' meta tag."""
        intro = self.doc('meta').filter(
            lambda i, e: PyQuery(e).attr('name') == 'description'
        ).attr('content')
        return intro
|
makersauce/ConstructSecur | Overlord/host.py | Python | mit | 8,573 | 0.011781 | ##TODO:
#
#getLog(day) FInish it
#machines new
#machines mod
#
import os
import datetime
from flask import Flask, jsonify, render_template, request, redirect, url_for
from dblayer import DBlayer
db = DBlayer(DEBUG=True)
app = Flask(__name__, static_folder='web/static', static_url_path='')
app.template_folder = "web"
SECURITYLOGS = "logs/"
ADMINIDENT = None
##START MAIN PAGE
@app.route("/")
def landing():
now = datetime.datetime.now()
log = getLog(now.strftime('%Y%m%d'))
return render_template('landing.html', logs=log)
@app.route("/logs")
def logs():
logList = getLogList()
print logList
return render_template('loglist.html',logList=logList)
@app.route('/logs/<logurl>')
def show_log(logurl):
    """Render one day's security log; logurl is a filename like '20170101.log'."""
    try:
        if os.path.isfile(os.path.join(SECURITYLOGS,logurl)):
            date = os.path.splitext(logurl)[0]
            logs = getLog(date)
            return render_template('log.html',logs=logs)
        # Missing file previously fell through and returned None (an HTTP 500);
        # show the same friendly error page instead.
        return "<h1>Oops!</h1><p>Looks like there was an error retrieving that log. Sorry about that.</p>"
    except Exception:
        return "<h1>Oops!</h1><p>Looks like there was an error retrieving that log. Sorry about that.</p>"
@app.route("/users")
def users():
categories = ['name','ident','class']
users = db.readAllUsers()
return render_template('users.html',users=users,categories=categories)
@app.route('/addUser',methods=['POST'])
def add_user():
    """Create a user from the posted form; 'hours' and 'last' are optional fields."""
    new = {}
    name = request.form['name']
    new['name'] = name
    ident = request.form['ident']
    new['ident'] = ident
    userclass = request.form['class']
    new['class'] = userclass
    try:
        hours = request.form['hours']
        new['hours'] = hours
    except:
        # optional field absent from the form
        print 'no hours in form'
    try:
        last = request.form['last']
        new['last'] = last
    except:
        # optional field absent from the form
        print 'no last in form'
    ##log ADDED USER xxx on XXXX
    # '1337hax' looks like a hard-coded admin token -- TODO confirm with dblayer
    added = db.addUser(new,'1337hax')
    return redirect(url_for('users'))
@app.route('/user-class',methods=['POST'])
def user_class():
    """Look up a user's class by ident and return it as JSON."""
    ident = request.form['ident']
    userClass = db.readUser(ident)['class']
    print "user class is {0}".format(userClass)
    ##log LOGIN ATTEMPT by XXXX at XXXX
    return jsonify(userClass=userClass)
@app.route("/machines")
def machines():
categories = ['name','ident','classes','timeout']
machines = db.readAllMachines()
return render_template('machines.html',machines=machines,categories=categories)
@app.route('/addMachine',methods=['POST'])
def add_machine():
    """Create a machine from the posted form.

    'classes' is a comma-separated list; 'hours' and 'last' are optional.
    """
    new = {}
    name = request.form['name']
    new['name'] = name
    ident = request.form['ident']
    new['ident'] = ident
    classes = request.form['classes'].split(',')
    classes = [n.strip(' ') for n in classes]#Strip whitespace from classes
    new['classes'] = classes
    timeout = request.form['timeout']
    new['timeout'] = timeout
    try:
        hours = request.form['hours']
        new['hours'] = hours
    except:
        # optional field absent from the form
        print 'no hours in form'
    try:
        last = request.form['last']
        new['last'] = last
    except:
        # optional field absent from the form
        print 'no last in form'
    print new
    added = db.addMachine(new,'1337hax')
    ##log ADD machine XXXX at XXXX
    return redirect(url_for('machines'))
@app.route("/remove<string:index>")
def remove(index):
print index
if db.deleteUser(index,'1337hax'):
print 'users'
return redirect('/users')
elif db.deleteMachine(index,'1337hax'):
print 'machines'
return redirect('/machines')
return "<h1>Ooops!</h1>"
def getLog(day):
    """Return log entries for a YYYYMMDD day string.

    NOTE(review): log-file parsing is not implemented yet -- when the file
    exists a hard-coded placeholder list is returned instead of its contents.
    """
    f = day+'.log'
    if os.path.isfile(os.path.join(SECURITYLOGS,f)):
        ##PARSE LOG FILE
        log = [{'message':'hello','time':'1:30a'},{'message':'yes','time':'1:20a'},{'message':'fart','time':'1:40a'}]
        return log
    else:
        log = [{'message':'There has been no activity today'}]
        return log
def getLogList():
    """Return [{'name': display_name, 'path': file_path}, ...] for every log on disk.

    Filenames that parse as YYYYMMDD.log get a human-readable display name;
    anything else falls back to the raw filename.
    """
    try:
        onlyfiles = [ f for f in os.listdir(SECURITYLOGS) if os.path.isfile(os.path.join(SECURITYLOGS,f)) ]
        logList = []
        for f in onlyfiles:
            path = os.path.join(SECURITYLOGS,f)
            try:
                # 'datetime' is the module here, so reach the class explicitly.
                # (The original called datetime.strptime, which raised
                # AttributeError and always took the fallback branch.)
                date = datetime.datetime.strptime(f, "%Y%m%d.log")
                filename = datetime.datetime.strftime(date, "%A %B %d, %Y")
            except (ValueError, AttributeError):
                filename = f
            logList.append({'name':filename, 'path':path})
        return logList
    except OSError:
        # Log directory missing/unreadable. (The original returned the
        # unbound name 'logList' here, which raised NameError.)
        log = [{'message':'There has been no activity ever'}]
        return log
@app.route("/machine-init", methods=['POST'])
def machine_init():
print request.method
print request.form['ident']
if request.method == "POST":
machine = db.readMachine(request.form['ident'])
timeout = machine['timeout']
classes = machine['classes']
response = jsonify(timeout=timeout,classes=classes)
##log "machine XXXX checked in at XXXXX"
return response
return 'Request did not POST'
@app.route("/login")
def login():
ADMIN_IDENT="1337hax"
print "logged in"
'''
@app.route('/_get_tweets')
def get_tweets():
filter = request.args.get('filter', type=str)
n = request.args.get('n', 0, type=int)
if (filter=='none'):
result = db.tweets.find().sort('created', -1)[n]
else:
result = db.tweets.find({'merchanthandle':filter}).sort('created', -1)[n]
return jsonify(text=result['text'],created=result['created'],merchant=result['merchanthandle'])
@app.route('/_get_merchant')
def get_merchant():
name = request.args.get('handle', type=str) ##finds the most recently updated merchants
result = db.merchants.find_one({'twitterhandle':name})
return jsonify(name=result['name'],description=result['description'],handle=result['twitterhandle'],tid=result['twitterid'],updated=result['lastupdated'],geo=result['lastgeo'],category=result['category'])
##END MAIN PAGE
##MAP
@app.route('/_recent_merchants')
def recent_merchants():
n = request.args.get('n', 0, type=int) ##finds the most recently updated merchants
result = db.merchants.find | ().sort('lastupdated', -1)[n]
print result
return jsonify(name=result['name'],description=result['description'],handle=result['twitterhandle'],tid=result['twitterid'],updated=result['lastupdated'],geo=result['lastgeo'],category=result['category'])
##END MAP
##ADMIN PAGE
@app.route('/admin/_add_merchant')
def add_vendor():
name = reques | t.args.get('name', 0, type=str)
tline = request.args.get('tagline', 0, type=str)
handle = request.args.get('twitterhandle', 0, type=str)
category = request.args.get('category',type=str)
new = {'name': name, 'category': category, 'twitterhandle': handle, 'description': tline}
print new
added = db.addmerchant(new)
return jsonify(result=added)
#@app.route('/admin/_consume_tweets')
#def consume_tweets():
# print 'im in'
# cons = TweetConsumer()
# print 'init'
# new = cons.consume()
# print 'nom'
# return str(new)
@app.route('/admin/_add_category', methods=['POST'])
def catadd():
    """Create a new category from a POSTed name and icon upload.

    Returns a JSON object with a ``success`` flag and, on failure, an
    ``error`` message.
    """
    if request.method != 'POST':
        # Route only accepts POST, but keep a defined response for safety.
        return jsonify({"success": False, "error": "POST required"})
    name = request.form['catname']
    upload = request.files['caticon']  # renamed: `file` shadowed the builtin
    if not name:
        return jsonify({"success": False, "error": "No Name"})
    if not upload:
        return jsonify({"success": False, "error": "No File"})
    if not allowed_file(upload.filename):
        # Previously this path fell through and returned None, which Flask
        # turns into a 500; report the problem explicitly instead.
        return jsonify({"success": False, "error": "Bad File Type"})
    # Store the icon under the category name, keeping the upload's extension.
    extension = upload.filename.rsplit('.', 1)[1]
    filename = os.path.join(app.config['CATEGORY_ICON_DIR'],
                            "%s.%s" % (name, extension))
    upload.save(filename)
    db.addcategory({'name': name, 'filename': filename})
    return jsonify({"success": True})
@app.route('/admin/_preview_category', methods=['POST'])
def catpreview():
    """Save an uploaded category icon to the temp dir and return its preview URL."""
    if request.method != 'POST':
        # Route only accepts POST, but keep a defined response for safety.
        return jsonify({"success": False, "error": "POST required"})
    upload = request.files['caticon']
    if not upload or not allowed_file(upload.filename):
        # Previously this path fell through and returned None (a 500 in
        # Flask); fail explicitly instead.
        return jsonify({"success": False, "error": "Bad File"})
    # Rebuild the name from stem + extension (same value as the original
    # rsplit-based concatenation).
    stem, extension = upload.filename.rsplit('.', 1)
    stored_name = "%s.%s" % (stem, extension)
    upload.save(os.path.join(app.config['TEMP_UPLOAD'], stored_name))
    return jsonify({"success": '/temp/' + stored_name})
def allowed_file(filename):
    """Return True when *filename* has an extension listed in ALLOWED_IMAGES."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1]
    return extension in ALLOWED_IMAGES
@app.route('/_get_categories')
def get_categories( |
ARL-UTEP-OC/emubox | workshop-manager/bin/RequestHandler/client_updater.py | Python | gpl-2.0 | 2,004 | 0.000998 | import sys
import time
import logging
from socketio import socketio_manage
from socketio.mixins import BroadcastMixin
from socketio.namespace import BaseNamespace
from DataAggregation.webdata_aggregator import getAvailableWorkshops
logger = logging.getLogger(__name__)
std_out_logger = logging.StreamHandler(sys.stdout)
logger.addHandler(std_out_logger)
def broadca | st_msg(server, ns_name, event, *args):
pkt = dict(type="event",
name=event,
args=args,
endpoint=ns_name)
for sessid, socket in server.sockets.iteritems():
socket.send_packet(pkt)
def workshops_monitor(server):
sizes = []
workshops = getAvailableWorkshops()
for w in workshops:
| tmp = [w.workshopName, w.q.qsize()]
sizes.append(tmp)
broadcast_msg(server, '', "sizes", tmp)
while True:
logger.info("Participants viewing frontend:" + str(len(server.sockets)))
workshops_available = []
curr_workshops = getAvailableWorkshops()
for w in curr_workshops:
workshops_available.append([w.workshopName, w.q.qsize()])
wq = filter(lambda x: x[0] == w.workshopName, sizes)[0]
if wq[1] != w.q.qsize():
wq[1] = w.q.qsize()
logging.info("client_updater: New update being pushed to clients: " + str(wq))
broadcast_msg(server, '', 'sizes', wq)
logger.info("Workshops available:" + str(workshops_available))
time.sleep(1)
class RequestHandlerApp(object):
    """Minimal WSGI application that hands /socket.io requests to gevent-socketio."""

    def __call__(self, environ, start_response):
        # socketio_manage drives the long-lived socket.io connection itself,
        # using QueueStatusHandler as the root ('' endpoint) namespace.
        # NOTE(review): non-/socket.io paths fall through and return None,
        # which violates the WSGI contract (no start_response, no iterable)
        # -- presumably another layer serves those paths; confirm.
        if environ['PATH_INFO'].startswith('/socket.io'):
            socketio_manage(environ, {'': QueueStatusHandler})
class QueueStatusHandler(BaseNamespace, BroadcastMixin):
    """socket.io namespace that pushes workshop queue sizes to clients."""

    def on_connect(self):
        """On client connect, emit one 'sizes' event per available workshop.

        Each event payload is ``[workshop_name, queue_size]``.
        """
        # The original accumulated the pairs into a `sizes` list that was
        # never read; only the per-workshop emit matters, so the dead
        # accumulator is removed.
        for workshop in getAvailableWorkshops():
            self.emit('sizes', [workshop.workshopName, workshop.q.qsize()])
|
matt-oak/Project_Euler | 17_number_letter_counts.py | Python | mit | 3,740 | 0.04893 | # imports
import sys
import math
# main function
def main(argv):
summation = 0
#one ... ninety nine
for i in range(1, 100):
summation = summation + to_words(i) |
#854 characers for 1 - 99
#print summation
#one hundred and ...
one_hundreds = (13 | *99)+summation+10
two_hundreds = (13*99)+summation+10
three_hundreds = (15*99)+summation+12
four_hundreds = (14*99)+summation+11
five_hundreds = (14*99)+summation+11
six_hundreds = (13*99)+summation+10
seven_hundreds = (15*99)+summation+12
eight_hundreds = (15*99)+summation+12
nine_hundreds = (14*99)+summation+11
thousand = 11
total = summation + one_hundreds + two_hundreds + three_hundreds + four_hundreds + five_hundreds + six_hundreds + seven_hundreds + eight_hundreds + nine_hundreds + thousand
print total
return
#insert other functions here
# Letter counts for the English number words, spaces and hyphens excluded
# (e.g. "twenty-one" counts as len("twentyone") == 9), as required by
# Project Euler problem 17.
_UNITS = {1: 3, 2: 3, 3: 5, 4: 4, 5: 4, 6: 3, 7: 5, 8: 5, 9: 4}
_TEENS = {10: 3, 11: 6, 12: 6, 13: 8, 14: 8, 15: 7, 16: 7, 17: 9, 18: 8, 19: 8}
_TENS = {20: 6, 30: 6, 40: 5, 50: 5, 60: 5, 70: 7, 80: 6, 90: 6}


def to_words(num):
    """Return the number of letters in the English words for *num* (1-99).

    Replaces the original 130-line if/elif ladder with table lookups;
    results are identical for every input the program uses (1..99):
    exact units/teens/tens words first, otherwise tens word + units word
    ("forty" + "five" -> 9 for 45).
    """
    if num in _UNITS:
        return _UNITS[num]
    if num in _TEENS:
        return _TEENS[num]
    if num in _TENS:
        return _TENS[num]
    # 21-99 with a nonzero units digit: sum the two component words.
    tens_digit, units_digit = divmod(num, 10)
    return _TENS[tens_digit * 10] + _UNITS[units_digit]
# maint entry point
if __name__ == "__main__":
main(sys.argv)
|
hackerpals/Python-Tutorials | Kids-Pthon-Workshops/guessing-game/ex2/call-op.py | Python | gpl-3.0 | 591 | 0.011844 | #!/usr/bin/env python3
"""
Call Operators in Python
"""
#Setup our variables
num1 = 1
num2 = 2
var1 = 'a'
var2 = 'b'
'''
+ / Additions / Can combine strings or numbes
'''
totalNum = num1 + num2
totalVar = var1 + var2
print(totalNum, totalVar )
'''
- / Subtraction
'''
totalNum = num1 - num2
'''
* / Multiplication
'''
'''
| / / Division
'''
'''
% / Remainder(Modulo)
'''
'''
** / Powe | r
'''
'''
= / Assignment
'''
'''
== / Equality
'''
'''
!= / Not Equal
'''
'''
> / Greater than
'''
'''
< / Less than
'''
'''
&(or and) / and
'''
'''
| or / or
'''
|
Elchi3/kuma | kuma/attachments/tests/conftest.py | Python | mpl-2.0 | 809 | 0 | import datetime
import pytest
from django.core.files.base import ContentFile
from kuma.attachments.models import Attachment, AttachmentRevision
@pytest.fixture
def file_attachment(db, wiki_user):
file_id = 97
filename = "test.txt"
title = "Test text file"
attachment = Attachment(title=title, mindtouch_attachment_id=file_id)
attachment.save()
r | evision = AttachmentRevision(
title=title,
is_approved=True,
attachment=attachment,
mime_type="text/plain",
description="Initial upload",
created=datetime.datetime.now(),
)
revision.creator = wiki_user
revision.file.save(filename, ContentFile(b"This is only a test."))
revisio | n.make_current()
return dict(attachment=attachment, file=dict(id=file_id, name=filename),)
|
mmottahedi/neuralnilm_prototype | scripts/e234.py | Python | mit | 4,812 | 0.004572 | from __future__ import print_function, division
import matplotlib
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import Net, RealApplianceSource, BLSTMLayer, DimshuffleLayer
from lasagne.nonlinearities import sigmoid, rectify
from lasagne.objectives import crossentropy, mse
from lasagne.init import Uniform, Normal
from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer, Fea | turePoolLayer
from lasagne.updates import nesterov_momentum
from functools import partial
import os
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment
from neuralnilm.net import TrainingError
import __main__
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
P | ATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 250
GRADIENT_STEPS = 100
"""
e233
based on e131c but with:
* lag=32
* pool
e234
* init final layer and conv layer
"""
def exp_a(name):
    """Build the Net for experiment 'a'.

    Architecture (from the layers_config below): two dense sigmoid layers ->
    LSTM -> dimshuffle -> 1D convolution -> dimshuffle back -> feature
    pooling over the time axis -> LSTM -> dense sigmoid output; trained
    with crossentropy loss and Nesterov momentum (lr=1.0).

    NOTE(review): relies on a module-global `source`; its construction is
    commented out below, so it must be created elsewhere before this is
    called -- confirm.
    """
    # global source
    # source = RealApplianceSource(
    #     filename='/data/dk3810/ukdale.h5',
    #     appliances=[
    #         ['fridge freezer', 'fridge', 'freezer'],
    #         'hair straighteners',
    #         'television',
    #         'dish washer',
    #         ['washer dryer', 'washing machine']
    #     ],
    #     max_appliance_powers=[300, 500, 200, 2500, 2400],
    #     on_power_thresholds=[5] * 5,
    #     max_input_power=5900,
    #     min_on_durations=[60, 60, 60, 1800, 1800],
    #     min_off_durations=[12, 12, 12, 1800, 600],
    #     window=("2013-06-01", "2014-07-01"),
    #     seq_length=1500,
    #     output_one_appliance=False,
    #     boolean_targets=False,
    #     train_buildings=[1],
    #     validation_buildings=[1],
    #     skip_probability=0.7,
    #     n_seq_per_batch=10,
    #     subsample_target=5,
    #     input_padding=4,
    #     include_diff=False,
    #     clip_appliance_power=False,
    #     lag=32
    # )
    net = Net(
        experiment_name=name,
        source=source,
        save_plot_interval=SAVE_PLOT_INTERVAL,
        loss_function=crossentropy,
        updates=partial(nesterov_momentum, learning_rate=1.0),
        layers_config=[
            {
                'type': DenseLayer,
                'num_units': 50,
                'nonlinearity': sigmoid,
                'W': Uniform(25),
                'b': Uniform(25)
            },
            {
                'type': DenseLayer,
                'num_units': 50,
                'nonlinearity': sigmoid,
                'W': Uniform(10),
                'b': Uniform(10)
            },
            {
                'type': LSTMLayer,
                'num_units': 40,
                'W_in_to_cell': Uniform(5),
                'gradient_steps': GRADIENT_STEPS,
                'peepholes': False
            },
            {
                # Conv1DLayer expects (batch, channels, time): swap axes.
                'type': DimshuffleLayer,
                'pattern': (0, 2, 1)
            },
            {
                'type': Conv1DLayer,
                'num_filters': 20,
                'filter_length': 5,
                'stride': 1,
                'nonlinearity': sigmoid,
                'W': Uniform(1)
            },
            {
                # Swap back to (batch, time, features) for pooling/LSTM.
                'type': DimshuffleLayer,
                'pattern': (0, 2, 1)
            },
            {
                'type': FeaturePoolLayer,
                'ds': 5,  # number of feature maps to be pooled together
                'axis': 1  # pool over the time axis
            },
            {
                'type': LSTMLayer,
                'num_units': 80,
                'W_in_to_cell': Uniform(5),
                'gradient_steps': GRADIENT_STEPS,
                'peepholes': False
            },
            {
                'type': DenseLayer,
                'num_units': source.n_outputs,
                'nonlinearity': sigmoid,
                'W': Uniform(1)
            }
        ]
    )
    return net
def init_experiment(experiment):
    """Build and return the Net for one experiment letter (e.g. 'a' -> exp_a)."""
    full_exp_name = NAME + experiment
    print("***********************************")
    print("Preparing", full_exp_name, "...")
    # Dispatch to the matching exp_<letter> builder defined in this module.
    builder_call = 'exp_{:s}(full_exp_name)'.format(experiment)
    return eval(builder_call)
def main():
    """Run each configured experiment in sequence.

    Ctrl-C stops the whole run; a TrainingError is logged and the next
    experiment proceeds; any other exception is logged and re-raised.
    """
    for experiment in list('a'):
        full_exp_name = NAME + experiment
        path = os.path.join(PATH, full_exp_name)
        try:
            net = init_experiment(experiment)
            run_experiment(net, path, epochs=None)
        except KeyboardInterrupt:
            break
        except TrainingError as exception:
            print("EXCEPTION:", exception)
        except Exception as exception:
            # Log before propagating. The original had a bare `raise`
            # followed by an unreachable print and `ipdb.set_trace()`;
            # the dead code is removed and the print now runs.
            print("EXCEPTION:", exception)
            raise
|
eseraygun/python-entities | tests/field.py | Python | bsd-3-clause | 5,621 | 0 | import unittest
from entities import *
class TestField(unittest.TestCase):
def test_make_default(self):
field = Field(default=None, null=True)
self.assertEqual(field.make_default(), None)
field = Field(default=lambda: 1, null=True)
self.assertEqual(field.make_default(), 1)
field = Field(default=2, null=True)
self.assertEqual(field.make_default(), 2)
field = Field(default=None, null=False)
| self.assertRaises(ValueError, field.make_default)
def test_full_name(self):
class Foo(Entity):
field = ListField(DynamicField())
self.assertEqual(Foo.field.full_name(), 'field')
self.assertEqual(Foo.field.item_field.full_name(), 'field.<item>')
def test_validate(self):
field = Field(null= | True)
self.assertEqual(field.validate(None), None)
field = DynamicField(IntegerField, null=True)
self.assertEqual(field.validate(None), None)
field = DynamicField(IntegerField, null=False)
self.assertRaises(ValidationError, lambda: field.validate(None))
self.assertRaises(ValidationError, lambda: field.validate(1.0))
def test_keyify(self):
field = Field()
self.assertEqual(field.keyify(None), None)
self.assertEqual(field.keyify(1), 1)
self.assertEqual(field.keyify('foo'), 'foo')
def test_get_set(self):
class Foo(Entity):
field = IntegerField(0)
self.assertIsInstance(Foo.field, Field)
entity = Foo()
self.assertIsInstance(entity.field, int)
entity.field = 1
self.assertEqual(entity.field, 1)
def test_repr(self):
class Foo(Entity):
field = Field()
self.assertEqual(repr(Foo.field), "Field(name='field')")
class TestDynamicField(unittest.TestCase):
    def test_make_empty(self):
        """A dynamic field's empty value is just a plain object."""
        value = DynamicField().make_empty()
        self.assertIsInstance(value, object)
class TestBooleanField(unittest.TestCase):
    def test_make_empty(self):
        """An empty boolean field is False."""
        value = BooleanField().make_empty()
        self.assertIsInstance(value, bool)
        self.assertEqual(value, False)
class TestIntegerField(unittest.TestCase):
    def test_make_empty(self):
        """An empty integer field is 0."""
        value = IntegerField().make_empty()
        self.assertIsInstance(value, int)
        self.assertEqual(value, 0)
class TestFloatField(unittest.TestCase):
    def test_make_empty(self):
        """An empty float field is 0.0."""
        value = FloatField().make_empty()
        self.assertIsInstance(value, float)
        self.assertEqual(value, 0.0)
class TestStringField(unittest.TestCase):
    def test_make_empty(self):
        """An empty string field is the empty unicode string (Python 2 file)."""
        value = StringField().make_empty()
        self.assertIsInstance(value, basestring)
        self.assertEqual(value, u'')
class TestDateField(unittest.TestCase):
    def test_make_empty(self):
        """An empty date field yields a datetime.date instance."""
        value = DateField().make_empty()
        self.assertIsInstance(value, datetime.date)
class TestTimeField(unittest.TestCase):
    def test_make_empty(self):
        """An empty time field yields a datetime.datetime instance."""
        value = TimeField().make_empty()
        self.assertIsInstance(value, datetime.datetime)
class TestCollectionField(unittest.TestCase):
    """Shared collection-field behaviour, exercised through ListField."""

    def test_make_empty(self):
        empty = ListField().make_empty()
        self.assertIsInstance(empty, list)
        self.assertEqual(len(empty), 0)

    def test_validate(self):
        # An untyped list field accepts heterogeneous items.
        field = ListField()
        self.assertEqual(field.validate([1, 2.0, '3']), None)
        # Typed list: one bad item -> ValidationError; several -> MultipleErrors.
        field = ListField(IntegerField())
        self.assertEqual(field.validate([1, 2, 3]), None)
        self.assertRaises(ValidationError,
                          lambda: field.validate([1, 2, '3']))
        self.assertRaises(MultipleErrors,
                          lambda: field.validate([1, 2.0, '3']))
        # recursive=True applies the item field inside nested lists too.
        field = ListField(IntegerField(), recursive=True)
        self.assertEqual(field.validate([1, [2, 3]]), None)
        self.assertRaises(ValidationError,
                          lambda: field.validate([1, [2, '3']]))
        self.assertRaises(MultipleErrors,
                          lambda: field.validate([1, [2.0, '3']]))

    def test_keyify(self):
        # keyify maps lists to tuples (recursively); None passes through.
        field = ListField()
        self.assertEqual(field.keyify(None), None)
        self.assertEqual(field.keyify([1, 2, 3]), (1, 2, 3))
        field = ListField(ListField())
        self.assertEqual(field.keyify(None), None)
        self.assertEqual(field.keyify([[1], [2, 3]]), ((1,), (2, 3)))
class TestListField(unittest.TestCase):
    def test_make_empty(self):
        """An empty list field yields a fresh empty list."""
        value = ListField().make_empty()
        self.assertIsInstance(value, list)
        self.assertEqual(len(value), 0)
class TestSetField(unittest.TestCase):
    def test_make_empty(self):
        """An empty set field yields a fresh empty set."""
        value = SetField().make_empty()
        self.assertIsInstance(value, set)
        self.assertEqual(len(value), 0)
class TestDictField(unittest.TestCase):
    def test_make_empty(self):
        """An empty dict field yields a fresh empty dict."""
        value = DictField().make_empty()
        self.assertIsInstance(value, dict)
        self.assertEqual(len(value), 0)

    def test_validate(self):
        """Values are validated against the item field."""
        typed = DictField(IntegerField())
        self.assertEqual(typed.validate({'1': 1}), None)

    def test_keyify(self):
        """keyify turns dicts into sorted key/value tuples; None passes through."""
        flat = DictField()
        self.assertEqual(flat.keyify(None), None)
        self.assertEqual(flat.keyify({'1': 1, '2': 2}), (('1', 1), ('2', 2)))
        nested = DictField(DictField())
        self.assertEqual(nested.keyify(None), None)
        self.assertEqual(
            nested.keyify({'1': {'1.1': 11}, '2': {'2.1': 21}}),
            (('1', (('1.1', 11),)), ('2', (('2.1', 21),)))
        )
if __name__ == '__main__':
unittest.main()
|
leilihh/novaha | nova/cert/manager.py | Python | apache-2.0 | 2,464 | 0.000406 | # Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of t | he License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed | on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Cert manager manages x509 certificates.
**Related Flags**
:cert_topic: What :mod:`rpc` topic to listen to (default: `cert`).
:cert_manager: The module name of a class derived from
:class:`manager.Manager` (default:
:class:`nova.cert.manager.Manager`).
"""
import base64
from oslo import messaging
from nova import crypto
from nova import manager
class CertManager(manager.Manager):
    """RPC-facing manager for the cert service.

    Every operation is a thin delegation to nova.crypto; the `context`
    argument is part of the RPC signature and is not used locally.
    """

    target = messaging.Target(version='2.0')

    def __init__(self, *args, **kwargs):
        super(CertManager, self).__init__(service_name='cert',
                                          *args, **kwargs)

    def init_host(self):
        # Ensure the CA directory/files exist before serving requests.
        crypto.ensure_ca_filesystem()

    def revoke_certs_by_user(self, context, user_id):
        """Revoke all user certs."""
        return crypto.revoke_certs_by_user(user_id)

    def revoke_certs_by_project(self, context, project_id):
        """Revoke all project certs."""
        return crypto.revoke_certs_by_project(project_id)

    def revoke_certs_by_user_and_project(self, context, user_id, project_id):
        """Revoke certs for user in project."""
        return crypto.revoke_certs_by_user_and_project(user_id, project_id)

    def generate_x509_cert(self, context, user_id, project_id):
        """Generate and sign a cert for user in project."""
        return crypto.generate_x509_cert(user_id, project_id)

    def fetch_ca(self, context, project_id):
        """Get root ca for a project."""
        return crypto.fetch_ca(project_id)

    def fetch_crl(self, context, project_id):
        """Get crl (certificate revocation list) for a project."""
        return crypto.fetch_crl(project_id)

    def decrypt_text(self, context, project_id, text):
        """Decrypt base64 encoded text using the projects private key."""
        return crypto.decrypt_text(project_id, base64.b64decode(text))
|
grandquista/ReQL-Core | setup.py | Python | apache-2.0 | 1,701 | 0 | from setuptools import setup, Extension
with open('README.md') as istream:
long_description = istream.read()
tests_require = ['pytest']
libReQL = Extension(
'libReQL',
include_dirs=['src'],
sources=[
'src/Python/connection.c',
'src/Python/cursor.c',
'src/Python/query.c',
'src/Python/ReQL.c',
'src/Python/types.c',
'src/reql/char.c',
'src/reql/connection.c' | ,
'src/reql/cursor.c',
'src/reql/decode.c',
'src/reql/encode.c',
'src/reql/error.c',
'src/reql/query.c'
]
)
setup(
author='Adam Grandquist',
author_email='grandquista@gmail.com',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Natural Language :: Englis | h',
'Operating System :: MacOS :: MacOS X',
'Programming Language :: C',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Database :: Front-Ends'
],
description='A Python driver for RethinkDB.',
entry_points={
'console_scripts': [
]
},
extras_require={
'testing': tests_require
},
ext_modules=[libReQL],
keywords='',
license='Apache',
long_description=long_description,
name='libReQL',
package_data={
},
tests_require=tests_require,
url='https://github.com/grandquista/ReQL-Core',
version='1.0.0',
zip_safe=True
)
|
levilucio/SyVOLT | UML2ER/transformation/H02Package2ERModel.py | Python | mit | 1,402 | 0.03709 | from core.himesis import Himesis
import uuid
class H02Package2ERModel(H | imesis):
def __init__(self):
"""
Creates the himesis graph representing the DSLTrans rule 02Package2ERModel.
"""
# Flag this instance as compiled now
self.is_compiled = True
super(H02Package2ERModel, self).__init__(name='H02Package2ERModel', num_nodes=0, edges=[])
# Set the graph attributes
self["mm__"] = ['HimesisMM']
self["name"] = """02Package2ERModel"""
self["GUID__"] = uuid.uuid3 | (uuid.NAMESPACE_DNS,'02Package2ERModel')
# match model. We only support one match model
self.add_node()
self.vs[0]["mm__"] = """MatchModel"""
# apply model node
self.add_node()
self.vs[1]["mm__"] = """ApplyModel"""
# paired with relation between match and apply models
self.add_node()
self.vs[2]["mm__"] = """paired_with"""
self.vs[2]["attr1"] = """02Package2ERModel"""
# match class Package(Package) node
self.add_node()
self.vs[3]["mm__"] = """Package"""
self.vs[3]["attr1"] = """+"""
# apply class ERModel(ERModel) node
self.add_node()
self.vs[4]["mm__"] = """ERModel"""
self.vs[4]["attr1"] = """1"""
# Add the edges
self.add_edges([
(0,3), # matchmodel -> match_class Package(Package)
(1,4), # applymodel -> apply_classERModel(ERModel)
(0,2), # matchmodel -> pairedwith
(2,1) # pairedwith -> applyModel
])
self["equations"] = [((4,'name'),(3,'name')),]
|
titasakgm/brc-stock | openerp/addons/base_report/__openerp__.py | Python | agpl-3.0 | 1,858 | 0.011841 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013 Cubic ERP - Teradata SAC (<http://cubicerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License | for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name": "Base Report to Improve the Reports",
"version": "1.0",
"description": """
Improvements to Report's Modules
================================
Make some improves | on ir.actions.report.xml object in order to support many kinds of reports engines. Next the improves:
* Add a text field to edit and customize the report on the fly.
* Add a check field to push the report on the toolbar window
""",
"author": "Cubic ERP",
"website": "http://cubicERP.com",
"category": "Reporting",
"depends": [
"base",
],
"data":[
"ir_actions_view.xml",
"ir_model_view.xml",
"wizard/params_view.xml",
],
"demo_xml": [
],
"update_xml": [
],
"active": False,
"installable": True,
"certificate" : "",
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
eviljeff/zamboni | mkt/submit/tests/test_views.py | Python | bsd-3-clause | 38,831 | 0 | # -*- coding: utf-8 -*-
import datetime
import json
import os
import shutil
from django.conf import settings
from django.core.files.storage import default_storage as storage
from django.core.urlresolvers import reverse
import mock
from nose.tools import eq_, ok_
from pyquery import PyQuery as pq
import mkt
from mkt.constants.applications import DEVICE_TYPES
from mkt.files.tests.test_models import UploadTest as BaseUploadTest
from mkt.reviewers.models import EscalationQueue
from mkt.site.fixtures import fixture
from mkt.site.tests import formset, initial, TestCase, user_factory
from mkt.site.tests.test_utils_ import get_image_path
from mkt.submit.decorators import read_dev_agreement_required
from mkt.submit.forms import AppFeaturesForm, NewWebappVersionForm
from mkt.submit.models import AppSubmissionChecklist
from mkt.translations.models import Translation
from mkt.users.models import UserNotification, UserProfile
from mkt.users.notifications import app_surveys
from mkt.webapps.models import AddonDeviceType, AddonUser, AppFeatures, Webapp
class TestSubmit(TestCase):
    """Base class for app-submission flow tests.

    Logs in a regular user and patches icon fetching so no network I/O
    happens during the tests.
    """

    fixtures = fixture('user_999')

    def setUp(self):
        # Patch entered manually; tearDown is responsible for undoing it.
        self.fi_mock = mock.patch(
            'mkt.developers.tasks.fetch_icon').__enter__()
        self.user = self.get_user()
        self.login(self.user.email)

    def tearDown(self):
        self.fi_mock.__exit__()

    def get_user(self):
        return UserProfile.objects.get(email='regular@mozilla.com')

    def get_url(self, url):
        # URL of a named submission step for self.webapp (set by subclasses).
        return reverse('submit.app.%s' % url, args=[self.webapp.app_slug])

    def _test_anonymous(self):
        # Shared check: anonymous access must redirect to login.
        self.client.logout()
        r = self.client.get(self.url, follow=True)
        self.assertLoginRedirects(r, self.url)

    def _test_progress_display(self, completed, current):
        """Test that the correct steps are highlighted."""
        r = self.client.get(self.url)
        progress = pq(r.content)('#submission-progress')
        # Check the completed steps.
        completed_found = progress.find('.completed')
        for idx, step in enumerate(completed):
            li = completed_found.eq(idx)
            eq_(li.text(), unicode(mkt.APP_STEPS_TITLE[step]))
        # Check that we link back to the Developer Agreement.
        terms_link = progress.find('.terms a')
        if 'terms' in completed:
            eq_(terms_link.attr('href'),
                reverse('mkt.developers.docs', args=['policies', 'agreement']))
        else:
            eq_(terms_link.length, 0)
        # Check the current step.
        eq_(progress.find('.current').text(),
            unicode(mkt.APP_STEPS_TITLE[current]))
class TestProceed(TestSubmit):
    """Entry point of the submission flow: logged-in users are sent to the
    Terms step; anonymous users see the Terms page with a login prompt."""

    def setUp(self):
        super(TestProceed, self).setUp()
        self.user.update(read_dev_agreement=None)
        self.url = reverse('submit.app')

    def test_is_authenticated(self):
        # Redirect user to Terms.
        response = self.client.get(self.url)
        self.assert3xx(response, reverse('submit.app.terms'))

    def test_is_anonymous(self):
        # Show user the Terms page but with the login prompt.
        self.client.logout()
        response = self.client.get(self.url)
        eq_(response.status_code, 200)
        eq_(response.context['proceed'], True)
class TestTerms(TestSubmit):
def setUp(self):
super(TestTerms, self).setUp()
self.user.update(read_dev_agreement=None)
self.url = reverse('submit.app.terms')
def test_anonymous(self):
self.client.logout()
r = self.client.get(self.url, follow=True)
self.assertLoginRedirects(r, self.url)
def test_jump_to_step(self):
r = self.client.get(reverse('submit.app'), follow=True)
self.assert3xx(r, self.url)
| def test_page(self):
r = self.client.get(self.url)
| eq_(r.status_code, 200)
doc = pq(r.content)('#submit-terms')
eq_(doc.length, 1)
eq_(doc.find('input[name=newsletter]').siblings('label').length, 1,
'Missing its <label>!')
def test_progress_display(self):
self._test_progress_display([], 'terms')
@mock.patch('basket.subscribe')
def test_agree(self, subscribe_mock):
self.client.post(self.url, {'read_dev_agreement': True})
dt = self.get_user().read_dev_agreement
self.assertCloseToNow(dt)
eq_(UserNotification.objects.count(), 0)
assert not subscribe_mock.called
@mock.patch('basket.subscribe')
def test_agree_and_sign_me_up(self, subscribe_mock):
self.client.post(self.url, {'read_dev_agreement':
datetime.datetime.now(),
'newsletter': True})
dt = self.get_user().read_dev_agreement
self.assertCloseToNow(dt)
eq_(UserNotification.objects.count(), 1)
notes = UserNotification.objects.filter(user=self.user, enabled=True,
notification_id=app_surveys.id)
eq_(notes.count(), 1, 'Expected to not be subscribed to newsletter')
subscribe_mock.assert_called_with(
self.user.email, 'app-dev', lang='en-US',
country='restofworld', format='H',
source_url='http://testserver/developers/submit')
def test_disagree(self):
r = self.client.post(self.url)
eq_(r.status_code, 200)
eq_(self.user.read_dev_agreement, None)
eq_(UserNotification.objects.count(), 0)
def test_read_dev_agreement_required(self):
f = mock.Mock()
f.__name__ = 'function'
request = mock.Mock()
request.user.read_dev_agreement = None
request.get_full_path.return_value = self.url
func = read_dev_agreement_required(f)
res = func(request)
assert not f.called
eq_(res.status_code, 302)
eq_(res['Location'], reverse('submit.app'))
class TestManifest(TestSubmit):
    """Step 2 of the submission flow: the manifest upload page."""

    def setUp(self):
        super(TestManifest, self).setUp()
        self.user.update(read_dev_agreement=None)
        self.url = reverse('submit.app')

    def _step(self):
        # Mark the Terms step complete so the flow advances to Manifest.
        self.user.update(read_dev_agreement=datetime.datetime.now())

    def test_anonymous(self):
        r = self.client.get(self.url, follow=True)
        eq_(r.context['step'], 'terms')

    def test_cannot_skip_prior_step(self):
        r = self.client.get(self.url, follow=True)
        # And we start back at one...
        self.assert3xx(r, reverse('submit.app.terms'))

    def test_jump_to_step(self):
        # I already read the Terms.
        self._step()
        # So jump me to the Manifest step.
        r = self.client.get(reverse('submit.app'), follow=True)
        eq_(r.context['step'], 'manifest')

    def test_legacy_redirects(self):
        # Old URLs must redirect into the current flow; the destination and
        # status depend on whether the dev agreement has been read.
        def check():
            for before, status in redirects:
                r = self.client.get(before, follow=True)
                self.assert3xx(r, dest, status)
        # I haven't read the dev agreement.
        redirects = (
            ('/developers/submit/', 302),
            ('/developers/submit/app', 302),
            ('/developers/submit/app/terms', 302),
            ('/developers/submit/app/manifest', 302),
        )
        dest = '/developers/submit/terms'
        check()
        # I have read the dev agreement.
        self._step()
        redirects = (
            ('/developers/submit/app', 302),
            ('/developers/submit/app/terms', 302),
            ('/developers/submit/app/manifest', 302),
            ('/developers/submit/manifest', 301),
        )
        dest = '/developers/submit/'
        check()

    def test_page(self):
        self._step()
        r = self.client.get(self.url)
        eq_(r.status_code, 200)
        eq_(pq(r.content)('#upload-file').length, 1)

    def test_progress_display(self):
        self._step()
        self._test_progress_display(['terms'], 'manifest')
class UploadAddon(object):
def post(self, expect_errors=False, data=None):
if data is None:
data = {'free_platforms': ['free-desktop']}
data.update(upload=self.upload.pk)
response = self.client.post(self.url, data, follow=True)
eq_(response.status_code, 200)
if not expect_erro |
ojengwa/sympy | sympy/core/tests/test_expr.py | Python | bsd-3-clause | 54,290 | 0.000682 | from __future__ import division
from sympy import (Add, Basic, S, Symbol, Wild, Float, Integer, Rational, I,
sin, cos, tan, exp, log, nan, oo, sqrt, symbols, Integral, sympify,
WildFunction, Poly, Function, Derivative, Number, pi, NumberSymbol, zoo,
Piecewise, Mul, Pow, nsimplify, ratsimp, trigsimp, radsimp, powsimp,
simplify, together, collect, factorial, apart, combsimp, factor, refine,
cancel, Tuple, default_sort_key, DiracDelta, gamma, Dummy, Sum, E,
exp_polar, Lambda, expand, diff, O)
from sympy.core.function import AppliedUndef
from sympy.physics.secondquant import FockState
from sympy.physics.units import meter
from sympy.core.compatibility import xrange
from sympy.utilities.pytest import raises, XFAIL
from sympy.abc import a, b, c, n, t, u, x, y, z
class DummyNumber(object):
    """
    Minimal implementation of a number that works with SymPy.

    If one has a Number class (e.g. Sage Integer, or some other custom class)
    that one wants to work well with SymPy, one has to implement at least the
    methods of this class DummyNumber, resp. its subclasses I5 and F1_1.

    Basically, one just needs to implement either __int__() or __float__() and
    then one needs to make sure that the class works with Python integers and
    with itself.
    """

    def __radd__(self, a):
        if isinstance(a, (int, float)):
            return a + self.number
        return NotImplemented

    def __truediv__(a, b):
        # Route true division through __div__ so Python 2 and 3 behave alike.
        return a.__div__(b)

    def __rtruediv__(a, b):
        return a.__rdiv__(b)

    def __add__(self, a):
        if isinstance(a, (int, float, DummyNumber)):
            return self.number + a
        return NotImplemented

    def __rsub__(self, a):
        if isinstance(a, (int, float)):
            return a - self.number
        return NotImplemented

    def __sub__(self, a):
        if isinstance(a, (int, float, DummyNumber)):
            return self.number - a
        return NotImplemented

    def __rmul__(self, a):
        if isinstance(a, (int, float)):
            return a * self.number
        return NotImplemented

    def __mul__(self, a):
        if isinstance(a, (int, float, DummyNumber)):
            return self.number * a
        return NotImplemented

    def __rdiv__(self, a):
        if isinstance(a, (int, float)):
            return a / self.number
        return NotImplemented

    def __div__(self, a):
        if isinstance(a, (int, float, DummyNumber)):
            return self.number / a
        return NotImplemented

    def __rpow__(self, a):
        if isinstance(a, (int, float)):
            return a ** self.number
        return NotImplemented

    def __pow__(self, a):
        if isinstance(a, (int, float, DummyNumber)):
            return self.number ** a
        return NotImplemented

    def __pos__(self):
        return self.number

    def __neg__(self):
        return - self.number
class I5(DummyNumber):
    """Duck-typed number coercible only via __int__ (value 5)."""
    number = 5
    def __int__(self):
        return self.number
class F1_1(DummyNumber):
    """Duck-typed number coercible only via __float__ (value 1.1)."""
    number = 1.1
    def __float__(self):
        return self.number
# shared instances of the duck-typed numbers defined above
i5 = I5()
f1_1 = F1_1()
# basic sympy objects
basic_objs = [
    Rational(2),
    Float("1.3"),
    x,
    y,
    pow(x, y)*y,
]
# all supported objects
all_objs = basic_objs + [
    5,
    5.5,
    i5,
    f1_1
]
def dotest(s):
    """Call ``s(x, y)`` for every ordered pair drawn from ``all_objs``.

    Returns True so callers can simply write ``assert dotest(f)``.
    """
    pairs = ((left, right) for left in all_objs for right in all_objs)
    for left, right in pairs:
        s(left, right)
    return True
def test_basic():
    """Unary and binary arithmetic between all supported objects must not raise."""
    def exercise(a, b):
        +a
        -a
        a + b
        a - b
        a*b
        a/b
        a**b
    assert dotest(exercise)
def test_ibasic():
    """Augmented assignment (+=, -=, *=, /=) works for all object pairs."""
    def s(a, b):
        x = a
        x += b
        x = a
        x -= b
        x = a
        x *= b
        x = a
        x /= b
    assert dotest(s)
def test_relational():
    """Comparisons of numeric expressions with literals evaluate to S.true/S.false."""
    assert (pi < 3) is S.false
    assert (pi <= 3) is S.false
    assert (pi > 3) is S.true
    assert (pi >= 3) is S.true
    assert (-pi < 3) is S.true
    assert (-pi <= 3) is S.true
    assert (-pi > 3) is S.false
    assert (-pi >= 3) is S.false
    # the symbol cancels, leaving a decidable numeric comparison
    assert (x - 2 < x - 3) is S.false
def test_relational_assumptions():
    """Relational evaluation driven by symbol assumptions.

    A negated assumption alone (e.g. nonnegative=False) leaves the comparison
    symbolic; combined with real=True (or a direct sign assumption) the
    comparison evaluates to S.true/S.false.
    """
    from sympy import Lt, Gt, Le, Ge
    # negated assumptions alone: comparison stays unevaluated
    m1 = Symbol("m1", nonnegative=False)
    m2 = Symbol("m2", positive=False)
    m3 = Symbol("m3", nonpositive=False)
    m4 = Symbol("m4", negative=False)
    assert (m1 < 0) == Lt(m1, 0)
    assert (m2 <= 0) == Le(m2, 0)
    assert (m3 > 0) == Gt(m3, 0)
    assert (m4 >= 0) == Ge(m4, 0)
    # adding real=True makes the sign decidable
    m1 = Symbol("m1", nonnegative=False, real=True)
    m2 = Symbol("m2", positive=False, real=True)
    m3 = Symbol("m3", nonpositive=False, real=True)
    m4 = Symbol("m4", negative=False, real=True)
    assert (m1 < 0) is S.true
    assert (m2 <= 0) is S.true
    assert (m3 > 0) is S.true
    assert (m4 >= 0) is S.true
    # direct sign assumptions evaluate immediately
    m1 = Symbol("m1", negative=True)
    m2 = Symbol("m2", nonpositive=True)
    m3 = Symbol("m3", positive=True)
    m4 = Symbol("m4", nonnegative=True)
    assert (m1 < 0) is S.true
    assert (m2 <= 0) is S.true
    assert (m3 > 0) is S.true
    assert (m4 >= 0) is S.true
    # negated sign assumptions decide the comparison false
    m1 = Symbol("m1", negative=False)
    m2 = Symbol("m2", nonpositive=False)
    m3 = Symbol("m3", positive=False)
    m4 = Symbol("m4", nonnegative=False)
    assert (m1 < 0) is S.false
    assert (m2 <= 0) is S.false
    assert (m3 > 0) is S.false
    assert (m4 >= 0) is S.false
def test_relational_noncommutative():
    """Comparisons of noncommutative symbols build unevaluated relationals."""
    from sympy import Lt, Gt, Le, Ge
    A, B = symbols('A,B', commutative=False)
    assert (A < B) == Lt(A, B)
    assert (A <= B) == Le(A, B)
    assert (A > B) == Gt(A, B)
    assert (A >= B) == Ge(A, B)
def test_basic_nostr():
    """Mixing sympy objects with plain strings raises TypeError."""
    for obj in basic_objs:
        raises(TypeError, lambda: obj + '1')
        raises(TypeError, lambda: obj - '1')
        if obj == 2:
            # special case: 2 * '1' falls back to sequence repetition
            assert obj * '1' == '11'
        else:
            raises(TypeError, lambda: obj * '1')
        raises(TypeError, lambda: obj / '1')
        raises(TypeError, lambda: obj ** '1')
def test_series_expansion_for_uniform_order():
    """series(x, 0, n) keeps exactly the terms below order n, plus O(x**n)."""
    assert (1/x + y + x).series(x, 0, 0) == 1/x + O(1, x)
    assert (1/x + y + x).series(x, 0, 1) == 1/x + y + O(x)
    assert (1/x + 1 + x).series(x, 0, 0) == 1/x + O(1, x)
    assert (1/x + 1 + x).series(x, 0, 1) == 1/x + 1 + O(x)
    assert (1/x + x).series(x, 0, 0) == 1/x + O(1, x)
    assert (1/x + y + y*x + x).series(x, 0, 0) == 1/x + O(1, x)
    assert (1/x + y + y*x + x).series(x, 0, 1) == 1/x + y + O(x)
def test_leadterm():
    """leadterm(x) returns (coefficient, exponent) of the lowest-order term in x."""
    assert (3 + 2*x**(log(3)/log(2) - 1)).leadterm(x) == (3, 0)
    assert (1/x**2 + 1 + x + x**2).leadterm(x)[1] == -2
    assert (1/x + 1 + x + x**2).leadterm(x)[1] == -1
    assert (x**2 + 1/x).leadterm(x)[1] == -1
    assert (1 + x**2).leadterm(x)[1] == 0
    assert (x + 1).leadterm(x)[1] == 0
    assert (x + x**2).leadterm(x)[1] == 1
    assert (x**2).leadterm(x)[1] == 2
def test_as_leading_term():
    """as_leading_term(x) keeps only the lowest-order term of the expression."""
    assert (3 + 2*x**(log(3)/log(2) - 1)).as_leading_term(x) == 3
    assert (1/x**2 + 1 + x + x**2).as_leading_term(x) == 1/x**2
    assert (1/x + 1 + x + x**2).as_leading_term(x) == 1/x
    assert (x**2 + 1/x).as_leading_term(x) == 1/x
    assert (1 + x**2).as_leading_term(x) == 1
    assert (x + 1).as_leading_term(x) == 1
    assert (x + x**2).as_leading_term(x) == x
    assert (x**2).as_leading_term(x) == x**2
    # an infinite constant dominates every power of x
    assert (x + oo).as_leading_term(x) == oo
def test_leadterm2():
    """leadterm handles non-trivial constant (x-free) coefficients."""
    assert (x*cos(1)*cos(1 + sin(1)) + sin(1 + sin(1))).leadterm(x) == \
        (sin(1 + sin(1)), 0)
def test_leadterm3():
    """The x-free part of a sum is the leading coefficient at exponent 0."""
    assert (y + z + x).leadterm(x) == (y + z, 0)
def test_as_leading_term2():
    """as_leading_term keeps a non-trivial constant term over an O(x) term."""
    assert (x*cos(1)*cos(1 + sin(1)) + sin(1 + sin(1))).as_leading_term(x) == \
        sin(1 + sin(1))
def test_as_leading_term3():
    """Constant terms are collected before the leading term is taken."""
    assert (2 + pi + x).as_leading_term(x) == 2 + pi
    assert (2*x + pi*x + x**2).as_leading_term(x) == (2 + pi)*x
def test_as_leading_term4():
    """Regression for issue 6843: leading term of a large rational expression."""
    # see issue 6843
    n = Symbol('n', integer=True, positive=True)
    r = -n**3/(2*n**2 + 4*n + 2) - n**2/(n**2 + 2*n + 1) + \
        n**2/(n + 1) - n/(2*n**2 + 4*n + 2) + n/(n*x + x) + 2*n/(n + 1) - \
        1 + 1/(n*x + x) + 1/(n + 1) - 1/x
    assert r.as_leading_term(x).cancel() == n/2
def test_as_leading_term_stub():
class foo(Fun |
huajiahen/hotspot | backend/Busy/BusyJpush.py | Python | mit | 3,607 | 0.019324 | # -*- coding:utf-8 -*-
import copy
import urllib, urllib2
import json
import logging
import hashlib
import time
logger = logging.getLogger('jpush_py')
SEND_MSG_URL = 'http://api.jpush.cn:8800/sendmsg/v2/sendmsg'
REQUIRED_KEYS = set(('receiver_type', 'msg_type', 'msg_content','platform'))
class JPushPy(object):
    '''JPush (极光推送) "sendmsg" v2 API client.

    All instances share a single monotonically increasing message serial
    number (``_sendno``), seeded from the wall clock when the first client
    is constructed.
    '''

    # Class-wide serial number shared by every instance; initialised lazily.
    _sendno = None

    def __init__(self, app_key, master_secret):
        # BUG FIX: was ``self.__class__.sendno`` (attribute does not exist,
        # raising AttributeError on construction) -- the class attribute is
        # ``_sendno``.
        if self.__class__._sendno is None:
            self.__class__._sendno = int(time.time())
        self._app_key = app_key
        self._master_secret = master_secret

    @classmethod
    def generate_sendno(cls):
        '''Return the next message serial number (shared across instances).'''
        cls._sendno += 1
        return cls._sendno

    def generate_verification_code(self, params):
        '''Build the verification code (生成验证码): MD5 over sendno,
        receiver_type, receiver_value and the master secret, upper-cased hex.
        '''
        m = hashlib.md5()
        # renamed local (was ``str``, shadowing the builtin)
        raw = "%s%s%s%s" % (params['sendno'], params['receiver_type'],
                            params.get('receiver_value', ''),
                            self._master_secret)
        m.update(raw)
        return m.hexdigest().upper()

    def generate_params(self, params):
        '''Build the final request dict (生成新的params).

        JSON-encodes dict values, UTF-8 encodes unicode values, then attaches
        sendno, app_key and the verification code.

        BUG FIXES vs. the original:
        - the built dict was never returned (send_msg urlencoded None);
        - ``self.app_key`` did not exist (attribute is ``self._app_key``);
        - a fresh sendno was generated (burning a serial) even when the
          caller supplied one.
        '''
        new_params = dict()
        if 'sendno' in params:
            sendno = params['sendno']
        else:
            sendno = self.generate_sendno()
        for k, v in params.items():
            if k == 'msg_content' and isinstance(v, basestring):
                # msg_type 1: plain text content is wrapped as a notification.
                if params['msg_type'] == 1:
                    v = dict(
                        n_content=v
                    )
            if isinstance(v, dict):
                new_params[k] = json.dumps(v)
            elif isinstance(v, unicode):
                new_params[k] = v.encode('utf-8')
            else:
                new_params[k] = v
        new_params['sendno'] = sendno
        new_params['app_key'] = self._app_key
        new_params['verification_code'] = self.generate_verification_code(new_params)
        return new_params

    def send_msg(self, params, timeout=None):
        '''Send the message (发送消息): POST to SEND_MSG_URL.

        Returns the decoded JSON response on success, or an error dict with a
        negative ``errcode`` (-1000 bad params, -1001 network, -1002 bad JSON).
        '''
        logger.debug('params: ' + repr(params))
        if len(REQUIRED_KEYS & set(params.keys())) != len(REQUIRED_KEYS):
            return dict(
                sendno=params.get('sendno', None),
                errcode=-1000,
                errmsg=u'参数错误',
            )
        new_params = self.generate_params(params)
        logger.debug('new_params: ' + repr(new_params))
        encode_params = urllib.urlencode(new_params)
        try:
            data = urllib2.urlopen(SEND_MSG_URL, encode_params, timeout).read()
        except Exception as e:
            logger.error('exception occur.msg[%s], traceback[%s]' % (str(e), __import__('traceback').format_exc()))
            return dict(
                sendno=new_params['sendno'],
                errcode=-1001,
                errmsg=u'网络错误',
            )
        try:
            jdata = json.loads(data)
        except Exception as e:
            logger.error('exception occur.msg[%s], traceback[%s]' % (str(e), __import__('traceback').format_exc()))
            return dict(
                sendno=new_params['sendno'],
                errcode=-1002,
                errmsg=u'返回包解析错误',
            )
        return jdata
if __name__=='__main__':
    # Manual smoke test: wire the module logger to stderr and push one message.
    import logging
    import BusyJpush
    BusyJpush.logger.addHandler(logging.StreamHandler())
    BusyJpush.logger.setLevel(logging.DEBUG)
    client = BusyJpush.JPushPy('your app_key','your master secret')
    params = dict(
        receiver_type = 4,  # presumably 4 == broadcast -- verify against JPush API docs
        msg_type = 1,  # 1 == notification; generate_params wraps content as n_content
        msg_content = u'富士,你为什么放弃治疗~',
        platform = 'ios' # case-sensitive? (original note: 大小写?)
    )
    print client.send_msg(params,10)
|
stgeorges/pythonscripts | object_names_No_occurrences.py | Python | unlicense | 2,663 | 0.010139 | #*******************************************************************************************#
#********* Object names and its number of occurrences **************************************#
#********* by Djordje Spasic ***************************************************************#
#********* issworld2000@yahoo.com 08-Feb-2014 **********************************************#
""" Function shows all object's assigned names and counts the number of its occurrences.
Results could be exported to .csv file or just presented on the screen in a form of a message box """
import rhinoscriptsyntax as rs
def exportNameOcc(_objs_ids):
    """Count how often each object name occurs among ``_objs_ids`` and either
    show the tallies in a message box (choice 0) or write them to a .csv
    file (choice 1). Unnamed objects are ignored.
    """
    # extracting object names
    namesColumn = []
    if _objs_ids:
        for obj in _objs_ids:
            name = rs.ObjectName(obj)
            if name:
                namesColumn.append(name)
        if len(namesColumn) > 0:
            namesColumnU = list(set(namesColumn))
            # number of occurrences of each unique name
            countColumn = []
            for nU in namesColumnU:
                countColumn.append(namesColumn.count(nU))
            # pair (count, name) and sort by count, descending
            mergedL = []
            for i in range(len(namesColumnU)):
                mergedL.append((countColumn[i], namesColumnU[i]))
            mergedL.sort(reverse=True)
            # exporting
            export = rs.GetInteger("Export results to .csv file or just show them on the screen? CSV(1), Screen(0)", number = 0, minimum = 0, maximum = 1)
            if export == 0:
                message = "Object name - No. of occurrences\n \n"
                # BUG FIX: a stray debug print of the *unsorted* name list was
                # interleaved here, contradicting the sorted output below.
                for occ, name in mergedL:
                    message += " %s - %i\n" % (name, occ)
                rs.MessageBox(message, 0, "Results")
            else:
                filename = rs.SaveFileName("Save csv file","*.csv||", None, "ObjectNamesOccurrences", "csv")
                # renamed local (was ``file``, shadowing the builtin)
                out_file = open(filename, 'w')
                out_file.write("Names, Occurrences\n")
                for occ, name in mergedL:
                    out_file.write("%s,%i \n" % (name, occ))
                out_file.close()
                print("Done")
        else:
            print("You do not have named objects. Function terminated")
            return
    else:
        print("Your 3dm file is empty or you did not select objects. Function terminated")
        return
# function call: gather objects via rs filter 0 (presumably "all objects" --
# verify against rhinoscriptsyntax docs) and report their name occurrences
objs_ids = rs.ObjectsByType(0)
exportNameOcc(objs_ids)
|
jjhelmus/berryconda | recipes/faker/run_test.py | Python | bsd-3-clause | 7,740 | 0 | import faker
import faker.providers
import faker.providers.address
import faker.providers.address.cs_CZ
import faker.providers.address.de_DE
import faker.providers.address.el_GR
import faker.providers.address.en
import faker.providers.address.en_AU
import faker.providers.address.en_CA
import faker.providers.address.en_GB
import faker.providers.address.en_US
import faker.providers.address.es
import faker.providers.address.es_ES
import faker.providers.address.es_MX
import faker.providers.address.fa_IR
import faker.providers.address.fi_FI
import faker.providers.address.fr_CH
import faker.providers.address.fr_FR
import faker.providers.address.hi_IN
import faker.providers.address.hr_HR
import faker.providers.address.it_IT
import faker.providers.address.ja_JP
import faker.providers.address.ko_KR
import faker.providers.address.ne_NP
import faker.providers.address.nl_BE
import faker.providers.address.nl_NL
import faker.providers.address.no_NO
import faker.providers.address.pl_PL
import faker.providers.address.pt_BR
import faker.providers.address.pt_PT
import faker.providers.address.ru_RU
import faker.providers.address.sk_SK
import faker.providers.address.sl_SI
import faker.providers.address.sv_SE
import faker.providers.address.uk_UA
import faker.providers.address.zh_CN
import faker.providers.address.zh_TW
import faker.providers.barcode
import faker.providers.barcode.en_US
import faker.providers.color
import faker.providers.color.en_US
import faker.providers.color.uk_UA
import faker.providers.company
import faker.providers.company.bg_BG
import faker.providers.company.cs_CZ
import faker.providers.company.de_DE
import faker.providers.company.en_US
import faker.providers.company.es_MX
import faker.providers.company.fa_IR
import faker.providers.company.fi_FI
import faker.providers.company.fr_CH
import faker.providers.company.fr_FR
import faker.providers.company.hr_HR
import faker.providers.company.it_IT
import faker.providers.company.ja_JP
import faker.providers.company.ko_KR
import faker.providers.company.no_NO
import faker.providers.company.pt_BR
import faker.providers.company.pt_PT
import faker.providers.company.ru_RU
import faker.providers.company.sk_SK
import faker.providers.company.sl_SI
import faker.providers.company.sv_SE
import faker.providers.company.zh_CN
import faker.providers.company.zh_TW
import faker.providers.credit_card
import faker.providers.credit_card.en_US
import faker.providers.currency
import faker.providers.currency.en_US
import faker.providers.date_time
import faker.providers.date_time.en_US
import faker.providers.file
import faker.providers.file.en_US
import faker.providers.internet
import faker.providers.internet.bg_BG
import faker.providers.internet.bs_BA
import faker.providers.internet.cs_CZ
import faker.providers.internet.de_AT
import faker.providers.internet.de_DE
import faker.providers.internet.el_GR
import faker.providers.internet.en_AU
import faker.providers.internet.en_US
import faker.providers.internet.fa_IR
import faker.providers.internet.fi_FI
import faker.providers.internet.fr_CH
import faker.providers.internet.fr_FR
import faker.providers.internet.hr_HR
import faker.providers.internet.ja_JP
import faker.providers.internet.ko_KR
import faker.providers.internet.no_NO
import faker.providers.internet.pt_BR
import faker.providers.internet.pt_PT
import faker.providers.internet.ru_RU
import faker.providers.internet.sk_SK
import faker.providers.internet.sl_SI
import faker.providers.internet.sv_SE
import faker.providers.internet.uk_UA
import faker.providers.internet.zh_CN
import faker.providers.job
import faker.providers.job.en_US
import faker.providers.job.fa_IR
import faker.providers.job.fr_CH
import faker.providers.job.fr_FR
import faker.providers.job.hr_HR
i | mport faker.providers.job.pl_PL
import faker.providers.job.ru_RU
import faker.providers.job.uk_UA
import faker.providers.job.zh_TW
import faker.providers.lorem
import faker.providers.lorem.el_GR
import faker | .providers.lorem.la
import faker.providers.lorem.ru_RU
import faker.providers.misc
import faker.providers.misc.en_US
import faker.providers.person
import faker.providers.person.bg_BG
import faker.providers.person.cs_CZ
import faker.providers.person.de_AT
import faker.providers.person.de_DE
import faker.providers.person.dk_DK
import faker.providers.person.el_GR
import faker.providers.person.en
import faker.providers.person.en_GB
import faker.providers.person.en_US
import faker.providers.person.es_ES
import faker.providers.person.es_MX
import faker.providers.person.fa_IR
import faker.providers.person.fi_FI
import faker.providers.person.fr_CH
import faker.providers.person.fr_FR
import faker.providers.person.hi_IN
import faker.providers.person.hr_HR
import faker.providers.person.it_IT
import faker.providers.person.ja_JP
import faker.providers.person.ko_KR
import faker.providers.person.lt_LT
import faker.providers.person.lv_LV
import faker.providers.person.ne_NP
import faker.providers.person.nl_NL
import faker.providers.person.no_NO
import faker.providers.person.pl_PL
import faker.providers.person.pt_BR
import faker.providers.person.pt_PT
import faker.providers.person.ru_RU
import faker.providers.person.sl_SI
import faker.providers.person.sv_SE
import faker.providers.person.tr_TR
import faker.providers.person.uk_UA
import faker.providers.person.zh_CN
import faker.providers.person.zh_TW
import faker.providers.phone_number
import faker.providers.phone_number.bg_BG
import faker.providers.phone_number.bs_BA
import faker.providers.phone_number.cs_CZ
import faker.providers.phone_number.de_DE
import faker.providers.phone_number.dk_DK
import faker.providers.phone_number.el_GR
import faker.providers.phone_number.en_AU
import faker.providers.phone_number.en_CA
import faker.providers.phone_number.en_GB
import faker.providers.phone_number.en_US
import faker.providers.phone_number.es_ES
import faker.providers.phone_number.es_MX
import faker.providers.phone_number.fa_IR
import faker.providers.phone_number.fi_FI
import faker.providers.phone_number.fr_CH
import faker.providers.phone_number.fr_FR
import faker.providers.phone_number.hi_IN
import faker.providers.phone_number.hr_HR
import faker.providers.phone_number.it_IT
import faker.providers.phone_number.ja_JP
import faker.providers.phone_number.ko_KR
import faker.providers.phone_number.lt_LT
import faker.providers.phone_number.lv_LV
import faker.providers.phone_number.ne_NP
import faker.providers.phone_number.nl_BE
import faker.providers.phone_number.nl_NL
import faker.providers.phone_number.no_NO
import faker.providers.phone_number.pl_PL
import faker.providers.phone_number.pt_BR
import faker.providers.phone_number.pt_PT
import faker.providers.phone_number.ru_RU
import faker.providers.phone_number.sk_SK
import faker.providers.phone_number.sl_SI
import faker.providers.phone_number.sv_SE
import faker.providers.phone_number.tr_TR
import faker.providers.phone_number.uk_UA
import faker.providers.phone_number.zh_CN
import faker.providers.phone_number.zh_TW
import faker.providers.profile
import faker.providers.profile.en_US
import faker.providers.python
import faker.providers.python.en_US
import faker.providers.ssn
import faker.providers.ssn.en_CA
import faker.providers.ssn.en_US
import faker.providers.ssn.fi_FI
import faker.providers.ssn.fr_CH
import faker.providers.ssn.hr_HR
import faker.providers.ssn.it_IT
import faker.providers.ssn.ko_KR
import faker.providers.ssn.nl_BE
import faker.providers.ssn.nl_NL
import faker.providers.ssn.pt_BR
import faker.providers.ssn.ru_RU
import faker.providers.ssn.sv_SE
import faker.providers.ssn.uk_UA
import faker.providers.ssn.zh_CN
import faker.providers.ssn.zh_TW
import faker.providers.user_agent
import faker.providers.user_agent.en_US
import faker.utils
|
OpenTreeOfLife/opentree | custom_import.py | Python | bsd-2-clause | 7,868 | 0.001398 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
*****************************
NOTE that this is a modified version from web2py 2.8.2. For full details on what has changed, see
https://github.com/OpenTreeOfLife/opentree/commits/master/custom_import.py
This file was patched (by jimallman, on 10/10/2017) to restore working python
imports. See the problems and solution reported here:
https://groups.google.com/forum/#!topic/web2py/k5193zQX6kM
*****************************
"""
import __builtin__
import os
import sys
import threading
import traceback
from gluon import current
NATIVE_IMPORTER = __builtin__.__import__
INVALID_MODULES = set(('', 'gluon', 'applications', 'custom_import'))
# backward compatibility API
def custom_import_install():
    """Install the web2py custom importer as the global __import__ (idempotent).

    On first install every already-imported module is added to
    INVALID_MODULES so the applications-path rewriting never applies to it.
    """
    if __builtin__.__import__ == NATIVE_IMPORTER:
        INVALID_MODULES.update(sys.modules.keys())
        __builtin__.__import__ = custom_importer
def track_changes(track=True):
    """Enable/disable reload-on-change module tracking for the current request."""
    assert track in (True, False), "must be True or False"  # NOTE: stripped under python -O
    current.request._custom_import_track_changes = track
def is_tracking_changes():
    """Return whether module change tracking is enabled for the current request."""
    return current.request._custom_import_track_changes
class CustomImportException(ImportError):
    """ImportError subclass reserved for the custom importer (not raised here)."""
    pass
def custom_importer(name, globals=None, locals=None, fromlist=None, level=-1):
    """
    The web2py custom importer. Like the standard Python importer but it
    tries to transform import statements as something like
    "import applications.app_name.modules.x".
    If the import failed, fall back on naive_importer

    Returns the imported module, with the same signature as __import__.
    """
    globals = globals or {}
    locals = locals or {}
    fromlist = fromlist or []
    try:
        # Per-request flag selects the date-tracking importer (reload on edit).
        if current.request._custom_import_track_changes:
            base_importer = TRACK_IMPORTER
        else:
            base_importer = NATIVE_IMPORTER
    except: # there is no current.request (should never happen)
        base_importer = NATIVE_IMPORTER
    # if not relative and not from applications:
    if hasattr(current, 'request') \
            and level <= 0 \
            and not name.partition('.')[0] in INVALID_MODULES \
            and isinstance(globals, dict):
        import_tb = None
        try:
            try:
                # Patched behaviour (see module docstring): try the native
                # importer first so stdlib/site-packages win over app modules.
                oname = name if not name.startswith('.') else '.'+name
                return NATIVE_IMPORTER(oname, globals, locals, fromlist, level)
            except ImportError:
                # Rewrite "import x" as "import applications.<app>.modules.x",
                # using the current request's application folder.
                items = current.request.folder.split(os.path.sep)
                if not items[-1]:
                    items = items[:-1]
                modules_prefix = '.'.join(items[-2:]) + '.modules'
                if not fromlist:
                    # import like "import x" or "import x.y"
                    result = None
                    for itemname in name.split("."):
                        itemname = itemname.encode('utf-8')
                        new_mod = base_importer(
                            modules_prefix, globals, locals, [itemname], level)
                        try:
                            # only the first component is kept: __import__
                            # semantics return the top-level module
                            result = result or new_mod.__dict__[itemname]
                        except KeyError, e:
                            raise ImportError, 'Cannot import module %s' % str(e)
                        modules_prefix += "." + itemname
                    return result
                else:
                    # import like "from x import a, b, ..."
                    pname = modules_prefix + "." + name
                    return base_importer(pname, globals, locals, fromlist, level)
        except ImportError, e1:
            import_tb = sys.exc_info()[2]
            try:
                return NATIVE_IMPORTER(name, globals, locals, fromlist, level)
            except ImportError, e3:
                raise ImportError, e1, import_tb # there an import error in the module
            except Exception, e2:
                raise e2 # there is an error in the module
        finally:
            if import_tb:
                # drop the saved traceback to break the reference cycle
                import_tb = None
    return NATIVE_IMPORTER(name, globals, locals, fromlist, level)
class TrackImporter(object):
    """
    An importer tracking the date of the module files and reloading them when
    they have changed.

    Instances are callable with the same signature as __import__.
    """
    THREAD_LOCAL = threading.local()
    PACKAGE_PATH_SUFFIX = os.path.sep + "__init__.py"
    def __init__(self):
        self._import_dates = {}  # Import dates of the files of the modules
    def __call__(self, name, globals=None, locals=None, fromlist=None, level=-1):
        """
        The import method itself.
        """
        globals = globals or {}
        locals = locals or {}
        fromlist = fromlist or []
        try:
            # Check the date and reload if needed:
            self._update_dates(name, globals, locals, fromlist, level)
            # Try to load the module and update the dates if it works:
            result = NATIVE_IMPORTER(name, globals, locals, fromlist, level)
            # Module maybe loaded for the 1st time so we need to set the date
            self._update_dates(name, globals, locals, fromlist, level)
            return result
        except Exception, e:
            raise # Don't hide something that went wrong
    def _update_dates(self, name, globals, locals, fromlist, level):
        """
        Update all the dates associated to the statement import. A single
        import statement may import many modules.
        """
        self._reload_check(name, globals, locals, level)
        for fromlist_name in fromlist or []:
            # each "from x import y" name may itself be a submodule
            pname = "%s.%s" % (name, fromlist_name)
            self._reload_check(pname, globals, locals, level)
    def _reload_check(self, name, globals, locals, level):
        """
        Update the date associated to the module and reload the module if
        the file has changed.
        """
        module = sys.modules.get(name)
        file = self._get_module_file(module)
        if file:
            date = self._import_dates.get(file)
            new_date = None
            reload_mod = False
            mod_to_pack = False # Module turning into a package? (special case)
            try:
                new_date = os.path.getmtime(file)
            except:
                # the .py file vanished: it may have become a package (dir
                # with __init__.py) or a package may have become a module
                self._import_dates.pop(file, None) # Clean up
                # Handle module changing in package and
                #package changing in module:
                if file.endswith(".py"):
                    # Get path without file ext:
                    file = os.path.splitext(file)[0]
                    reload_mod = os.path.isdir(file) \
                        and os.path.isfile(file + self.PACKAGE_PATH_SUFFIX)
                    mod_to_pack = reload_mod
                else: # Package turning into module?
                    file += ".py"
                    reload_mod = os.path.isfile(file)
                if reload_mod:
                    new_date = os.path.getmtime(file) # Refresh file date
            if reload_mod or not date or new_date > date:
                self._import_dates[file] = new_date
            if reload_mod or (date and new_date > date):
                if mod_to_pack:
                    # Module turning into a package:
                    mod_name = module.__name__
                    del sys.modules[mod_name] # Delete the module
                    # Reload the module:
                    NATIVE_IMPORTER(mod_name, globals, locals, [], level)
                else:
                    reload(module)
    def _get_module_file(self, module):
        """
        Get the absolute path file associated to the module or None.
        """
        file = getattr(module, "__file__", None)
        if file:
            # Make path absolute if not:
            file = os.path.splitext(file)[0] + ".py" # Change .pyc for .py
            if file.endswith(self.PACKAGE_PATH_SUFFIX):
                file = os.path.dirname(file) # Track dir for packages
        return file
TRACK_IMPORTER = TrackImporter()  # module-level singleton used by custom_importer
|
LuqueDaniel/pybooru | pybooru/exceptions.py | Python | mit | 1,211 | 0 | # -*- coding: utf-8 -*-
"""pybooru.exceptions
This module contains Pybooru exceptions.
Classes:
* PybooruError -- Main Pybooru exception class.
* PybooruHTTPError -- Manages HTTP status errors.
* PybooruAPIError -- Manages all API errors.
"""
# __furute__ imports
from __future__ import absolute_import
# pybooru imports
from .resources import HTTP_STATUS_CODE
class PybooruError(Exception):
    """Base class for all Pybooru errors (catch this to handle any of them)."""
    pass
class PybooruHTTPError(PybooruError):
    """Error raised for HTTP failures, formatted with the status-code table."""

    def __init__(self, msg, http_code, url):
        """Build the formatted error message.

        Keyword arguments:
            msg (str): The error message.
            http_code (int): The HTTP status code.
            url (str): The URL.
        """
        super(PybooruHTTPError, self).__init__(msg, http_code, url)
        status_name, status_description = HTTP_STATUS_CODE[http_code]
        self._msg = "%s: %s - %s, %s - URL: %s" % (
            msg, http_code, status_name, status_description, url)

    def __str__(self):
        """Return the pre-formatted message."""
        return self._msg
class PybooruAPIError(PybooruError):
    """Error raised for failures reported by the booru API itself."""
    pass
|
lociii/symcon-index | heroku/urls.py | Python | mit | 376 | 0 | # -*- coding: UTF-8 -*-
from django.conf.urls import url, include
from django.contrib import admin
from django.contrib.staticfiles.urls impor | t staticfiles_urlpatterns
# URL routing for the Heroku deployment.
urlpatterns = [
    # everything not matched below is delegated to the symcon app
    url(r'^', include('symcon.urls')),
    # flat "static" pages managed through the admin
    url(r'^pages/', include('django.contrib.flatpages.urls')),
    url(r'^admin/', include(admin.site.urls)),
]
# serve static assets via the staticfiles helper (a no-op unless DEBUG is on,
# per Django's staticfiles documentation)
urlpatterns += staticfiles_urlpatterns()
zymtech/Scrapiders | jd_51job/jd_51job/spiders/jd.py | Python | mit | 3,113 | 0.00771 | # -*- coding : uft-8 -*-
import scrapy
import urllib
from jd_51job.items import Jd51JobItem
from scrapy.http import Request
from scrapy.exceptions import IgnoreRequest
import os
import datetime
import re
class Jd51JobSpider(scrapy.Spider):
    """Crawl 51job search results for every keyword in jobs_51job.txt and
    yield one Jd51JobItem per posting, following each link for the job
    description text."""
    name = "jd_51job"
    # NOTE(review): scrapy's offsite middleware reads ``allowed_domains``
    # (plural); this attribute as spelled has no effect -- confirm before renaming.
    allowed_domain = ["51job.com"]
    # One whitespace-separated keyword per entry in jobs_51job.txt (read at
    # import time).  BUG FIX: the original joined lines with ' ' and split on
    # ' ', leaving a trailing '\n' inside every keyword (quoted as %0A).
    datafile = os.path.join(os.getcwd(), "jobs_51job.txt")
    keywords = open(datafile, 'rb').read().split()
    # Legacy scalar fields kept for compatibility; the crawl uses ``urls``.
    keywordcount = 0
    keyword = keywords[keywordcount]
    keywordcode = urllib.quote(keyword)
    start_url = "http://search.51job.com/jobsearch/search_result.php?"
    # BUG FIX: the original quoted ``keyword`` (always the first entry)
    # instead of the loop variable, so every URL searched the same term.
    urls = []
    for kw in keywords:
        urls.append(start_url + 'keyword=' + urllib.quote(kw))

    def start_requests(self):
        """Kick off one request per keyword search URL."""
        for url in self.urls:
            yield scrapy.http.Request(url, callback=self.parse0, errback=self.errback)

    def parse0(self, response):
        """Read the pager on the first results page, then request every page."""
        print(response.url)
        pagestr = response.xpath('//div[@class="dw_page"]//span[@class="td"]/text()').extract()[0]
        page = re.search("[0-9]+", pagestr).group()  # unicode digit string
        for i in range(1, int(page) + 1):
            url = response.url + '&' + 'curr_page=' + str(i)
            yield scrapy.Request(url, callback=self.parse, errback=self.errback)

    def errback(self, response):
        """Drop failed requests instead of retrying them."""
        raise IgnoreRequest("ignore this request")

    def parse(self, response):
        """Extract the summary columns of every job row on a results page."""
        try:
            joblink = response.xpath('//div[@id="resultList"]/div[@class="el"]//p//span//a/@href').extract()
            title = response.xpath('//div[@id="resultList"]/div[@class="el"]//p[@class="t1 "]//a/text()').extract()
            company = response.xpath('//div[@id="resultList"]/div[@class="el"]//span[@class="t2"]/a/text()').extract()
            updatetime = response.xpath('//div[@id="resultList"]/div[@class="el"]//span[@class="t5"]/text()').extract()
            salary = response.xpath('//div[@id="resultList"]/div[@class="el"]//span[@class="t4"]/text()').extract()
            city = response.xpath('//div[@id="resultList"]/div[@class="el"]//span[@class="t3"]/text()').extract()
            # BUG FIX: was range(49), which silently dropped rows (or died
            # with an IndexError swallowed below) when a page had a
            # different number of results.
            for i in range(len(joblink)):
                item = Jd51JobItem()
                item['joblink'] = str(joblink[i])
                item['title'] = title[i]
                item['company'] = company[i]
                # prefix the current year to the site's date (presumably
                # "MM-DD" -- verify against the page markup)
                item['updatetime'] = datetime.datetime.now().strftime("%Y") + '-' + updatetime[i]
                item['salary'] = salary[i]
                item['city'] = city[i]
                item['crawltime'] = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
                yield Request(item['joblink'], meta={'pitem': item}, callback=self.parse_jobdetail)
        except BaseException as e:
            print(e)

    def parse_jobdetail(self, response):
        """Attach the job-description text to the item built in parse()."""
        item = response.meta['pitem']
        try:
            item['jobdetail'] = '<br>'.join(response.xpath('//div[@class="bmsg job_msg inbox"]/text()').extract()).encode('utf8')
        except BaseException:
            # best effort: leave jobdetail unset if the detail page is malformed
            pass
        return item
Jorge-Rodriguez/ansible | lib/ansible/modules/network/f5/bigip_apm_policy_fetch.py | Python | gpl-3.0 | 15,623 | 0.001088 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2018, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_apm_policy_fetch
short_description: Exports the APM policy or APM access profile from remote nodes.
description:
  - Exports the APM policy or APM access profile from remote nodes.
version_added: 2.8
options:
name:
description:
- The name of the APM policy or APM access profile exported to create a file on the remote device for downloading.
required: True
dest:
description:
- A directory to save the file into.
type: path
file:
description:
- The name of the file to be created on the remote device for downloading.
type:
description:
- Specifies the type of item to export from device.
choices:
- profile_access
- access_policy
default: profile_access
force:
description:
      - If C(no), the file will only be transferred if it does not exist in the destination.
default: yes
type: bool
partition:
description:
- Device partition to which contain APM policy or APM access profile to export.
default: Common
notes:
- Due to ID685681 it is not possible to execute ng_* tools via REST api on v12.x and 13.x, once this is fixed
this restriction will be removed.
- Requires BIG-IP >= 14.0.0
extends_documentation_fragment: f5
author:
- Wojciech Wypior (@wojtek0806)
'''
EXAMPLES = r'''
- name: Export APM access profile
bigip_apm_policy_fetch:
name: foobar
file: export_foo
dest: /root/download
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
- name: Export APM access policy
bigip_apm_policy_fetch:
name: foobar
file: export_foo
dest: /root/download
type: access_policy
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
- name: Export APM access profile, autogenerate name
bigip_apm_policy_fetch:
name: foobar
dest: /root/download
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
'''
RETURN = r'''
name:
description: Name of the APM policy or APM access profile to be exported.
returned: changed
type: str
sample: APM_policy_global
file:
description:
- Name of the exported file on the remote BIG-IP to download. If not
specified, then this will be a randomly generated filename.
returned: changed
type: str
sample: foobar_file
dest:
description: Local path to download exported APM policy.
returned: changed
type: str
sample: /root/downloads/profile-foobar_file.conf.tar.gz
type:
description: Set to specify type of item to export.
returned: changed
type: str
sample: access_policy
'''
import os
import tempfile
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
from distutils.version import LooseVersion
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import cleanup_tokens
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import exit_json
from library.module_utils.network.f5.common import fail_json
from library.module_utils.network.f5.common import transform_nam | e
from library.module_utils.network.f5.icontrol import download_file
from library.module_utils.network.f5.icontrol import | tmos_version
from library.module_utils.network.f5.icontrol import module_provisioned
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import cleanup_tokens
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import exit_json
from ansible.module_utils.network.f5.common import fail_json
from ansible.module_utils.network.f5.common import transform_name
from ansible.module_utils.network.f5.icontrol import download_file
from ansible.module_utils.network.f5.icontrol import tmos_version
from ansible.module_utils.network.f5.icontrol import module_provisioned
class Parameters(AnsibleF5Parameters):
    # This module only exports/downloads data, so no REST attribute
    # renaming (api_map) or API payload attributes are needed.
    api_map = {}
    api_attributes = []
    # Values reported back to the user in the module result.
    returnables = [
        'name',
        'file',
        'dest',
        'type',
        'force',
    ]
    # Nothing is ever updated in place on the device.
    updatables = []
class ApiParameters(Parameters):
    # Parameters as read back from the BIG-IP REST API; no translation needed.
    pass
class ModuleParameters(Parameters):
    """Parameters supplied via the user's task arguments.

    Adds derived values (auto-generated export filename, validated local
    destination path) and validates that the requested APM item actually
    exists on the remote BIG-IP.
    """

    def _item_exists(self):
        """Return True if the named policy/profile exists on the device.

        NOTE: this deliberately reads ``self._values['name']`` instead of
        the ``name`` property — the property itself calls
        ``_item_exists()``, so going through it here would recurse forever.
        (BUG FIX: the original used ``self.name`` and recursed.)
        """
        if self.type == 'access_policy':
            uri = 'https://{0}:{1}/mgmt/tm/apm/policy/access-policy/{2}'.format(
                self.client.provider['server'],
                self.client.provider['server_port'],
                transform_name(self.partition, self._values['name'])
            )
        else:
            uri = 'https://{0}:{1}/mgmt/tm/apm/profile/access/{2}'.format(
                self.client.provider['server'],
                self.client.provider['server_port'],
                transform_name(self.partition, self._values['name'])
            )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'items' in response and response['items'] != []:
            return True
        return False

    @property
    def file(self):
        # Use the user-supplied filename when given; otherwise generate a
        # random one once and cache it so repeated reads agree.
        if self._values['file'] is not None:
            return self._values['file']
        result = next(tempfile._get_candidate_names()) + '.tar.gz'
        self._values['file'] = result
        return result

    @property
    def fulldest(self):
        """Full local path the exported archive will be written to.

        Raises:
            F5ModuleError: if the destination directory does not exist,
                is not accessible, or is not writable.
        """
        result = None
        if os.path.isdir(self.dest):
            result = os.path.join(self.dest, self.file)
        else:
            if os.path.exists(os.path.dirname(self.dest)):
                result = self.dest
            else:
                try:
                    # os.path.exists() can return false in some
                    # circumstances where the directory does not have
                    # the execute bit for the current user set, in
                    # which case the stat() call will raise an OSError.
                    # BUG FIX: the original passed ``result`` (still None
                    # at this point) to os.path.dirname() here and in the
                    # error messages below; use the user-supplied
                    # destination instead.
                    os.stat(os.path.dirname(self.dest))
                except OSError as e:
                    if "permission denied" in str(e).lower():
                        raise F5ModuleError(
                            "Destination directory {0} is not accessible".format(os.path.dirname(self.dest))
                        )
                    raise F5ModuleError(
                        "Destination directory {0} does not exist".format(os.path.dirname(self.dest))
                    )
                else:
                    # The directory exists after all (exists() was fooled
                    # by missing execute permission); the path is usable.
                    result = self.dest
        if not os.access(os.path.dirname(result), os.W_OK):
            raise F5ModuleError(
                "Destination {0} not writable".format(os.path.dirname(result))
            )
        return result

    @property
    def name(self):
        # Validate existence on the device lazily, on first access.
        if not self._item_exists():
            raise F5ModuleError('The provided {0} with the name {1} does not exist on device.'.format(
                self.type, self._values['name'])
            )
        return self._values['name']
class Changes(Parameters):
    def to_return(self):
        """Build the dict of values reported back to the user."""
        result = {}
        try:
            for returnable in self.returnables:
                result[returnable] = getattr(self, returnable)
            result = self._filter_params(result)
        except Exception:
            # Best effort: if any returnable property raises, report
            # whatever was collected so far rather than failing the run.
            pass
        return result
class UsableChanges(Changes):
    # Changes in the form to be sent to the API; no transformation needed.
    pass
class ReportableChan |
blueburningcoder/nupic | tests/swarming/nupic/swarming/experiments/spatial_classification/description.py | Python | agpl-3.0 | 15,433 | 0.002592 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by the OPF Experiment Generator to generate the actual
description.py file by replacing $XXXXXXXX tokens with desired values.
This description.py file was generated by:
'/Users/ronmarianetti/nupic/eng/lib/python2.6/site-packages/nupicengine/frameworks/opf/expGenerator/ExpGenerator.pyc'
"""
from nupic.frameworks.opf.expdescriptionapi import ExperimentDescriptionAPI
from nupic.frameworks.opf.expdescriptionhelpers import (
updateConfigFromSubConfig,
applyValueGettersToContainer,
DeferredDictLookup)
from nupic.frameworks.opf.clamodelcallbacks import *
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.opfutils import (InferenceType,
InferenceElement)
from nupic.support import aggregationDivide
from nupic.frameworks.opf.opftaskdriver import (
IterationPhaseSpecLearnOnly,
IterationPhaseSpecInferOnly,
IterationPhaseSpecLearnAndInfer)
# Model Configuration Dictionary:
#
# Define the model parameters and adjust for any modifications if imported
# from a sub-experiment.
#
# These fields might be modified by a sub-experiment; this dict is passed
# between the sub-experiment and base experiment
#
#
# NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements
# within the config dictionary may be assigned futures derived from the
# ValueGetterBase class, such as DeferredDictLookup.
# This facility is particularly handy for enabling substitution of values in
# the config dictionary from other values in the config dictionary, which is
# needed by permutation.py-based experiments. These values will be resolved
# during the call to applyValueGettersToContainer(),
# which we call after the base experiment's config dictionary is updated from
# the sub-experiment. See ValueGetterBase and
# DeferredDictLookup for more details about value-getters.
#
# For each custom encoder parameter to be exposed to the sub-experiment/
# permutation overrides, define a variable in this section, using key names
# beginning with a single underscore character to avoid collisions with
# pre-defined keys (e.g., _dsEncoderFieldName2_N).
#
# Example:
# config = dict(
# _dsEncoderFieldName2_N = 70,
# _dsEncoderFieldName2_W = 5,
# dsEncoderSchema = [
# base=dict(
# fieldname='Name2', type='ScalarEncoder',
# name='Name2', minval=0, maxval=270, clipInput=True,
# n=DeferredDictLookup('_dsEncoderFieldName2_N'),
# w=DeferredDictLookup('_dsEncoderFieldName2_W')),
# ],
# )
# updateConfigFromSubConfig(config)
# applyValueGettersToContainer(config)
config = {
# Type of model that the rest of these parameters apply to.
'model': "CLA",
# Version that specifies the format of the config.
'version': 1,
# Intermediate variables used to compute fields in modelParams and also
# referenced from the control section.
'aggregationInfo': {
'fields': [],
'days': 0,
'hours': 0,
'microseconds': 0,
'milliseconds': 0,
'minutes': 0,
'months': 0,
'seconds': 0,
'weeks': 0,
'years': 0
},
'predictAheadTime': None,
# Model parameter dictionary.
'modelParams': {
# The type of inference that this model will perform
'inferenceType': 'NontemporalClassification',
'sensorParams': {
# Sensor diagnostic output verbosity control;
# if > 0: sensor region will print out on screen what it's sensing
# at each step 0: silent; >=1: some info; >=2: more info;
# >=3: even more info (see compute() in py/regions/RecordSensor.py)
'verbosity' : 0,
# Example:
# dsEncoderSchema = [
# DeferredDictLookup('__field_name_encoder'),
# ],
#
# (value generated from DS_ENCODER_SCHEMA)
'encoders': {
'address': {
'fieldname': u'address',
'n': 300,
'name': u'address',
'type': 'SDRCategoryEncoder',
'w': 21
},
'_classifierInput': {
'name': u'_classifierInput',
'fieldname': u'consumption',
'classifierOnly': True,
'clipInput': True,
'maxval': 200,
'minval': 0,
'n': 1500,
'type': 'ScalarEncoder',
'w': 21
},
'gym': {
'fieldname': u'gym',
'n': 300,
'name': u'gym',
'type': 'SDRCategoryEncoder',
'w': 21
},
'timestamp_dayOfWeek': {
'dayOfWeek': (7, 3),
'fieldname': u'timestamp',
'name': u'timestamp_dayOfWeek',
'type': 'DateEncoder'
},
'timestamp_timeOfDay': {
'fieldname': u'timestamp',
'name': u'timestamp_timeOfDay',
'timeOfDay': (7, 8),
'type': 'DateEncoder'
}
},
# A dictionary specifying the period for automatically-generated
# resets from a RecordSensor;
#
# None = disable automatically-generated resets (also disabled if
# all of the specified values evaluate to 0).
# Valid keys is the desired combination of the following:
# days, hours, minutes, seconds, milliseconds, microseconds, weeks
#
# Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
#
# (value generated from SENSOR_AUTO_RESET)
'sensorAutoReset' : None,
},
'spEnable': False,
'spParams': {
# SP diagnostic output verbosity control;
| # 0: silent; >=1: some info; >=2: more info;
'spVerbosity' : 0,
'globalInhibition': 1,
# Number of cell columns in the cortical region (same number for
# SP and TP)
# (see | also tpNCellsPerCol)
'columnCount': 2048,
'inputWidth': 0,
# SP inhibition control (absolute value);
# Maximum number of active columns in the SP region's output (when
# there are more, the weaker ones are suppressed)
'numActiveColumnsPerInhArea': 40,
'seed': 1956,
# potentialPct
# What percent of the columns's receptive field is available
# for potential synapses. At initialization time, we will
# choose potentialPct * (2*potentialRadius+1)^2
'potentialPct': 0.5,
# The default connected threshold. Any synapse whose
# permanence value is above the connected threshold is
# a "connected synapse", meaning it can contribu |
hellolintong/LinDouFm | resource/channel.py | Python | mit | 1,825 | 0.000548 | #!/usr/bin/env python
# encoding: utf-8
from flask.ext.restful import Resource, fields, marshal_with, marshal
from .base import BaseArgs, LengthField, ChannelKey
from database.channel.channel_model import get_channel_status, get_channel, update_channel, delete_channel
class ChannelQueryArgs(BaseArgs):
    """Accepted query arguments for channel lookups.

    Supports pagination (``start``/``end``), direct lookup by ``key``,
    name matching, and a playability filter.
    """

    def rules(self):
        # Register each accepted parameter with its expected type.
        argument_types = (
            ('start', int),
            ('end', int),
            ('key', ChannelKey),
            ('name', unicode),
            ('playable', bool),
        )
        for arg_name, arg_type in argument_types:
            self.parser.add_argument(arg_name, type=arg_type)
"""
class ChannelPatchArgs(BaseArgs):
def rules(self):
self.parser.add_argument('name', type=unicode)
self.parser.add_argument('update_num', type=int)
self.parser.add_argument('playable', type=bool)
"""
# Marshalling schema for the aggregate channel status response.
channel_status_fields = {
    'count': fields.Integer
}
# Marshalling schema for a single channel object; ``music_list`` is
# rendered as its length via the custom LengthField.
channel_fields = {
    'key': fields.String,
    'name': fields.String,
    'music_list': LengthField,
    'upload_date': fields.DateTime,
    'update_num': fields.Integer,
    'playable': fields.Raw,
}
class ChannelQueryResource(Resource):
    """REST endpoint for channels.

    A bare GET (no query arguments) returns overall channel status;
    otherwise the parsed arguments are used to filter the channel list.
    """

    def get(self):
        args = ChannelQueryArgs().args
        if not args:
            # No filters supplied: report the aggregate status instead.
            return marshal(get_channel_status(), channel_status_fields)
        return marshal(get_channel(**args), channel_fields)
"""
class ChannelResource( | Resource):
@authenticated('admin')
@marshal_with(channel_fields)
def patch(self, key):
args = ChannelPatchArgs().args
channel = get_channel(key=key)[0]
update_channel(channel, **args)
channel = get_channel(key=key)[0]
return channel
@authenticated('admin')
def delete(self, key):
channel = get_channel(key=key)[0]
delete_channel(channel)
"""
|
TD22057/T-Home | python/tHome/sma/Header.py | Python | bsd-2-clause | 2,886 | 0.033264 | #===========================================================================
#
# Common data packet structures. Used for requests and replies.
#
#===========================================================================
from .. import util
#===========================================================================
# Struct type codes
uint1 = "B"
uint2 = "H"
uint4 = "I"
uint8 = "Q"
int1 = "b"
int2 = "h"
int4 = "i"
int8 = "q"
#==============================================================================
class Header:
    """Common SMA packet header plus the shared base packet fields.

    Derived classes must create ``self.struct`` *before* calling this
    constructor, because the packet length fields are computed from
    ``len(self.struct)``.
    """
    _fields = [
        # Header fields
        ( uint4, 'hdrMagic' ),
        ( uint4, 'hdrUnknown1' ),
        ( uint4, 'hdrUnknown2' ),
        ( uint1, 'packetHi' ), # packet length in little endian hi word
        ( uint1, 'packetLo' ), # packet length in little endian low word
        ( uint4, 'signature' ),
        ( uint1, 'wordLen' ), # int( packetLen / 4 )
        ( uint1, 'hdrUnknown3' ),

        # Common packet fields
        ( uint2, 'destId', ),
        ( uint4, 'destSerial', ),
        ( uint2, 'destCtrl', ),
        ( uint2, 'srcId', ),
        ( uint4, 'srcSerial', ),
        ( uint2, 'srcCtrl', ),
        ( uint2, 'error', ),
        ( uint2, 'fragmentId', ),
        ( uint1, 'packetId', ),
        ( uint1, 'baseUnknown', ),
        ]

    _hdrSize = 20 # bytes for the header fields.

    # Class-level counter shared by all Header instances so consecutive
    # packets get distinct ids (1..255, then wraps back to 1).
    _nextPacketId = 0

    #------------------------------------------------------------------------
    def __init__( self ):
        # ``self.struct`` must already exist (created by the derived class)
        # so the packet length can be computed below.
        assert( self.struct )

        self.hdrMagic = 0x00414D53
        self.hdrUnknown1 = 0xA0020400
        self.hdrUnknown2 = 0x01000000
        self.signature = 0x65601000
        self.hdrUnknown3 = 0xA0

        # NOTE: self.struct must be created by the derived class.  That
        # allows this to compute the correct packet length and encode it.
        packetLen = len( self.struct ) - self._hdrSize
        self.packetHi = ( packetLen >> 8 ) & 0xFF
        self.packetLo = packetLen & 0xFF
        self.wordLen = int( packetLen / 4 )

        # Any destination - we send to a specific IP address so this
        # isn't important.
        self.destId = 0xFFFF
        self.destSerial = 0xFFFFFFFF
        self.destCtrl = 0x00

        self.srcId = 0x7d # TODO change this?
        self.srcSerial = 0x334657B0 # TODO change this?
        self.srcCtrl = 0x00

        self.error = 0
        self.fragmentId = 0
        self.baseUnknown = 0x80

        # Packet id is 1 byte so roll over at 256.
        # BUG FIX: the original did ``self._nextPacketId += 1``, which reads
        # the class attribute but writes an *instance* attribute, so the
        # shared counter never advanced and every packet got id 1.  Update
        # the class attribute so the counter is actually shared.
        Header._nextPacketId += 1
        if Header._nextPacketId == 256:
            Header._nextPacketId = 1
        self.packetId = Header._nextPacketId

        self._log = util.log.get( "sma" )

    #------------------------------------------------------------------------
    def bytes( self ):
        """Return the packed binary representation of this packet."""
        return self.struct.pack( self )

    #------------------------------------------------------------------------
#===========================================================================
|
solvo/organilab | src/laboratory/migrations/0035_auto_20180621_0020.py | Python | gpl-3.0 | 694 | 0.001441 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-06-21 06:20
from __future__ import unicode_literals
from django.db import migrations
import django.db.models.deletion
import mptt.fields
class Migration(migrations.Migration):
    # Auto-generated schema migration: removes Laboratory.related_labs and
    # re-declares Laboratory.organization as an MPTT tree foreign key.
    dependencies = [
        ('laboratory', '0034_group_perms'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='laboratory',
            name='related_labs',
        ),
        migrations.AlterField(
            model_name='laboratory',
            name='organization',
            field=mptt.fields.TreeForeignKey(on_delete=django.db.models.deletion.CASCADE, to='laboratory.OrganizationStructure'),
        ),
    ]
|
WoLpH/python-utils | _python_utils_tests/test_python_utils.py | Python | bsd-3-clause | 272 | 0.003676 | fr | om python_utils import __about__
def test_definitions():
    # The setup.py requires these metadata fields, so make sure each one
    # exists and is non-empty. :)
    required_attributes = ('__version__', '__author__',
                           '__author_email__', '__description__')
    for attribute in required_attributes:
        assert getattr(__about__, attribute)
|
liavkoren/djangoDev | tests/i18n/test_compilation.py | Python | bsd-3-clause | 4,924 | 0.002031 | import os
import stat
import unittest
from django.core.management import call_command, CommandError
from django.core.management.utils import find_command
from django.test import SimpleTestCase
from django.test import override_settings
from django.utils import translation
from django.utils._os import upath
from django.utils.six import StringIO
test_dir = os.path.abspath(os.path.join(os.path.dirname(upath(__file__)), 'commands'))
has_msgfmt = find_command('msgfmt')
@unittest.skipUnless(has_msgfmt, 'msgfmt is mandatory for compilation tests')
class MessageCompilationTests(SimpleTestCase):
    # Base class for compilemessages tests: runs each test from the i18n
    # test directory and restores the original cwd afterwards.
    def setUp(self):
        self._cwd = os.getcwd()
        self.addCleanup(os.chdir, self._cwd)
        os.chdir(test_dir)
    def rmfile(self, filepath):
        # Cleanup helper: remove a generated file if it exists.
        if os.path.exists(filepath):
            os.remove(filepath)
class PoFileTests(MessageCompilationTests):
    # Tests around problematic .po input files.
    LOCALE = 'es_AR'
    MO_FILE = 'locale/%s/LC_MESSAGES/django.mo' % LOCALE
    def test_bom_rejection(self):
        # A .po file starting with a BOM must be rejected and no .mo written.
        with self.assertRaises(CommandError) as cm:
            call_command('compilemessages', locale=[self.LOCALE], stdout=StringIO())
        self.assertIn("file has a BOM (Byte Order Mark)", cm.exception.args[0])
        self.assertFalse(os.path.exists(self.MO_FILE))
    def test_no_write_access(self):
        # If the target .mo file is read-only, a clear error is reported.
        mo_file_en = 'locale/en/LC_MESSAGES/django.mo'
        err_buffer = StringIO()
        # put file in read-only mode
        old_mode = os.stat(mo_file_en).st_mode
        os.chmod(mo_file_en, stat.S_IREAD)
        try:
            call_command('compilemessages', locale=['en'], stderr=err_buffer, verbosity=0)
            err = err_buffer.getvalue()
            self.assertIn("not writable location", err)
        finally:
            # Always restore the original file mode for later tests.
            os.chmod(mo_file_en, old_mode)
class PoFileContentsTests(MessageCompilationTests):
    # Ticket #11240
    LOCALE = 'fr'
    MO_FILE = 'locale/%s/LC_MESSAGES/django.mo' % LOCALE
    def setUp(self):
        super(PoFileContentsTests, self).setUp()
        # Remove the .mo file generated by the test when done.
        self.addCleanup(os.unlink, os.path.join(test_dir, self.MO_FILE))
    def test_percent_symbol_in_po_file(self):
        # A literal '%' in the .po contents must not break compilation.
        call_command('compilemessages', locale=[self.LOCALE], stdout=StringIO())
        self.assertTrue(os.path.exists(self.MO_FILE))
class PercentRenderingTests(MessageCompilationTests):
    # Ticket #11240 -- Testing rendering doesn't belong here but we are trying
    # to keep tests for all the stack together
    LOCALE = 'it'
    MO_FILE = 'locale/%s/LC_MESSAGES/django.mo' % LOCALE
    def setUp(self):
        super(PercentRenderingTests, self).setUp()
        # Remove the .mo file generated by the test when done.
        self.addCleanup(os.unlink, os.path.join(test_dir, self.MO_FILE))
    @override_settings(LOCALE_PATHS=(os.path.join(test_dir, 'locale'),))
    def test_percent_symbol_escaping(self):
        # Translated strings containing escaped '%%' must render verbatim,
        # not be treated as str formatting specs.
        from django.template import Template, Context
        call_command('compilemessages', locale=[self.LOCALE], stdout=StringIO())
        with translation.override(self.LOCALE):
            t = Template('{% load i18n %}{% trans "Looks like a str fmt spec %% o but shouldn\'t be interpreted as such" %}')
            rendered = t.render(Context({}))
            self.assertEqual(rendered, 'IT translation contains %% for the above string')
            t = Template('{% load i18n %}{% trans "Completed 50%% of all the tasks" %}')
            rendered = t.render(Context({}))
            self.assertEqual(rendered, 'IT translation of Completed 50%% of all the tasks')
@override_settings(LOCALE_PATHS=(os.path.join(test_dir, 'locale'),))
class MultipleLocaleCompilationTests(MessageCompilationTests):
    # Verifies the --locale option handles one or several locales.
    MO_FILE_HR = None
    MO_FILE_FR = None
    def setUp(self):
        super(MultipleLocaleCompilationTests, self).setUp()
        localedir = os.path.join(test_dir, 'locale')
        self.MO_FILE_HR = os.path.join(localedir, 'hr/LC_MESSAGES/django.mo')
        self.MO_FILE_FR = os.path.join(localedir, 'fr/LC_MESSAGES/django.mo')
        # Remove whichever .mo files the tests generate.
        self.addCleanup(self.rmfile, os.path.join(localedir, self.MO_FILE_HR))
        self.addCleanup(self.rmfile, os.path.join(localedir, self.MO_FILE_FR))
    def test_one_locale(self):
        call_command('compilemessages', locale=['hr'], stdout=StringIO())
        self.assertTrue(os.path.exists(self.MO_FILE_HR))
    def test_multiple_locales(self):
        call_command('compilemessages', locale=['hr', 'fr'], stdout=StringIO())
        self.assertTrue(os.path.exists(self.MO_FILE_HR))
        self.assertTrue(os.path.exists(self.MO_FILE_FR))
class CompilationErrorHandling(MessageCompilationTests):
    # Verifies msgfmt failures surface as CommandError.
    LOCALE = 'ja'
    MO_FILE = 'locale/%s/LC_MESSAGES/django.mo' % LOCALE
    def setUp(self):
        super(CompilationErrorHandling, self).setUp()
        self.addCleanup(self.rmfile, os.path.join(test_dir, self.MO_FILE))
    def test_error_reported_by_msgfmt(self):
        # The 'ja' catalog is deliberately broken; compilation must fail.
        with self.assertRaises(CommandError):
            call_command('compilemessages', locale=[self.LOCALE], stdout=StringIO())
asimshankar/tensorflow | tensorflow/contrib/distribute/python/combinations.py | Python | apache-2.0 | 15,137 | 0.005748 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Facilities for creating multiple test combinations.
Here is an example of testing various optimizers in Eager and Graph mode:
class AdditionExample(test.TestCase, parameterized.TestCase):
@combinations.generate(
combinations.combine(mode=["graph", "eager"],
optimizer=[AdamOptimizer(),
GradientDescentOptimizer()]))
def testOptimizer(self, optimizer):
... f(optimizer)...
This will run `testOptimizer` 4 times with the specified optimizers: 2 in
Eager and 2 in Graph mode.
The test will be provided with arguments that match the arguments of combine
by name. It is necessary to request all arguments, except for `mode`, which is
optional.
`combine()` function is available for creating a cross product of various
options. `times()` function exists for creating a product of N `combine()`-ed
results. See below.
"""
from __future__ import absolute_import
from __future__ | import division
from __future__ | import print_function
from collections import OrderedDict
import sys
import types
import unittest
from absl.testing import parameterized
import six
from tensorflow.contrib.cluster_resolver import TPUClusterResolver
from tensorflow.contrib.distribute.python import mirrored_strategy as mirrored_lib
from tensorflow.contrib.distribute.python import one_device_strategy as one_device_lib
from tensorflow.contrib.distribute.python import tpu_strategy as tpu_lib
from tensorflow.contrib.optimizer_v2 import adagrad as adagrad_v2
from tensorflow.contrib.optimizer_v2 import adam as adam_v2
from tensorflow.contrib.optimizer_v2 import gradient_descent as gradient_descent_v2
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.training import adagrad
from tensorflow.python.training import adam
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import rmsprop
from tensorflow.python.util import tf_inspect
GPU_TEST = "test_gpu" in sys.argv[0]
TPU_TEST = "test_tpu" in sys.argv[0]
def generate(combinations):
  """A decorator for generating test cases of a test method or a test class.

  Args:
    combinations: a list of dictionaries created using combine() and times().

  Restrictions:
   -- the "mode" argument can be either "eager" or "graph".  It's "graph" by
      default.
   -- arguments of the test method must match by name to get the corresponding
      value of the combination.  Tests must accept all arguments except the
      "mode", "required_tpu" and "required_gpus".
   -- "distribution" argument is special and optional.  It is meant for passing
      instances of DistributionStrategy.  Each instance is to be passed as via
      `NamedDistribution`.  If using "distribution", "required_gpus" and
      "required_tpu" should be specified via the NamedDistribution instance,
      rather than as separate arguments.
   -- "required_tpu" argument is special and optional.  If not `None`, then the
      test will be skipped if TPUs aren't available.
   -- "required_gpus" argument is special and optional.  If not `None`, then the
      test will be skipped if the specified number of GPUs aren't available.

  Returns:
    a decorator that will cause the test method or the test class to be run
    under the specified conditions.

  Raises:
    ValueError - if "mode" argument wasn't either "eager" or "graph" or if other
      arguments were not accepted by the test method.
  """

  def decorator(test_method_or_class):
    """The decorator to be returned."""

    # Generate good test names that can be used with --test_filter.
    named_combinations = []
    for combination in combinations:
      # We use OrderedDicts in `combine()` and `times()` to ensure stable
      # order of keys in each dictionary.
      assert isinstance(combination, OrderedDict)
      # Build a name suffix from the alphanumeric parts of each key/value.
      name = "".join([
          "_{}_{}".format(
              "".join(filter(str.isalnum, key)),
              "".join(filter(str.isalnum, str(value))))
          for key, value in combination.items()
      ])
      named_combinations.append(
          OrderedDict(
              list(combination.items()) + [("testcase_name",
                                            "_test{}".format(name))]))

    if isinstance(test_method_or_class, type):
      # Decorating a class: replace every test_* method with a family of
      # parameterized methods, one per combination.
      class_object = test_method_or_class
      class_object._test_method_ids = test_method_ids = {}
      for name, test_method in six.iteritems(class_object.__dict__.copy()):
        if (name.startswith(unittest.TestLoader.testMethodPrefix) and
            isinstance(test_method, types.FunctionType)):
          delattr(class_object, name)
          methods = {}
          parameterized._update_class_dict_for_param_test_case(
              class_object.__name__, methods, test_method_ids, name,
              parameterized._ParameterizedTestIter(
                  _augment_with_special_arguments(test_method),
                  named_combinations, parameterized._NAMED, name))
          for method_name, method in six.iteritems(methods):
            setattr(class_object, method_name, method)

      return class_object
    else:
      # Decorating a single test method.
      test_method = _augment_with_special_arguments(test_method_or_class)
      return parameterized.named_parameters(*named_combinations)(test_method)

  return decorator
def _augment_with_special_arguments(test_method):
def decorated(self, **kwargs):
"""A wrapped test method that treats some arguments in a special way."""
mode = kwargs.pop("mode", "graph")
distribution = kwargs.get("distribution", None)
required_tpu = kwargs.pop("required_tpu", False)
required_gpus = kwargs.pop("required_gpus", None)
if distribution:
assert required_gpus is None, (
"Do not use `required_gpus` and `distribution` together.")
assert required_tpu is False, (
"Do not use `required_tpu` and `distribution` together.")
required_gpus = distribution.required_gpus
required_tpu = distribution.required_tpu
if required_tpu and not TPU_TEST:
self.skipTest("Test requires a TPU, but it's not available.")
if not required_tpu and TPU_TEST:
self.skipTest("Test that doesn't require a TPU.")
if not required_gpus:
if GPU_TEST:
self.skipTest("Test that doesn't require GPUs.")
elif context.num_gpus() < required_gpus:
# TODO(priyag): Consider allowing tests in graph mode using soft
# placement.
self.skipTest(
"{} GPUs are not available for this test. {} GPUs are available".
format(required_gpus, context.num_gpus()))
# At this point, `kwargs` doesn't have `required_gpus` or `required_tpu`
# that the user might have specified. `kwargs` still has `mode`, which
# the test is allowed to accept or ignore.
requested_arguments = tf_inspect.getfullargspec(test_method).args
missing_arguments = set(list(kwargs.keys()) + ["self"]).difference(
set(requested_arguments + ["mode"]))
if missing_arguments:
raise ValueError("The test is missing arguments {} .".format(
missing_arguments))
kwargs_to_pass = {}
for arg in requested_arguments:
if arg == "self":
kwargs_to_pass[arg] = self
else:
kwargs_to_pass[arg] = kwargs[arg]
if mode == "eager":
with context.eager_mode():
if distribution:
kwargs_to_pass["distribution"] = dist |
noslenfa/tdjangorest | uw/lib/python2.7/site-packages/rest_framework/tests/test_serializer.py | Python | apache-2.0 | 65,306 | 0.001348 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.db.models.fields import BLANK_CHOICE_DASH
from django.test import TestCase
from django.utils.datastructures import MultiValueDict
from django.utils.translation import ugettext_lazy as _
from rest_framework import serializers, fields, relations
from rest_framework.tests.models import (HasPositiveIntegerAsChoice, Album, ActionItem, Anchor, BasicModel,
BlankFieldModel, BlogPost, BlogPostComment, Book, CallableDefaultValueModel, DefaultValueModel,
ManyToManyModel, Person, ReadOnlyManyToManyModel, Photo, RESTFrameworkModel)
from rest_framework.tests.models import BasicModelSerializer
import datetime
import pickle
class SubComment(object):
    # Minimal helper object exposing a single ``sub_comment`` attribute.
    def __init__(self, sub_comment):
        self.sub_comment = sub_comment
class Comment(object):
    """Plain value object representing a comment, used as a serializer target."""

    def __init__(self, email, content, created):
        self.email = email
        self.content = content
        # Fall back to "now" when no creation time is supplied.
        self.created = created or datetime.datetime.now()

    def __eq__(self, other):
        # Two comments are equal when all compared attributes match.
        for attr in ('email', 'content', 'created'):
            if getattr(self, attr) != getattr(other, attr):
                return False
        return True

    def get_sub_comment(self):
        # Always return the same canned sub-comment.
        return SubComment('And Merry Christmas!')
class CommentSerializer(serializers.Serializer):
    # Non-model serializer for Comment; ``sub_comment`` is read-only and
    # traverses the dotted source ``get_sub_comment.sub_comment``.
    email = serializers.EmailField()
    content = serializers.CharField(max_length=1000)
    created = serializers.DateTimeField()
    sub_comment = serializers.Field(source='get_sub_comment.sub_comment')
    def restore_object(self, data, instance=None):
        # Create a new Comment, or update the provided instance in place.
        if instance is None:
            return Comment(**data)
        for key, val in data.items():
            setattr(instance, key, val)
        return instance
class NamesSerializer(serializers.Serializer):
    # Nested name parts; ``last`` and ``initials`` default to empty strings.
    first = serializers.CharField()
    last = serializers.CharField(required=False, default='')
    initials = serializers.CharField(required=False, default='')
class PersonIdentifierSerializer(serializers.Serializer):
    # SSN plus an optional nested names structure.
    ssn = serializers.CharField()
    names = NamesSerializer(source='names', required=False)
class BookSerializer(serializers.ModelSerializer):
    # ISBN must be exactly 13 digits; a custom 'invalid' message is supplied.
    isbn = serializers.RegexField(regex=r'^[0-9]{13}$', error_messages={'invalid': 'isbn has to be exact 13 numbers'})
    class Meta:
        model = Book
class ActionItemSerializer(serializers.ModelSerializer):
    # Plain model serializer with the default restore behaviour.
    class Meta:
        model = ActionItem
class ActionItemSerializerCustomRestore(serializers.ModelSerializer):
    # Same model serializer but with a hand-written restore_object.
    class Meta:
        model = ActionItem
    def restore_object(self, data, instance=None):
        # Create a new ActionItem, or update the provided instance in place.
        if instance is None:
            return ActionItem(**data)
        for key, val in data.items():
            setattr(instance, key, val)
        return instance
class PersonSerializer(serializers.ModelSerializer):
    # ``info`` is exposed read-only from the model attribute of the same name.
    info = serializers.Field(source='info')
    class Meta:
        model = Person
        fields = ('name', 'age', 'info')
        read_only_fields = ('age',)
class NestedSerializer(serializers.Serializer):
    # Read-only nested wrapper exposing an ``info`` attribute.
    info = serializers.Field()
class ModelSerializerWithNestedSerializer(serializers.ModelSerializer):
    # ``source='*'`` nests the whole object under the ``nested`` key.
    nested = NestedSerializer(source='*')
    class Meta:
        model = Person
class NestedSerializerWithRenamedField(serializers.Serializer):
    # Exposes the object's ``info`` attribute under a different field name.
    renamed_info = serializers.Field(source='info')
class ModelSerializerWithNestedSerializerWithRenamedField(serializers.ModelSerializer):
    # As above, but the nested serializer renames the exposed field.
    nested = NestedSerializerWithRenamedField(source='*')
    class Meta:
        model = Person
class PersonSerializerInvalidReadOnly(serializers.ModelSerializer):
    """
    Testing for #652.

    Deliberately misconfigured: ``info`` is both an explicitly declared
    field and listed in ``read_only_fields``.
    """
    info = serializers.Field(source='info')
    class Meta:
        model = Person
        fields = ('name', 'age', 'info')
        read_only_fields = ('age', 'info')
class AlbumsSerializer(serializers.ModelSerializer):
    # Restricts serialization to the single ``title`` field.
    class Meta:
        model = Album
        fields = ['title'] # lists are also valid options
class PositiveIntegerAsChoiceSerializer(serializers.ModelSerializer):
    # Restricts serialization to the integer-valued choice field.
    class Meta:
        model = HasPositiveIntegerAsChoice
        fields = ['some_integer']
class BasicTests(TestCase):
def setUp(self):
self.comment = Comment(
'tom@example.com',
'Happy new year!',
datetime.datetime(2012, 1, 1)
)
self.actionitem = ActionItem(title='Some to do item',)
self.data = {
'email': 'tom@example.com',
'content': 'Happy new year!',
'created': datetime.datetime(2012, 1, 1),
'sub_comment': 'This wont change'
}
self.expected = {
'email': 'tom@example.com',
'content': 'Happy new year!',
'created': datetime.datetime(2012, 1, 1),
'sub_comment': 'And Merry Christmas!'
}
self.person_data = {'name': 'dwight', 'age': 35}
self.person = Person(**self.person_data)
self.person.save()
def test_empty(self):
serializer = CommentSerializer()
expected = {
'email': '',
'content': '',
'created': None
}
self.assertEqual(serializer.data, expected)
def test_retrieve(self):
serializer = CommentSerializer(self.comment)
self.assertEqual(serializer.data, self.expected)
def test_create(self):
serializer = CommentSerializer(data=self.data)
expected = self.comment
self.assertEqual(serializer.is_valid(), True)
self.assertEqual(serializer.object, expected)
self.assertFalse(serializer.object is expected)
self.assertEqual(serializer.data['sub_comment'], 'And Merry Christmas!')
    def test_create_nested(self):
        """Test a serializer with nested data."""
        names = {'first': 'John', 'last': 'Doe', 'initials': 'jd'}
        data = {'ssn': '1234567890', 'names': names}
        serializer = PersonIdentifierSerializer(data=data)

        self.assertEqual(serializer.is_valid(), True)
        self.assertEqual(serializer.object, data)
        # The deserialized object is a copy, not the input dict itself.
        self.assertFalse(serializer.object is data)
        self.assertEqual(serializer.data['names'], names)
def test_cr | eate_partial_nested(self):
| """Test a serializer with nested data which has missing fields."""
names = {'first': 'John'}
data = {'ssn': '1234567890', 'names': names}
serializer = PersonIdentifierSerializer(data=data)
expected_names = {'first': 'John', 'last': '', 'initials': ''}
data['names'] = expected_names
self.assertEqual(serializer.is_valid(), True)
self.assertEqual(serializer.object, data)
self.assertFalse(serializer.object is expected_names)
self.assertEqual(serializer.data['names'], expected_names)
    def test_null_nested(self):
        """Test a serializer with a nonexistent nested field"""
        data = {'ssn': '1234567890'}
        serializer = PersonIdentifierSerializer(data=data)

        self.assertEqual(serializer.is_valid(), True)
        self.assertEqual(serializer.object, data)
        self.assertFalse(serializer.object is data)
        # Absent nested data serializes as an explicit None.
        expected = {'ssn': '1234567890', 'names': None}
        self.assertEqual(serializer.data, expected)
    def test_update(self):
        # Deserializing WITH an instance updates that same object in place.
        serializer = CommentSerializer(self.comment, data=self.data)
        expected = self.comment

        self.assertEqual(serializer.is_valid(), True)
        self.assertEqual(serializer.object, expected)
        # Identity (not just equality) is preserved on update.
        self.assertTrue(serializer.object is expected)
        self.assertEqual(serializer.data['sub_comment'], 'And Merry Christmas!')
def test_partial_update(self):
msg = 'Merry New Year!'
partial_data = {'content': msg}
serializer = CommentSerializer(self.comment, data=partial_data)
self.assertEqual(serializer.is_valid(), False)
serializer = CommentSerializer(self.comment, data=partial_data, partial=True)
expected = self.comment
self.assertEqual(serializer.is_valid(), True)
self.assertEqual(serializer.object, expected)
self.assertTrue(serializer.object is expected)
self.assertEqual(serializer.data['c |
huggingface/transformers | tests/longformer/test_modeling_longformer.py | Python | apache-2.0 | 29,819 | 0.003622 | # coding=utf-8
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import LongformerConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ..test_configuration_common import ConfigTester
from ..test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerSelfAttention,
)
class LongformerModelTester:
    def __init__(
        self,
        parent,
    ):
        """Store small, fixed hyper-parameters so the Longformer unit tests run fast."""
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 5
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
        self.attention_window = 4

        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but LongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window + 1` locations
        # (assuming no token with global attention, otherwise the last dimension of attentions
        # is x + self.attention_window + 1, where x is the number of tokens with global attention)
        self.key_length = self.attention_window + 2
    def prepare_config_and_inputs(self):
        """Build a config plus random input tensors/labels for the model-check helpers."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        """Return a LongformerConfig populated from this tester's hyper-parameters."""
        return LongformerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            attention_window=self.attention_window,
        )
    def create_and_check_attention_mask_determinism(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check that an all-ones attention mask gives the same output as no mask."""
        model = LongformerModel(config=config)
        model.to(torch_device)
        model.eval()

        attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
        output_with_mask = model(input_ids, attention_mask=attention_mask)["last_hidden_state"]
        output_without_mask = model(input_ids)["last_hidden_state"]
        # Only a slice is compared, with a loose tolerance for float noise.
        self.parent.assertTrue(torch.allclose(output_with_mask[0, 0, :5], output_without_mask[0, 0, :5], atol=1e-4))
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check forward passes with/without masks and the output shapes."""
        model = LongformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def create_and_check_model_with_global_attention_mask(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = LongformerModel(config=config)
model.to(torch_device)
model.eval()
global_attention_mask = input_mask.clone()
global_attention_mask[:, input_mask.shape[-1] // 2] = 0
global_attention_mask = global_attention_mask.to(torch_device)
result = model(
input_ids,
attention_mask=input_mask,
global_attention_mask=global_attention_mask,
token_type_ids=token_type_ids,
)
result = model(input_ids, token_type_ids=token_type_ids, global_attention_mask=global_attent | ion_mask)
result = model(input_ids, global_attention_mask=global_attention_mask)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def create_and_check_for_masked_lm(
self, config, input_ids, to | ken_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = LongformerForMaskedLM(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check the QA head returns start/end logits of shape (batch, seq_len)."""
        model = LongformerForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            global_attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def create_and_check_for_sequence_classification(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_l |
CHT5/program-y | bots/y-bot/src/test/extensions/banking/test_balance.py | Python | mit | 467 | 0.004283 | import unittest
from extensions.banking.balance import BankingBalanceExtension
class | BankBalanceExtensionTests(unittest.TestCase):
def setUp(self):
self.bot = None
self.clientid = "testid"
def test_balance(self):
balance = BankingBalanceExtension()
self.assertIsNo | tNone(balance)
result = balance.execute(self.bot, self.clientid, "NOW")
self.assertIsNotNone(result)
self.assertEqual("0.00", result) |
notapresent/rtrss | tests/test_storage_util.py | Python | apache-2.0 | 4,598 | 0.000217 | import unittest
import os
from testfixtures import TempDirectory
from mock import MagicMock, patch, call
import requests
import httplib2
import googleapiclient.errors
from tests import AttrDict
from rtrss.storage import util
class HttpUtilTestCase(unittest.TestCase):
    """Unit tests for rtrss.storage.util's retry predicate and retry decorator."""

    def test_is_retryable_returns_false_on_random_exception(self):
        # Note: passes the exception *class*, matching is_retryable's contract here.
        exc = Exception
        self.assertFalse(util.is_retryable(exc))

    def test_is_retryable_returns_false_on_requests_404(self):
        resp = requests.Response()
        resp.status_code = 404
        exc = requests.RequestException(response=resp)
        self.assertFalse(util.is_retryable(exc))

    def test_is_retryable_returns_true_on_requests_500(self):
        resp = requests.Response()
        resp.status_code = 500
        exc = requests.RequestException(response=resp)
        self.assertTrue(util.is_retryable(exc))

    def test_is_retryable_returns_false_on_httplib2_404(self):
        resp = httplib2.Response({'status': 404})
        exc = googleapiclient.errors.HttpError(resp, '')
        self.assertFalse(util.is_retryable(exc))

    def test_is_retryable_returns_true_on_httplib2_500(self):
        resp = httplib2.Response({'status': 500})
        exc = googleapiclient.errors.HttpError(resp, '')
        self.assertTrue(util.is_retryable(exc))

    def test_retry_on_exception_retries(self):
        # The decorated callable always raises, so it should be invoked
        # exactly `tries` times before the exception propagates.
        exc = Exception('Boo!')
        func = MagicMock(side_effect=exc)
        retry_count = 3
        decorated = util.retry_on_exception(
            retryable=lambda e: True,
            tries=retry_count,
            delay=0.01)(func)
        try:
            decorated()
        except type(exc):
            pass
        expected = [call() for _ in range(retry_count)]
        self.assertEqual(func.call_args_list, expected)
class LockedOpenTestCase(unittest.TestCase):
    """Tests for util.locked_open's shared-read / exclusive-write locking.

    (Two lines were mangled by stray ' | ' separators in the source:
    ``self.tempdir.path`` and the ``test_returns_file_object`` header —
    restored here.)
    """

    filename = 'testfile.txt'
    test_data = 'random text'

    def setUp(self):
        self.tempdir = TempDirectory()
        self.filepath = os.path.join(self.tempdir.path, self.filename)

    def tearDown(self):
        self.tempdir.cleanup()

    def test_returns_file_object(self):
        with util.locked_open(self.filepath, util.M_WRITE) as f:
            self.assertIsInstance(f, file)

    def test_concurrent_read(self):
        # Two simultaneous readers are allowed.
        self.tempdir.write(self.filename, self.test_data)
        with util.locked_open(self.filepath, util.M_READ) as f1:
            with util.locked_open(self.filepath, util.M_READ) as f2:
                self.assertEqual(self.test_data, f1.read())
                self.assertEqual(self.test_data, f2.read())

    def test_non_blocking_read_during_write_raises(self):
        with util.locked_open(self.filepath, util.M_WRITE):
            with self.assertRaises(IOError):
                util.locked_open(self.filepath,
                                 util.M_READ,
                                 blocking=False).__enter__()

    def test_non_blocking_write_during_read_raises(self):
        self.tempdir.write(self.filename, self.test_data)
        with util.locked_open(self.filepath, util.M_READ):
            with self.assertRaises(IOError):
                util.locked_open(self.filepath,
                                 util.M_WRITE,
                                 blocking=False).__enter__()

    def test_read(self):
        self.tempdir.write(self.filename, self.test_data)
        with util.locked_open(self.filepath, util.M_READ) as f:
            self.assertEqual(self.test_data, f.read())

    def test_write(self):
        with util.locked_open(self.filepath, util.M_WRITE) as f:
            f.write(self.test_data)
        self.assertEqual(self.test_data, self.tempdir.read(self.filename))
class DownloadAndSaveKeyFileTestCase(unittest.TestCase):
    """Tests for util.download_and_save_keyfile with requests.get mocked out."""

    filename = 'testfile.txt'
    test_data = 'random text'
    url = 'test url'

    def setUp(self):
        self.dir = TempDirectory()
        self.filepath = os.path.join(self.dir.path, self.filename)

    def tearDown(self):
        self.dir.cleanup()

    @patch('rtrss.storage.util.requests.get')
    def test_calls_requests_get(self, mocked_get):
        # AttrDict stands in for a requests.Response with a .content attribute.
        mocked_get.return_value = AttrDict({'content': self.test_data})
        util.download_and_save_keyfile(self.url, self.filepath)
        mocked_get.assert_called_once_with(self.url)

    @patch('rtrss.storage.util.requests.get')
    def test_store_result(self, mocked_get):
        mocked_get.return_value = AttrDict({'content': self.test_data})
        util.download_and_save_keyfile(self.url, self.filepath)
        self.assertEqual(self.test_data, self.dir.read(self.filename))
|
wen96/django-boilerplate | my_project/settings/wsgi.py | Python | mit | 393 | 0 | """
WSGI config for my_project project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os

# (The django import and the settings-module string below were mangled by
# stray ' | ' separators in the source — restored here.)
from django.core.wsgi import get_wsgi_application

# Default to the project settings module unless the environment overrides it.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings.config")

application = get_wsgi_application()
|
bondar-pavel/infoblox-client | infoblox_client/tests/base.py | Python | apache-2.0 | 105 | 0 | import | unittest
class TestCase(unittest.TestCase):
    """Test case base class for all unit tests."""
|
memo/tensorflow | tensorflow/python/summary/text_summary_test.py | Python | apache-2.0 | 2,226 | 0.005391 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops as framework_ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import googletest
# (Module name was mangled by a stray ' | ' separator — restored.)
from tensorflow.python.summary import text_summary
class TextPluginTest(test_util.TensorFlowTestCase):
  """Test the Text Summary API.

  These tests are focused on testing the API design of the text_summary method.
  It doesn't test the PluginAsset and tensors registry functionality, because
  that is better tested by the text_plugin test that actually consumes that
  metadata.
  """

  def testTextSummaryAPI(self):
    with self.test_session():

      # Scalar/0-d numeric tensors are rejected.
      # (This `with` line was mangled by a stray ' | ' separator — restored.)
      with self.assertRaises(ValueError):
        num = array_ops.constant(1)
        text_summary.text_summary("foo", num)

      # The API accepts vectors.
      arr = array_ops.constant(["one", "two", "three"])
      summ = text_summary.text_summary("foo", arr)
      self.assertEqual(summ.op.type, "TensorSummary")

      # the API accepts scalars
      summ = text_summary.text_summary("foo", array_ops.constant("one"))
      self.assertEqual(summ.op.type, "TensorSummary")

  def testTextSummaryCollections(self):
    # An explicit empty collections list keeps the summary out of GraphKeys.SUMMARIES.
    text_summary.text_summary("bar", array_ops.constant("2"), collections=[])
    summaries = framework_ops.get_collection(framework_ops.GraphKeys.SUMMARIES)
    self.assertEqual(len(summaries), 0)
if __name__ == "__main__":
googletest.main()
|
GMDSP-Linked-Data/RDF-work-in-progress | salford/gmdspconverters/streetlighting.py | Python | mit | 2,324 | 0.006454 | __author__ = 'jond'
import csv
import re
from rdflib import URIRef, Literal, Namespace, RDF
from gmdspconverters import utils
STREETLIGHT = Namespace('http://data.gmdsp.org.uk/id/salford/streetlighting/')
STREETLIGHT_ONT = Namespace('http://data.gmdsp.org.uk/def/council/streetlighting/')
def convert(graph, input_path):
    """Read the street-lighting CSV at *input_path* and add each row to *graph* as RDF.

    Rows without a "Feature ID" are skipped. (Two ``graph.add`` calls were
    mangled by stray ' | ' separators in the source — restored here. The CSV
    file is now opened via ``with`` so the handle is always closed.)
    """
    with open(input_path, mode='r') as csv_file:
        reader = csv.DictReader(csv_file)
        for row in reader:
            if row["Feature ID"]:
                sl = STREETLIGHT[utils.idify(row["Feature ID"])]
                graph.add((sl, RDF.type, STREETLIGHT_ONT["Streetlight"]))
                graph.add((sl, utils.RDFS['label'], Literal("Streetlight with ID " + row["Feature ID"])))

                address = utils.idify(row["Feature ID"])
                graph.add((sl, utils.VCARD['hasAddress'], STREETLIGHT["address/"+address]))

                # now add the address VCARD
                vcard = STREETLIGHT["address/"+address]
                graph.add((vcard, RDF.type, utils.VCARD["Location"]))
                graph.add((vcard, utils.RDFS['label'], Literal("Address of streetlight with ID " + row["Feature ID"])))
                graph.add((vcard, utils.VCARD['street-address'], Literal(row["RoadName"])))
                #graph.add((vcard, utils.VCARD['postal-code'], Literal(row["POSTCODE"])))
                #graph.add((vcard, utils.POST['postcode'], URIRef(utils.convertpostcodeto_osuri(row["POSTCODE"]))))

                # location information
                graph.add((sl, utils.OS["northing"], Literal(row["Northing"])))
                graph.add((sl, utils.OS["easting"], Literal(row["Easting"])))
                # add conversion for lat/long
                lat_long = utils.ENtoLL84(float(row["Easting"]), float(row["Northing"]))
                graph.add((sl, utils.GEO["long"], Literal(lat_long[0])))
                graph.add((sl, utils.GEO["lat"], Literal(lat_long[1])))

                # street light specific stuff
                if row["Lamp Wattage"]:
                    # Pull the first run of digits, e.g. "70W" -> "70".
                    watts = re.findall('\d+', row["Lamp Wattage"])[0]
                    graph.add((sl, STREETLIGHT_ONT['wattage'], Literal(watts)))
                graph.add((sl, STREETLIGHT_ONT['lampType'], Literal(row["Lamp Type"])))
                if row["Mounting Height"]:
                    height = re.findall('\d+', row["Mounting Height"])[0]
                    graph.add((sl, STREETLIGHT_ONT['columnHeight'], Literal(height)))
|
argivaitv/argivaitv | plugin.video.salts/scrapers/local_scraper.py | Python | gpl-2.0 | 7,215 | 0.003881 | """
SALTS XBMC Addon
Copyright (C) 2014 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import scraper
import json
from salts_lib import kodi
import xbmc
import urlparse
from salts_lib import log_utils
from salts_lib.constants import VIDEO_TYPES
from salts_lib.constants import FORCE_NO_MATCH
from salts_lib.constants import SORT_KEYS
BASE_URL = ''
class Local_Scraper(scraper.Scraper):
    """Scraper that sources streams from the local XBMC/Kodi video library.

    All lookups go through XBMC's built-in JSON-RPC interface
    (``xbmc.executeJSONRPC``); no network access is performed.
    (Two JSON-RPC command strings in ``search`` were mangled by stray ' | '
    separators in the source — restored here.)
    """

    def __init__(self, timeout=scraper.DEFAULT_TIMEOUT):
        # def_quality is an index into SORT_KEYS['quality'] (set via addon settings).
        self.base_url = kodi.get_setting('%s-base_url' % (self.get_name()))
        self.def_quality = int(kodi.get_setting('%s-def-quality' % (self.get_name())))

    @classmethod
    def provides(cls):
        return frozenset([VIDEO_TYPES.TVSHOW, VIDEO_TYPES.EPISODE, VIDEO_TYPES.MOVIE])

    @classmethod
    def get_name(cls):
        return 'Local'

    def resolve_link(self, link):
        # Library file paths are directly playable; nothing to resolve.
        return link

    def format_source_label(self, item):
        return '[%s] %s (%s views)' % (item['quality'], item['host'], item['views'])

    def get_sources(self, video):
        """Return hoster dicts for *video* from the local library, if present."""
        source_url = self.get_url(video)
        hosters = []
        if source_url and source_url != FORCE_NO_MATCH:
            params = urlparse.parse_qs(source_url)
            if video.video_type == VIDEO_TYPES.MOVIE:
                cmd = '{"jsonrpc": "2.0", "method": "VideoLibrary.GetMovieDetails", "params": {"movieid": %s, "properties" : ["file", "playcount", "streamdetails"]}, "id": "libMovies"}'
                result_key = 'moviedetails'
            else:
                cmd = '{"jsonrpc": "2.0", "method": "VideoLibrary.GetEpisodeDetails", "params": {"episodeid": %s, "properties" : ["file", "playcount", "streamdetails"]}, "id": "libTvShows"}'
                result_key = 'episodedetails'

            run = cmd % (params['id'][0])
            meta = xbmc.executeJSONRPC(run)
            meta = json.loads(meta)
            log_utils.log('Source Meta: %s' % (meta), log_utils.LOGDEBUG)
            if 'result' in meta and result_key in meta['result']:
                details = meta['result'][result_key]
                # Default quality label from the configured index; overridden
                # below when the library knows the actual video width.
                def_quality = [item[0] for item in sorted(SORT_KEYS['quality'].items(), key=lambda x:x[1])][self.def_quality]
                host = {'multi-part': False, 'class': self, 'url': details['file'], 'host': 'XBMC Library', 'quality': def_quality, 'views': details['playcount'], 'rating': None, 'direct': True}
                stream_details = details['streamdetails']
                if len(stream_details['video']) > 0 and 'width' in stream_details['video'][0]:
                    host['quality'] = self._width_get_quality(stream_details['video'][0]['width'])
                hosters.append(host)
        return hosters

    def get_url(self, video):
        return super(Local_Scraper, self)._default_get_url(video)

    def _get_episode_url(self, show_url, video):
        """Find the library URL for an episode: match by S/E number, then by title."""
        params = urlparse.parse_qs(show_url)
        cmd = '{"jsonrpc": "2.0", "method": "VideoLibrary.GetEpisodes", "params": {"tvshowid": %s, "season": %s, "filter": {"field": "%s", "operator": "is", "value": "%s"}, \
        "limits": { "start" : 0, "end": 25 }, "properties" : ["title", "season", "episode", "file", "streamdetails"], "sort": { "order": "ascending", "method": "label", "ignorearticle": true }}, "id": "libTvShows"}'
        base_url = 'video_type=%s&id=%s'
        episodes = []
        force_title = self._force_title(video)
        if not force_title:
            run = cmd % (params['id'][0], video.season, 'episode', video.episode)
            meta = xbmc.executeJSONRPC(run)
            meta = json.loads(meta)
            log_utils.log('Episode Meta: %s' % (meta), log_utils.LOGDEBUG)
            if 'result' in meta and 'episodes' in meta['result']:
                episodes = meta['result']['episodes']
        else:
            log_utils.log('Skipping S&E matching as title search is forced on: %s' % (video.trakt_id), log_utils.LOGDEBUG)

        if (force_title or kodi.get_setting('title-fallback') == 'true') and video.ep_title and not episodes:
            run = cmd % (params['id'][0], video.season, 'title', video.ep_title.encode('utf-8'))
            meta = xbmc.executeJSONRPC(run)
            meta = json.loads(meta)
            log_utils.log('Episode Title Meta: %s' % (meta), log_utils.LOGDEBUG)
            if 'result' in meta and 'episodes' in meta['result']:
                episodes = meta['result']['episodes']

        for episode in episodes:
            # Skip .strm placeholder files; return the first real match.
            if episode['file'].endswith('.strm'):
                continue
            return base_url % (video.video_type, episode['episodeid'])

    @classmethod
    def get_settings(cls):
        settings = super(Local_Scraper, cls).get_settings()
        name = cls.get_name()
        settings.append(' <setting id="%s-def-quality" type="enum" label=" Default Quality" values="None|Low|Medium|High|HD720|HD1080" default="0" visible="eq(-4,true)"/>' % (name))
        return settings

    def search(self, video_type, title, year):
        """Search the local library for movies or TV shows matching *title*/*year*."""
        filter_str = '{"field": "title", "operator": "contains", "value": "%s"}' % (title)
        if year: filter_str = '{"and": [%s, {"field": "year", "operator": "is", "value": "%s"}]}' % (filter_str, year)
        if video_type == VIDEO_TYPES.MOVIE:
            cmd = '{"jsonrpc": "2.0", "method": "VideoLibrary.GetMovies", "params": { "filter": %s, "limits": { "start" : 0, "end": 25 }, "properties" : ["title", "year", "file", "streamdetails"], \
            "sort": { "order": "ascending", "method": "label", "ignorearticle": true } }, "id": "libMovies"}'
            result_key = 'movies'
            id_key = 'movieid'
        else:
            cmd = '{"jsonrpc": "2.0", "method": "VideoLibrary.GetTVShows", "params": { "filter": %s, "limits": { "start" : 0, "end": 25 }, "properties" : ["title", "year"], \
            "sort": { "order": "ascending", "method": "label", "ignorearticle": true } }, "id": "libTvShows"}'
            result_key = 'tvshows'
            id_key = 'tvshowid'

        results = []
        cmd = cmd % (filter_str)
        meta = xbmc.executeJSONRPC(cmd)
        meta = json.loads(meta)
        log_utils.log('Search Meta: %s' % (meta), log_utils.LOGDEBUG)
        if 'result' in meta and result_key in meta['result']:
            for item in meta['result'][result_key]:
                if video_type == VIDEO_TYPES.MOVIE and item['file'].endswith('.strm'):
                    continue

                result = {'title': item['title'], 'year': item['year'], 'url': 'video_type=%s&id=%s' % (video_type, item[id_key])}
                results.append(result)

        return results
|
CodyKochmann/generators | generators/chain.py | Python | mit | 1,291 | 0.002324 | # -*- coding: utf-8 -*-
# @Author: Cody Kochmann
# @Date: 2017-09-09 14:58:43
# @Last Modified by: Cody Kochmann
# @Last Modified time: 2017-12-09 10:27:36
from functools import partial
from strict_functions import strict_globals
@strict_globals(partial=partial)
def chain(*args):
    """itertools.chain, just better

    Flattens each iterable argument one level; non-iterable arguments are
    yielded as-is. A single iterable argument is treated as a generator of
    generators rather than one flat sequence.
    """
    # NOTE: removed the dead line `has_iter = partial(hasattr, name='__iter__')`
    # — it was never used, and calling it would raise TypeError anyway because
    # the built-in hasattr() accepts no keyword arguments.
    if len(args) == 1 and hasattr(args[0], '__iter__'):
        args = args[0]
    for arg in args:
        # if the arg is iterable
        if hasattr(arg, '__iter__'):
            # iterate through it
            for i in arg:
                yield i
        # otherwise
        else:
            # yield the whole argument
            yield arg
del partial
del strict_globals
if __name__ == '__main__':
    import itertools as itr

    def show(generator):
        ''' prints a generator '''
        # (This body was mangled by stray ' | ' separators in the source — restored.)
        print('-'*30)
        print(tuple(generator))
        print('-'*30)

    show(chain())

    # this is a generator of generators
    g = (iter(range(10)) for i in range(10))
    show(chain(g))

    # doing this in itertools would do this
    g = (iter(range(10)) for i in range(10))
    show(itr.chain(g))
|
amolborcar/learnpythonthehardway | ex17.py | Python | mit | 649 | 0.003082 | from sys import argv
from os.path import exists
script, from_file, to_file = argv
print "Copying from %s to %s..." % (from_file, to_file)
# we could do these two on one line, how?
# in_file = open(from_file)
# indata = in_file.read()
# like this!
indata = open(from_file).read()
print "The input file is %d bytes long." % len(indata)
prin | t "Does the output file exist? %r" % exists(to_file)
print "Ready, hit RETURN to continue, CTRL-C to abort."
raw_input("> ")
out_file = open(to_file, "w")
out_file.write(indata)
print "Alright, all done!"
out_file.close()
# in_file.close() # this line isn't necessary since in_file is no longer a thi | ng |
rspavel/spack | var/spack/repos/builtin/packages/py-websocket-client/package.py | Python | lgpl-2.1 | 997 | 0.005015 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
class PyWebsocketClient(PythonPackage):
    """WebSocket client for Python. hybi13 is supported."""
    # (The docstring and homepage URL were mangled by stray ' | ' separators
    # in the source — restored here.)

    homepage = "https://github.com/websocket-client/websocket-client.git"
    url      = "https://pypi.io/packages/source/w/websocket_client/websocket_client-0.57.0.tar.gz"

    version('0.57.0', sha256='d735b91d6d1692a6a181f2a8c9e0238e5f6373356f561bb9dc4c7af36f452010')
    version('0.56.0', sha256='1fd5520878b68b84b5748bb30e592b10d0a91529d5383f74f4964e72b297fd3a')

    depends_on('python@2.6:2.8,3.4:', type=('build', 'run'))
    depends_on('py-setuptools', type='build')
    depends_on('py-six', type=('build', 'run'))
    depends_on('py-backports-ssl-match-hostname', when='^python@2.6:2.7.9', type=('build', 'run'))
    depends_on('py-argparse', when='^python@:2.6', type=('build', 'run'))
|
fsys/js.lesscss | js/lesscss/script.py | Python | bsd-3-clause | 67 | 0.014925 | # -*- coding: utf-8 -*-
import subprocess
import shutil
# (This import was mangled by a stray ' | ' separator — restored.)
import os
partofthething/home-assistant | tests/components/homekit_controller/test_config_flow.py | Python | apache-2.0 | 22,205 | 0.001531 | """Tests for homekit_controller config flow."""
from unittest import mock
import unittest.mock
from unittest.mock import patch
import aiohomekit
from aiohomekit.model import Accessories, Accessory
from aiohomekit.model.characteristics import CharacteristicsTypes
from aiohomekit.model.services import ServicesTypes
import pytest
from homeassistant.components.homekit_controller import config_flow
from homeassistant.helpers import device_registry
from tests.common import MockConfigEntry, mock_device_registry
PAIRING_START_FORM_ERRORS = [
(KeyError, "pairing_failed"),
]
PAIRING_START_ABORT_ERRORS = [
(aiohomekit.AccessoryNotFoundError, "accessory_not_found_error"),
(aiohomekit.UnavailableError, "already_paired"),
]
PAIRING_TRY_LATER_ERRORS = [
(aiohomekit.BusyError, "busy_error"),
(aiohomekit.MaxTriesError, "max_tries_error"),
(IndexError, "protocol_error"),
]
PAIRING_FINISH_FORM_ERRORS = [
(aiohomekit.exceptions.MalformedPinError, "authentication_error"),
(aiohomekit.MaxPeersError, "max_peers_error"),
(aiohomekit.AuthenticationError, "authentication_error"),
(aiohomekit.UnknownError, "unknown_error"),
(KeyError, "pairing_failed"),
]
PAIRING_FINISH_ABORT_ERRORS = [
(aiohomekit.AccessoryNotFoundError, "accessory_not_found_error")
]
INVALID_PAIRING_CODES = [
"aaa-aa-aaa",
"aaa-11-aaa",
"111-aa-aaa",
"aaa-aa-111",
"1111-1-111",
"a111-11-111",
" 111-11-111",
"111-11-111 ",
"111-11-111a",
"1111111",
"22222222",
]
VALID_PAIRING_CODES = [
"114-11-111",
"123-45-679",
"123-45-679 ",
"11121111",
"98765432",
" 98765432 ",
]
def _setup_flow_handler(hass, pairing=None):
    """Build a HomekitControllerFlowHandler wired to a fully mocked controller.

    The mocked controller discovers a fixed device id and returns *pairing*
    when the pairing finishes.
    """
    flow = config_flow.HomekitControllerFlowHandler()
    flow.hass = hass
    flow.context = {}

    finish_pairing = unittest.mock.AsyncMock(return_value=pairing)

    discovery = mock.Mock()
    discovery.device_id = "00:00:00:00:00:00"
    discovery.start_pairing = unittest.mock.AsyncMock(return_value=finish_pairing)

    flow.controller = mock.Mock()
    flow.controller.pairings = {}
    flow.controller.find_ip_by_device_id = unittest.mock.AsyncMock(
        return_value=discovery
    )

    return flow
@pytest.mark.parametrize("pairing_code", INVALID_PAIRING_CODES)
def test_invalid_pairing_codes(pairing_code):
    """Test ensure_pin_format raises for an invalid pin code."""
    with pytest.raises(aiohomekit.exceptions.MalformedPinError):
        config_flow.ensure_pin_format(pairing_code)
@pytest.mark.parametrize("pairing_code", VALID_PAIRING_CODES)
def test_valid_pairing_codes(pairing_code):
    """Test ensure_pin_format corrects format for a valid pin in an alternative format."""
    valid_pin = config_flow.ensure_pin_format(pairing_code).split("-")
    # Normalized form is always XXX-XX-XXX.
    assert len(valid_pin) == 3
    assert len(valid_pin[0]) == 3
    assert len(valid_pin[1]) == 2
    assert len(valid_pin[2]) == 3
def get_flow_context(hass, result):
    """Get the flow context from the result of async_init or async_configure."""
    wanted_id = result["flow_id"]
    for candidate in hass.config_entries.flow.async_progress():
        if candidate["flow_id"] == wanted_id:
            return candidate["context"]
    # Mirror next()'s behaviour when no flow matches.
    raise StopIteration
def get_device_discovery_info(device, upper_case_props=False, missing_csharp=False):
    """Turn a aiohomekit format zeroconf entry into a homeassistant one."""
    record = device.info

    # Zeroconf TXT properties; "sf" is forced to 0x01 (unpaired) on purpose.
    props = {
        "md": record["md"],
        "pv": record["pv"],
        "id": device.device_id,
        "c#": record["c#"],
        "s#": record["s#"],
        "ff": record["ff"],
        "ci": record["ci"],
        "sf": 0x01,  # record["sf"],
        "sh": "",
    }

    if missing_csharp:
        props.pop("c#")

    if upper_case_props:
        props = {key.upper(): value for key, value in props.items()}

    return {
        "host": record["address"],
        "port": record["port"],
        "hostname": record["name"],
        "type": "_hap._tcp.local.",
        "name": record["name"],
        "properties": props,
    }
def setup_mock_accessory(controller):
    """Add a bridge accessory to a test controller."""
    bridge = Accessories()

    accessory = Accessory.create_with_info(
        name="Koogeek-LS1-20833F",
        manufacturer="Koogeek",
        model="LS1",
        serial_number="12345",
        firmware_revision="1.1",
    )

    # Single lightbulb service with an "on" characteristic, initially off.
    service = accessory.add_service(ServicesTypes.LIGHTBULB)
    on_char = service.add_char(CharacteristicsTypes.ON)
    on_char.value = 0

    bridge.add_accessory(accessory)

    return controller.add_device(bridge)
@pytest.mark.parametrize("upper_case_props", [True, False])
@pytest.mark.parametrize("missing_csharp", [True, False])
async def test_discovery_works(hass, controller, upper_case_props, missing_csharp):
"""Test a device being discovered."""
device = setup_mock_accessory(controller)
discovery_info = get_device_discovery_info(device, upper_case_props, missing_csharp)
# Device is discovered
result = await hass.config_entries.flow.async_init(
"homekit_controller", context={"source": "zeroconf"}, data=discovery_info
)
assert result["type"] == "form"
assert result["step_id"] == "pair"
assert get_flow_context(hass, result) == {
"hkid": "00:00:00:00:00:00",
"source": "zeroconf",
"title_placeholders": {"name": "TestDevice"},
"unique_id": "00:00:00:00:00:00",
}
# User initiates pairing - device enters pairing mode and displays code
result = await hass.config_entries.flow.async_configure(result["flow_id"])
assert result["type"] == "form"
assert result["step_id"] == "pair"
# Pairing doesn't error error and pairing results
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={"pairing_code": "111-22-333"}
)
assert result["type"] == "create_entry"
assert result["title"] == "Koogeek-LS1-20833F"
assert result["data"] == {}
async def test_abort_duplicate_flow(hass, controller):
"""Already paired."""
device = setup_mock_accessory(controller)
discovery_info = get_device_discovery_info(device)
# Device is discovered
result = await hass.config_entries.flow.async_init(
"homekit_controller", context={"source": "zeroconf"}, data=discovery_info
)
assert result["type"] == "form"
assert result["step_id"] == "pair"
result = await hass.config_entries.flow.async_init(
"homekit_controller", context={"source": "zeroconf"}, data=discovery_info
)
assert result["type"] == "abort"
assert result["reason"] == "already_in_progress"
async def test_pair_already_paired_1(hass, controller):
"""Already paired."""
device = setup_mock_accessory(controller)
discovery_info = get_device_discovery_info(device)
# Flag device as already paired
discovery_info["properties"]["sf"] = 0x0
# Device is discovered
result = await hass.config_entries.flow.async_init(
"homekit_controller", context={"source": "zeroconf"}, data=discovery_info
)
assert result["type"] == "abort"
assert result["reason"] == "already_paired"
async def test_id_missing(hass, controller):
"""Test id is missing."""
device = setup_mock_accessory(controller)
discovery_info = get_device_discovery_info(device)
# Remove id from device
del discovery | _info["properties"]["id"]
# Device is discovered
result = await hass.config_entries.flow.async_init(
"homekit_controller", context={"source": "zeroconf"}, data=discovery_info
)
assert result["type"] == "abort | "
assert result["reason"] == "invalid_properties"
async def test_discovery_ignored_model(hass, controller):
"""Already paired."""
device = setup_mock_accessory(controller)
discovery_info = get_device_discovery_info(device)
discovery_info["properties"]["id"] = "AA:BB:CC:DD:EE:FF"
discovery_info["properties"]["md"] = "HHKBridge1,1"
# Device is discovered
result = await hass.config_entries.flow.async_init(
|
cprogrammer1994/ModernGL | examples/old-examples/GLWindow/particle_system.py | Python | mit | 1,710 | 0.022807 | import math
import random
import struct
import GLWindow
import ModernGL
# Window & Context
wnd = GLWindow.create_window()
ctx = ModernGL.create_context()
tvert = ctx.vertex_shader('''
#version 330
uniform vec2 acc;
in vec2 in_pos;
in vec2 in_prev;
out vec2 out_pos;
out vec2 out_prev;
void main() {
out_pos = in_pos * 2.0 - in_prev + acc;
out_prev = in_pos;
}
''')
vert = ctx.vertex_shader('''
#version 330
in vec2 vert;
void main() {
gl_Position = vec4(vert, 0.0, 1.0);
}
''')
frag = ctx.fragment_shader('''
#version 330
out vec4 color;
void main() {
color = vec4(0.30, 0.50, 1.00, 1.0);
}
''')
prog = ctx.program(vert, frag])
transform = ctx.program(tvert, ['out_pos', 'out_prev'])
def pa | rticle():
a = random.uniform(0.0, math.pi * 2.0)
r = random.uniform(0.0, 0.001)
return struct.pack(' | 2f2f', 0.0, 0.0, math.cos(a) * r - 0.003, math.sin(a) * r - 0.008)
vbo1 = ctx.buffer(b''.join(particle() for i in range(1024)))
vbo2 = ctx.buffer(reserve=vbo1.size)
vao1 = ctx.simple_vertex_array(transform, vbo1, ['in_pos', 'in_prev'])
vao2 = ctx.simple_vertex_array(transform, vbo2, ['in_pos', 'in_prev'])
render_vao = ctx.vertex_array(prog, [
(vbo1, '2f8x', ['vert']),
])
transform.uniforms['acc'].value = (0, -0.0001)
idx = 0
ctx.point_size = 5.0
while wnd.update():
ctx.viewport = wnd.viewport
ctx.clear(0.9, 0.9, 0.9)
for i in range(8):
vbo1.write(particle(), offset=idx * struct.calcsize('2f2f'))
idx = (idx + 1) % 1024
render_vao.render(ModernGL.POINTS, 1024)
vao1.transform(vbo2, ModernGL.POINTS, 1024)
ctx.copy_buffer(vbo1, vbo2)
|
linsalrob/EdwardsLab | matplotlib graphs/kde.py | Python | mit | 1,377 | 0.003631 | import matplotlib.pyplot as plt
import numpy as np
from scipy.stats import gaussian_kde
from scipy.stats.distributions import norm
from sklearn.grid_search import GridSearchCV
from sklearn.neighbors import KernelDensity
data = [1.5] * 7 + [2.5] * 2 + [3.5] * 8 + [4.5] * 3 + [5.5] * 1 + [6.5] * 8
density = gaussian_kde(data)
xs = np.linspace(0, 8, 200)
x_grid = np.linspace(-4.5, 3.5, 1000)
x = np.concatenate([norm(-1, 1.).rvs(400), norm(1, 0.3).rvs(100)])
# print(x)
print("X Grid:" + str(len(x_grid)) + "\t" + str(min(x_grid)) + ":" + str(max(x_grid)) + "\t" + str(x_grid[0:10]))
print("X: " + str(len(x)) + "\t" + str(min(x)) + ":" + str(max(x)) + "\t" + str(x[0:10]))
if (0):
density.covariance_factor = lambda: .25
density._compute_covariance()
plt.plot(xs, density(xs))
# plt.plot(data)
plt.show()
bandwidth=0.2
kde_skl = KernelDensity(bandwidt | h=bandwidth)
kde_skl.fit(x[:, np.newaxis])
# score_samples() returns the log-likelihood of the samples
log_pdf = kde_skl.score_samples(x_grid[:, np.newaxis])
print( | log_pdf[0:10])
density = np.exp(log_pdf)
plt.plot(x_grid, density, color='blue', alpha=0.5, lw=3)
#plt.show()
grid = GridSearchCV(KernelDensity(),
{'bandwidth': np.linspace(0.1, 1.0, 30)},
cv=20) # 20-fold cross-validation
# print(x[:, None])
grid.fit(x[:, None])
# print grid.best_params_ |
evanbiederstedt/RRBSfun | scripts/repeat_finder_scripts/repeat_finder_RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.G.py | Python | mit | 706 | 0.009915 | import glob
impor | t numpy as np
import pandas as pd
from numpy import nan
import os
os.chdir("/gpfs/commons/home/biederstedte-934/evan_projects/RRBS_anno_clean")
repeats = pd.read_csv("repeats_hg19.csv")
annofiles = glob.glob("RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.G*")
def between_range(row):
subset = repeats.loc[(row["chr"] == repeats.chr) & (row.start >= repeats.start) & (row.start <= repeats.end), :]
if subset.empty:
ret | urn np.nan
return subset.repeat_class
#newdf1 = pd.DataFrame()
for filename in annofiles:
df = pd.read_table(filename)
df["hg19_repeats"] = df.apply(between_range, axis = 1)
df.to_csv(str("repeatregions_") + filename + ".csv", index=False)
|
Stargrazer82301/CAAPR | CAAPR/CAAPR_AstroMagic/PTS/pts/do/evolve/reference.py | Python | mit | 4,813 | 0.005195 | #!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.do.evolve.original Reference, original implementation.
#
# -----------------------------------------------------------------
# Ensure Python 3 compatibility
from __future__ import absolute_import, division, print_function
# Import standard modules
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
# Import the relevant PTS classes and modules
from pts.evolve.engine import GAEngine, RawScoreCriteria
from pts.evolve.genomes.list1d import G1DList
from pts.evolve import mutators
from pts.evolve import initializators
from pts.evolve import constants
from pts.core.tools.logging import log
from pts.core.tools import time
from pts.core.tools import filesystem as fs
from pts.core.tools.random import setup_prng
from pts.core.basics.configuration import ConfigurationDefinition, ConfigurationReader
# -----------------------------------------------------------------
# Configuration definition
definition = ConfigurationDefinition()
definition.add_positional_optional("seed", int, "the random seed", 4357)
# Get configuration
reader = ConfigurationReader("reference")
config = reader.read(definition)
# -----------------------------------------------------------------
x = np.linspace(12,25,100)
test_data_x = [20., 16., 19.79999924, 18.39999962, 17.10000038, 15.5, 14.69999981, 17.10000038, 15.39999962,
16.20000076,
15., 17.20000076, 16., 17., 14.39999962]
test_data_y = [88.59999847, 71.59999847, 93.30000305, 84.30000305, 80.59999847, 75.19999695, 69.69999695, 82.,
69.40000153, 83.30000305, 79.59999847, 82.59999847, 80.59999847, 83.5, 76.30000305]
# -----------------------------------------------------------------
def fit_function(x, a, b):
"""
This function ...
:param a:
:param b:
:param x:
:return:
"""
retur | n a * x + b
# -----------------------------------------------------------------
def chi_squared_function(chromosome):
"""
This function calculates the chi-squared value for a certain set of parameters | (chromosome)
:param chromosome:
:return:
"""
chi_squared = 0.0
for i in range(len(test_data_x)):
x = test_data_x[i]
y = test_data_y[i]
chromosome_y = fit_function(x, chromosome[0], chromosome[1])
chi_squared += (y - chromosome_y) ** 2.
chi_squared /= 2.0
return chi_squared
# -----------------------------------------------------------------
#seed = 4357
seed = config.seed
prng = setup_prng(seed)
# -----------------------------------------------------------------
# Genome instance
genome = G1DList(2)
genome.setParams(rangemin=0., rangemax=50., bestrawscore=0.00, rounddecimal=2)
genome.initializator.set(initializators.G1DListInitializatorReal)
genome.mutator.set(mutators.G1DListMutatorRealGaussian)
# Set the evaluator function
genome.evaluator.set(chi_squared_function)
# Genetic algorithm instance
ga = GAEngine(genome)
ga.terminationCriteria.set(RawScoreCriteria)
ga.setMinimax(constants.minimaxType["minimize"])
ga.setGenerations(5)
ga.setCrossoverRate(0.5)
ga.setPopulationSize(100)
ga.setMutationRate(0.5)
# Evolve
#ga.evolve(freq_stats=False)
ga.evolve()
print("Final generation:", ga.currentGeneration)
# -----------------------------------------------------------------
# Determine the path to the reference directory
ref_path = fs.join(fs.cwd(), "reference")
fs.create_directory(ref_path)
# -----------------------------------------------------------------
best = ga.bestIndividual()
best_parameter_a = best.genomeList[0]
best_parameter_b = best.genomeList[1]
best_path = fs.join(ref_path, "best.dat")
with open(best_path, 'w') as best_file:
best_file.write("Parameter a: " + str(best_parameter_a) + "\n")
best_file.write("Parameter b: " + str(best_parameter_b) + "\n")
popt, pcov = curve_fit(fit_function, test_data_x, test_data_y)
parameter_a_real = popt[0]
parameter_b_real = popt[1]
print("Best parameter a:", best_parameter_a, " REAL:", parameter_a_real)
print("Best parameter b:", best_parameter_b, " REAL:", parameter_b_real)
plt.figure()
plt.scatter(test_data_x, test_data_y)
plt.plot(x, [fit_function(x_i, best_parameter_a, best_parameter_b) for x_i in x])
plt.plot(x, [fit_function(x_i, parameter_a_real, parameter_b_real) for x_i in x])
plt.ylim(65, 95)
plt.xlim(12,22)
# Save the figure
plot_path = fs.join(ref_path, "best.pdf")
plt.savefig(plot_path)
# -----------------------------------------------------------------
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.