repo_name string lengths 5-100 | path string lengths 4-231 | language string 1 class | license string 15 classes | size int64 6-947k | score float64 0-0.34 | prefix string lengths 0-8.16k | middle string lengths 3-512 | suffix string lengths 0-8.17k
|---|---|---|---|---|---|---|---|---|
Nymphet/acgnz-spider | AcgnzSpider/spiders/AcgnzSpider01.py | Python | mit | 3,584 | 0.003627 | # -*- coding: utf-8 -*-
import scrapy
import logging
from scrapy.exceptions import CloseSpider
from scrapy.utils.project import get_project_settings
from time import sleep
from AcgnzSpider.items import AcgnzItem
class AcgnzSpider(scrapy.Spider):
name = 'acgnz.cc'
start_urls = ['http://www.acgnz.cc/sign']
allowed_domains = ['acgnz.cc']
def parse(self, response):
settings = get_project_settings()
yield scrapy.FormRequest.from_response(
response,
method='POST',
# headers={'Content-Type': 'multipart/form-data'},
url='http://www.acgnz.cc/wp-admin/admin-ajax.php?action=theme_custom_sign',
formdata={
                'user[email]': settings.get('LOGIN_CREDENTIALS_EMAIL'),
                'user[pwd]': settings.get('LOGIN_CREDENTIALS_PWD'),
'user[remember]': '1',
'type': 'login',
'theme-nonce': '85dd62e1f6'
},
callback=self.parse_follow_seq
)
def parse_follow_seq(self, response):
if 'success' not in response.body:
raise CloseSpider('login failed')
logging.log(logging.DEBUG, 'login successful, sleeping for 5 seconds')
sleep(5)
for i in range(9000):
yield scrapy.Request(
'http://www.acgnz.cc/{index}'.format(index=i),
meta={'dont_redirect': True,
'handle_httpstatus_list': [302]},
callback=self.parse_page)
def parse_page(self, response):
if response.status == 302:
pass
else:
item = AcgnzItem()
item['url'] = response.url
item['title'] = (response.selector.xpath('//div[@class="entry-content content-reset"]').xpath(
'.//a/@title').extract() + response.selector.xpath('//article[@id]/h2/text()').extract())[0]
item['image_urls'] = response.selector.xpath(
'//div[@class="entry-content content-reset"]').xpath('.//img/@src').extract()
if response.selector.xpath('//div[@class="entry-circle"]/a[@class="meta meta-post-storage"]/@href'):
download_page_href = response.selector.xpath(
'//div[@class="entry-circle"]/a[@class="meta meta-post-storage"]/@href')[0].extract()
yield scrapy.Request(download_page_href, meta={'item': item}, callback=self.parse_download_page)
else:
item['download_link'] = ''
item['download_code'] = ''
item['unarchive_password'] = ''
yield item
def parse_download_page(self, response):
item = response.meta['item']
item['download_link'] = response.selector.xpath('//div[@class="post-download"]').xpath(
'.//a[@class="btn btn-lg btn-success btn-block"]/@href')[-1].extract()
if response.selector.xpath(
'//div[@class="post-download"]').xpath('.//input[@id="theme_custom_storage-0-download-pwd"]/@value'):
item['download_code'] = response.selector.xpath(
'//div[@class="post-download"]').xpath('.//input[@id="theme_custom_storage-0-download-pwd"]/@value')[-1].extract()
if response.selector.xpath(
'//div[@class="post-download"]').xpath('.//input[@id="theme_custom_storage-0-extract-pwd"]/@value'):
item['unarchive_password'] = response.selector.xpath(
'//div[@class="post-download"]').xpath('.//input[@id="theme_custom_storage-0-extract-pwd"]/@value')[-1].extract()
yield item
|
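For context, the spider above assumes an AcgnzItem exposing the fields it populates, plus two custom settings holding the login credentials. A minimal sketch of what the project's items.py and settings.py might contain (field names are taken from the spider; everything else is an assumption):
# items.py -- hypothetical, reconstructed from the fields the spider sets
import scrapy
class AcgnzItem(scrapy.Item):
    url = scrapy.Field()
    title = scrapy.Field()
    image_urls = scrapy.Field()
    download_link = scrapy.Field()
    download_code = scrapy.Field()
    unarchive_password = scrapy.Field()
# settings.py -- placeholder credentials, not real ones
LOGIN_CREDENTIALS_EMAIL = 'user@example.com'
LOGIN_CREDENTIALS_PWD = 'changeme'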
automl/paramsklearn | tests/components/regression/test_sgd.py | Python | bsd-3-clause | 1,757 | 0.003415 | import unittest
from ParamSklearn.components.regression.sgd import SGD
from ParamSklearn.util import _test_regressor, _test_regressor_iterative_fit
import sklearn.metrics
class SGDComponentTest(unittest.TestCase):
    def test_default_configuration(self):
        for i in range(10):
            predictions, targets = _test_regressor(SGD)
self.assertAlmostEqual(0.092460881802630235,
sklearn.metrics.r2_score(y_true=targets,
y_pred=predictions))
def test_default_configuration_iterative_fit(self):
for i in range(10):
predictions, targets = _test_regressor_iterative_fit(SGD)
self.assertAlmostEqual(0.092460881802630235,
sklearn.metrics.r2_score(y_true=targets,
y_pred=predictions))
def test_default_configuration_digits(self):
for i in range(10):
predictions, targets = _test_regressor(SGD, dataset='boston')
self.assertAlmostEqual(-2.9165866511775519e+31,
sklearn.metrics.r2_score(y_true=targets,
y_pred=predictions))
def test_default_configuration_digits_iterative_fit(self):
for i in range(10):
predictions, targets = _test_regressor_iterative_fit(SGD,
dataset='boston')
self.assertAlmostEqual(-2.9165866511775519e+31,
sklearn.metrics.r2_score(y_true=targets,
y_pred=predictions)) |
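The assertions above pin exact r2_score values produced by ParamSklearn's fixtures. As a standalone illustration of the metric call they wrap (synthetic numbers, unrelated to those fixtures):
import numpy as np
import sklearn.metrics
targets = np.array([2.5, 0.0, 2.0, 8.0])
predictions = np.array([3.0, -0.5, 2.0, 7.0])
# r2_score = 1 - SS_res / SS_tot; for these values it comes out to roughly 0.957
print(sklearn.metrics.r2_score(y_true=targets, y_pred=predictions))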
stacksync/manager | users/views.py | Python | gpl-2.0 | 125 | 0.008 | from django.http import HttpResponse
def index(request):
    return HttpResponse("Hello, world. You're at the users index") |
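To reach this view it would be wired into a URLconf; a minimal sketch using the same Django 1.x-era API (the URL pattern itself is an assumption):
# urls.py -- hypothetical routing for the index view above
from django.conf.urls import url
from users import views
urlpatterns = [
    url(r'^users/$', views.index, name='index'),
]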
sparkslabs/kamaelia_ | Sketches/MPS/BugReports/FixTests/Kamaelia/Kamaelia/Apps/Games4Kids/BasicSprite.py | Python | apache-2.0 | 3,562 | 0.010668 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
import pygame
from Axon.Component import component
class BasicSprite(pygame.sprite.Sprite, component):
Inboxes=["translation", "imaging","inbox", "control"]
allsprites = []
def __init__(self, imagepath, name, pos = None,border=40):
pygame.sprite.Sprite.__init__(self)
component.__init__(self)
self.imagepath = imagepath
self.image = None
self.original = None
self.rect = None
self.pos = pos
        if self.pos is None:
self.pos = [100,100]
self.dir = ""
self.name = name
self.update = self.sprite_logic().next
self.screensize = (924,658)
self.border = border
self.__class__.allsprites.append(self)
def allSprites(klass):
return klass.allsprites
allSprites = classmethod(allSprites)
def sprite_logic(self):
while 1:
yield 1
def main(self):
self.image = pygame.image.load(self.imagepath)
self.original = self.image
        self.image = self.original
self.rect = self.image.get_rect()
self.rect.center = self.pos
center = list(self.rect.center)
current = self.image
pos = center
dx,dy = 0,0
d = 10 # Change me to change the velocity of the sprite
while 1:
self.image = current
if self.dataReady("imaging"):
self.image = self.recv("imaging")
                current = self.image
if self.dataReady("translation"):
pos = self.recv("translation")
if self.dataReady("inbox"):
event = self.recv("inbox")
if event == "start_up": dy = dy + d
if event == "stop_up": dy = dy - d
if event == "start_down": dy = dy - d
if event == "stop_down": dy = dy + d
if event == "start_right": dx = dx + d
if event == "stop_right": dx = dx - d
if event == "start_left": dx = dx - d
if event == "stop_left": dx = dx + d
if dx !=0 or dy != 0:
self.pos[0] += dx
if self.pos[0] >self.screensize[0]-self.border: self.pos[0] =self.screensize[0]-self.border
if self.pos[1] >self.screensize[1]-self.border: self.pos[1] =self.screensize[1]-self.border
if self.pos[0] <self.border: self.pos[0] = self.border
if self.pos[1] < self.border: self.pos[1] = self.border
self.pos[1] -= dy
self.rect.center = (self.pos)
self.send(self.pos, "outbox")
yield 1
|
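A hypothetical instantiation of the component above (the image path and name are placeholders; the image file is only loaded once main() runs). Note that instances register themselves in the class-level allsprites list:
cat = BasicSprite("images/cat.png", "cat", pos=[200, 150])
print(len(BasicSprite.allSprites()))  # 1 -- instances self-register in __init__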
riscmaster/risc_maap | risc_msgs/src/foo_msgs/msg/_Landmark.py | Python | bsd-2-clause | 4,376 | 0.020567 | """autogenerated by genpy from foo_msgs/Landmark.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class Landmark(genpy.Message):
_md5sum = "2edff07075d48378c13052415416989c"
_type = "foo_msgs/Landmark"
_has_header = False #flag to mark the presence of a Header object
_full_text = """string name
float64 x
float64 y
float64 z
"""
__slots__ = ['name','x','y','z']
_slot_types = ['string','float64','float64','float64']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
name,x,y,z
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(Landmark, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.name is None:
self.name = ''
if self.x is None:
self.x = 0.
if self.y is None:
self.y = 0.
if self.z is None:
self.z = 0.
else:
self.name = ''
self.x = 0.
self.y = 0.
self.z = 0.
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self.name
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
end = 0
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.name = str[start:end].decode('utf-8')
else:
self.name = str[start:end]
_x = self
start = end
end += 24
(_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self.name
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
end = 0
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
      end += length
if python3:
self.name = str[start:end].decode('utf-8')
else:
self.name = str[start:end]
_x = self
start = end
end += 24
(_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
_struct_3d = struct.Struct("<3d")
|
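A round-trip sketch for the generated class, assuming the Python 2 environment these genpy files target:
from cStringIO import StringIO  # Python 2, matching the generated code
lm = Landmark(name='gate', x=1.0, y=2.0, z=0.5)  # made-up values
buff = StringIO()
lm.serialize(buff)
copy = Landmark().deserialize(buff.getvalue())
assert copy.name == 'gate' and copy.x == 1.0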
gbiggs/ros_book_sample_code | chapter3/src/message_subscriber.py | Python | apache-2.0 | 275 | 0 | #!/usr/bin/env python
import rospy
from chapter3.msg import Complex
def callback(msg):
print 'Real:', msg.real
print 'Imaginary:', msg.imaginary
print
rospy.init_node('message_subscriber')
sub = rospy.Subscriber('complex', Complex, callback)
rospy.spin()
|
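The subscriber pairs with a publisher on the same topic; a minimal sketch of the sending side (node name and field values are assumptions):
# message_publisher.py -- hypothetical counterpart
import rospy
from chapter3.msg import Complex
rospy.init_node('message_publisher')
pub = rospy.Publisher('complex', Complex)
pub.publish(Complex(real=1.0, imaginary=2.0))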
Freso/listenbrainz-server | listenbrainz_spark/recommendations/recording/tests/test_candidate.py | Python | gpl-2.0 | 25,386 | 0.003782 | from datetime import datetime
import sys
from listenbrainz_spark.tests import SparkTestCase
from listenbrainz_spark.recommendations.recording import candidate_sets
from listenbrainz_spark.recommendations.recording import create_dataframes
from listenbrainz_spark import schema, utils, config, path, stats
from listenbrainz_spark.exceptions import (TopArtistNotFetchedException,
SimilarArtistNotFetchedException)
from pyspark.sql import Row
from unittest.mock import patch
import pyspark.sql.functions as f
from pyspark.sql.types import StructField, StructType, StringType
class CandidateSetsTestClass(SparkTestCase):
recommendation_generation_window = 7
listens_path = path.LISTENBRAINZ_DATA_DIRECTORY
mapping_path = path.MBID_MSID_MAPPING
mapped_listens_path = path.RECOMMENDATION_RECORDING_MAPPED_LISTENS
mapped_listens_subset_path = '/mapped/subset.parquet'
users_path = path.RECOMMENDATION_RECORDING_USERS_DATAFRAME
recordings_path = path.RECOMMENDATION_RECORDINGS_DATAFRAME
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.upload_test_listen_to_hdfs(cls.listens_path)
cls.upload_test_mapping_to_hdfs(cls.mapping_path)
cls.upload_test_mapped_listens_to_hdfs(cls.listens_path, cls.mapping_path, cls.mapped_listens_path)
cls.upload_test_mapping_listens_subset_to_hdfs()
@classmethod
def tearDownClass(cls):
super().delete_dir()
super().tearDownClass()
@classmethod
def upload_test_mapping_listens_subset_to_hdfs(cls):
mapped_df = utils.read_files_from_HDFS(cls.mapped_listens_path)
from_date = stats.offset_days(cls.date, 4)
to_date = cls.date
mapped_listens_subset = candidate_sets.get_listens_to_fetch_top_artists(mapped_df, from_date, to_date)
utils.save_parquet(mapped_listens_subset, cls.mapped_listens_subset_path)
@classmethod
def get_listen_row(cls, date, user_name, credit_id):
test_mapped_listens = Row(
listened_at=date,
mb_artist_credit_id=credit_id,
mb_artist_credit_mbids=["181c4177-f33a-441d-b15d-910acaf18b07"],
mb_recording_mbid="3acb406f-c716-45f8-a8bd-96ca3939c2e5",
mb_release_mbid="xxxxxx",
msb_artist_credit_name_matchable="lessthanjake",
msb_recording_name_matchable="Al's War",
user_name=user_name,
)
return test_mapped_listens
@classmethod
def get_listens(cls):
cls.date = datetime.utcnow()
df1 = utils.create_dataframe(cls.get_listen_row(cls.date, 'vansika', 1), schema=None)
shifted_date = stats.offset_days(cls.date, cls.recommendation_generation_window + 1)
df2 = utils.create_dataframe(cls.get_listen_row(shifted_date, 'vansika', 1), schema=None)
shifted_date = stats.offset_days(cls.date, 1)
df3 = utils.create_dataframe(cls.get_listen_row(shifted_date, 'rob', 2), schema=None)
shifted_date = stats.offset_days(cls.date, 2)
df4 = utils.create_dataframe(cls.get_listen_row(shifted_date, 'rob', 2), schema=None)
test_mapped_df = df1.union(df2).union(df3).union(df4)
return test_mapped_df
def test_get_dates_to_generate_candidate_sets(self):
mapped_df = self.get_listens()
from_date, to_date = candidate_sets.get_dates_to_generate_candidate_sets(mapped_df,
self.recommendation_generation_window)
self.assertEqual(to_date, self.date)
expected_date = stats.offset_days(self.date, self.recommendation_generation_window).replace(hour=0, minute=0, second=0)
self.assertEqual(from_date, expected_date)
def test_get_listens_to_fetch_top_artists(self):
mapped_df = self.get_listens()
from_date, to_date = candidate_sets.get_dates_to_generate_candidate_sets(mapped_df,
self.recommendation_generation_window)
mapped_listens_subset = candidate_sets.get_listens_to_fetch_top_artists(mapped_df, from_date, to_date)
self.assertEqual(mapped_listens_subset.count(), 3)
def test_get_top_artists(self):
mapped_listens = utils.read_files_from_HDFS(self.mapped_listens_path)
top_artist_limit = 1
test_top_artist = candidate_sets.get_top_artists(mapped_listens, top_artist_limit, [])
cols = ['top_artist_credit_id', 'top_artist_name', 'user_name']
self.assertListEqual(sorted(cols), sorted(test_top_artist.columns))
self.assertEqual(test_top_artist.count(), 4)
# empty df
mapped_listens = mapped_listens.select('*').where(f.col('user_name') == 'lala')
with self.assertRaises(TopArtistNotFetchedException):
candidate_sets.get_top_artists(mapped_listens, top_artist_limit, [])
with self.assertRaises(TopArtistNotFetchedException):
candidate_sets.get_top_artists(mapped_listens, top_artist_limit, ['lala'])
def test_get_similar_artists(self):
df = utils.create_dataframe(
Row(
score=1.0,
id_0=1,
name_0="Less Than Jake",
id_1=2,
name_1="blahblah"
),
schema=None
)
df = df.union(utils.create_dataframe(
Row(
score=1.0,
id_0=2,
name_0="blahblah",
id_1=3,
name_1="Katty Peri"
),
schema=None
))
artist_relation_df = df.union(utils.create_dataframe(
Row(
score=1.0,
id_0=3,
name_0="Katty Peri",
id_1=1,
name_1="Less Than Jake"
),
schema=None
))
top_artist_df = self.get_top_artist()
similar_artist_limit = 10
similar_artist_df, similar_artist_df_html = candidate_sets.get_similar_artists(top_artist_df, artist_relation_df,
similar_artist_limit)
self.assertEqual(similar_artist_df.count(), 3)
cols = [
'similar_artist_credit_id', 'similar_artist_name', 'user_name'
]
self.assertListEqual(cols, similar_artist_df.columns)
self.assertEqual(similar_artist_df_html.count(), 4)
cols = [
'top_artist_credit_id',
'top_artist_name',
'similar_artist_credit_id',
'similar_artist_name',
'user_name'
]
self.assertListEqual(cols, similar_artist_df_html.columns)
artist_relation_df = utils.create_dataframe(
Row(
score=1.0,
id_0=6,
name_0="Less Than Jake",
id_1=7,
name_1="Wolfgang Amadeus Mozart"
),
schema=None
)
with self.assertRaises(SimilarArtistNotFetchedException):
candidate_sets.get_similar_artists(top_artist_df, artist_relation_df, similar_artist_limit)
def test_get_top_artist_candidate_set(self):
mapped_listens_df = utils.read_files_from_HDFS(self.mapped_listens_path)
recordings_df = create_dataframes.get_recordings_df(mapped_listens_df, {}, self.recordings_path)
users = create_dataframes.get_users_dataframe(mapped_listens_df, {}, self.users_path)
mapped_listens_subset = utils.read_files_from_HDFS(self.mapped_listens_subset_path)
top_artist_limit = 1
top_artist_df = candidate_sets.get_top_artists(mapped_listens_subset, top_artist_limit, [])
top_artist_candidate_set_df, top_artist_candidate_set_df_html = candidate_sets.get_top_artist_candidate_set(top_artist_df,
recordings_df,
users,
|
essteban/Auto-DJ | Python Scripts/sliceWav.py | Python | gpl-3.0 | 327 | 0.04893 | #!/usr/bin/python
import wave
import struct
def slice(f,w,init,end):
    outFile = wave.open('cutted_'+f,'w')
outFile.setparams((2,2,44100,0,'NONE','not compressed'))
w.setpos(init)
for i in range((end-w.tell())):
frames = w.readframes(1)
outFile.writeframes(frames)
outFile.close()
|
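An example invocation (the filename and frame positions are placeholders; the frame counts assume the 44.1 kHz rate hard-coded in setparams):
if __name__ == '__main__':
    f = 'track.wav'
    w = wave.open(f, 'r')
    slice(f, w, 44100 * 1, 44100 * 11)  # keep the audio between 0:01 and 0:11
    w.close()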
pratikmshah/practice | py-data-analysis/ftpDownloader-test.py | Python | mit | 831 | 0.008424 | # -*- coding: utf-8 -*-
"""
Created on Sat May 21 16:43:47 2016
@author: Pratik
"""
from ftplib import FTP
import os
# login and download file from ftp site and retrieve file (use default params)
def ftpDownloader(filename, host="ftp.pyclass.com", user="student@pyclass.com", passwd="student123"):
ftp = FTP(host) # get the host url of ftp site
ftp.login(user, passwd) # login with username and password
ftp.cwd('Data') # change directory to Data
os.chdir("/Users/Pratik/Documents/Pratik/Work/practice/py-data-analysis") # change directory
print(ftp.nlst()) # print list of all files in dir
    with open(filename, 'wb') as file:  # open the local file for binary writing
        ftp.retrbinary('RETR %s' % filename, file.write)  # stream the remote file's bytes into it
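An example call relying on the defaults baked into the signature (the filename is a placeholder and must exist in the server's Data directory):
ftpDownloader('sample.txt')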
Kraymer/beets | beets/dbcore/query.py | Python | mit | 27,242 | 0 | # -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""The Query type hierarchy for DBCore.
"""
from __future__ import division, absolute_import, print_function
import re
from operator import mul
from beets import util
from datetime import datetime, timedelta
import unicodedata
from functools import reduce
import six
if not six.PY2:
buffer = memoryview # sqlite won't accept memoryview in python 2
class ParsingError(ValueError):
"""Abstract class for any unparseable user-requested album/query
specification.
"""
class InvalidQueryError(ParsingError):
"""Represent any kind of invalid query.
The query should be a unicode string or a list, which will be space-joined.
"""
def __init__(self, query, explanation):
if isinstance(query, list):
query = " ".join(query)
message = u"'{0}': {1}".format(query, explanation)
super(InvalidQueryError, self).__init__(message)
class InvalidQueryArgumentValueError(ParsingError):
"""Represent a query argument that could not be converted as expected.
It exists to be caught in upper stack levels so a meaningful (i.e. with the
query) InvalidQueryError can be raised.
"""
def __init__(self, what, expected, detail=None):
message = u"'{0}' is not {1}".format(what, expected)
if detail:
message = u"{0}: {1}".format(message, detail)
super(InvalidQueryArgumentValueError, self).__init__(message)
class Query(object):
"""An abstract class representing a query into the item database.
"""
def clause(self):
"""Generate an SQLite expression implementing the query.
Return (clause, subvals) where clause is a valid sqlite
WHERE clause implementing the query and subvals is a list of
items to be substituted for ?s in the clause.
"""
return None, ()
def match(self, item):
"""Check whether this query matches a given Item. Can be used to
perform queries on arbitrary sets of Items.
"""
raise NotImplementedError
def __repr__(self):
return "{0.__class__.__name__}()".format(self)
def __eq__(self, other):
return type(self) == type(other)
def __hash__(self):
return 0
class FieldQuery(Query):
"""An abstract query that searches in a specific field for a
pattern. Subclasses must provide a `value_match` class method, which
determines whether a certain pattern string matches a certain value
string. Subclasses may also provide `col_clause` to implement the
same matching functionality in SQLite.
"""
def __init__(self, field, pattern, fast=True):
self.field = field
self.pattern = pattern
self.fast = fast
def col_clause(self):
return None, ()
def clause(self):
if self.fast:
return self.col_clause()
else:
# Matching a flexattr. This is a slow query.
return None, ()
@classmethod
def value_match(cls, pattern, value):
"""Determine whether the value matches the pattern. Both
arguments are strings.
"""
raise NotImplementedError()
def match(self, item):
return self.value_match(self.pattern, item.get(self.field))
def __repr__(self):
return ("{0.__class__.__name__}({0.field!r}, {0.pattern!r}, "
"{0.fast})".format(self))
def __eq__(self, other):
return super(FieldQuery, self).__eq__(other) and \
self.field == other.field and self.pattern == other.pattern
def __hash__(self):
return hash((self.field, hash(self.pattern)))
class MatchQuery(FieldQuery):
"""A query that looks for exact matches in an item field."""
def col_clause(self):
return self.field + " = ?", [self.pattern]
@classmethod
def value_match(cls, pattern, value):
return pattern == value
class NoneQuery(FieldQuery):
def __init__(self, field, fast=True):
super(NoneQuery, self).__init__(field, None, fast)
def col_clause(self):
return self.field + " IS NULL", ()
@classmethod
def match(cls, item):
try:
return item[cls.field] is None
except KeyError:
return True
def __repr__(self):
return "{0.__class__.__name__}({0.field!r}, {0.fast})".format(self)
class StringFieldQuery(FieldQuery):
"""A FieldQuery that converts values to strings before matching
them.
"""
@classmethod
def value_match(cls, pattern, value):
"""Determine whether the value matches the pattern. The value
may have any type.
"""
return cls.string_match(pattern, util.as_string(value))
@classmethod
def string_match(cls, pattern, value):
"""Determine whether the value matches the pattern. Both
arguments are strings. Subclasses implement this method.
"""
raise NotImplementedError()
class SubstringQuery(StringFieldQuery):
"""A query that matches a substring in a specific item field."""
def col_clause(self):
pattern = (self.pattern
.replace('\\', '\\\\')
.replace('%', '\\%')
.replace('_', '\\_'))
search = '%' + pattern + '%'
clause = self.field + " like ? escape '\\'"
subvals = [search]
return clause, subvals
@classmethod
def string_match(cls, pattern, value):
return pattern.lower() in value.lower()
class RegexpQuery(StringFieldQuery):
"""A query that matches a regular expression in a specific item
field.
Raises InvalidQueryError when the pattern is not a valid regular
expression.
"""
    def __init__(self, field, pattern, fast=True):
super(RegexpQuery, self).__init__(field, pattern, fast)
pattern = self._normalize(pattern)
try:
            self.pattern = re.compile(pattern)
except re.error as exc:
# Invalid regular expression.
            raise InvalidQueryArgumentValueError(pattern,
u"a regular expression",
format(exc))
@staticmethod
def _normalize(s):
"""Normalize a Unicode string's representation (used on both
patterns and matched values).
"""
return unicodedata.normalize('NFC', s)
@classmethod
def string_match(cls, pattern, value):
return pattern.search(cls._normalize(value)) is not None
class BooleanQuery(MatchQuery):
"""Matches a boolean field. Pattern should either be a boolean or a
string reflecting a boolean.
"""
def __init__(self, field, pattern, fast=True):
super(BooleanQuery, self).__init__(field, pattern, fast)
if isinstance(pattern, six.string_types):
self.pattern = util.str2bool(pattern)
self.pattern = int(self.pattern)
class BytesQuery(MatchQuery):
"""Match a raw bytes field (i.e., a path). This is a necessary hack
to work around the `sqlite3` module's desire to treat `bytes` and
`unicode` equivalently in Python 2. Always use this query instead of
`MatchQuery` when matching on BLOB values.
"""
def __init__(self, field, pattern):
super(BytesQuery, self).__init__(field, pattern)
# Use a buffer/memoryview representation of the pattern for SQLite
# matching. This instructs SQLite to treat the blob as binary
# rather than encoded |
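A small in-memory illustration of the FieldQuery matching contract defined above; a plain dict stands in for a beets Item, since match() only needs .get():
q = SubstringQuery('title', 'love')
print(q.match({'title': 'Love Will Tear Us Apart'}))  # True -- case-insensitive substring
print(q.match({'title': 'Blue Monday'}))              # False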
chris1610/pbpython | code/advanced_excel.py | Python | bsd-3-clause | 2,204 | 0.002269 | """
See http://pbpython.com/advanced-excel-workbooks.html for details on this script
"""
from __future__ import print_function
import pandas as pd
from xlsxwriter.utility import xl_rowcol_to_cell
def format_excel(writer, df_size):
""" Add Excel specific formatting to the workbook
df_size is a tuple representing the size of the dataframe - typically called
by df.shape -> (20,3)
"""
# Get the workbook and the summary sheet so we can add the formatting
workbook = writer.book
worksheet = writer.sheets['summary']
# Add currency formatting and apply it
money_fmt = workbook.add_format({'num_format': 42, 'align': 'center'})
worksheet.set_column('A:A', 20)
worksheet.set_column('B:C', 15, money_fmt)
# Add 1 to row so we can include a total
    # and subtract 1 from the column count because the index column is excluded
    table_end = xl_rowcol_to_cell(df_size[0] + 1, df_size[1] - 1)
# This assumes we start in the left hand corner
table_range = 'A1:{}'.format(table_end)
    worksheet.add_table(table_range, {'columns': [{'header': 'account',
'total_string': 'Total'},
{'header': 'Total Sales',
'total_function': 'sum'},
{'header': 'Average Sales',
'total_function': 'average'}],
'autofilter': False,
'total_row': True,
'style': 'Table Style Medium 20'})
if __name__ == "__main__":
sales_df = pd.read_excel('https://github.com/chris1610/pbpython/blob/master/data/sample-salesv3.xlsx?raw=true')
sales_summary = sales_df.groupby(['name'])['ext price'].agg(['sum', 'mean'])
# Reset the index for consistency when saving in Excel
sales_summary.reset_index(inplace=True)
writer = pd.ExcelWriter('sales_summary.xlsx', engine='xlsxwriter')
sales_summary.to_excel(writer, 'summary', index=False)
format_excel(writer, sales_summary.shape)
writer.save()
|
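For reference, xl_rowcol_to_cell converts zero-based (row, col) pairs into the A1-style addresses the table range string is built from:
from xlsxwriter.utility import xl_rowcol_to_cell
print(xl_rowcol_to_cell(0, 0))   # 'A1'
print(xl_rowcol_to_cell(21, 2))  # 'C22' -- a 20-row frame plus header and total rows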
MingfeiPan/leetcode | stack/394.py | Python | apache-2.0 | 616 | 0.003247 | class Solution:
def decodeString(self, s):
"""
:type s: str
:rtype: str
"""
stack = []
multi = 0
ret = ''
for c in s:
if c == '[':
                stack.append(ret)
stack.append(multi)
ret = ''
multi = 0
elif c == ']':
num = stack.pop()
                temp = stack.pop()
ret = temp + num*ret
elif c.isdigit():
multi = multi*10 + int(c)
else:
ret += c
return ret
|
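Expected behaviour on the classic inputs for this problem:
s = Solution()
print(s.decodeString("3[a]2[bc]"))  # 'aaabcbc'
print(s.decodeString("3[a2[c]]"))   # 'accaccacc'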
jeremiahyan/odoo | addons/test_mail/tests/test_mail_thread_internals.py | Python | gpl-3.0 | 16,837 | 0.004158 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from unittest.mock import patch
from unittest.mock import DEFAULT
from werkzeug.urls import url_parse, url_decode
from odoo import exceptions
from odoo.addons.test_mail.models.test_mail_models import MailTestSimple
from odoo.addons.test_mail.tests.common import TestMailCommon, TestRecipients
from odoo.tests.common import tagged, HttpCase
from odoo.tools import mute_logger
class TestChatterTweaks(TestMailCommon, TestRecipients):
@classmethod
def setUpClass(cls):
super(TestChatterTweaks, cls).setUpClass()
cls.test_record = cls.env['mail.test.simple'].with_context(cls._test_context).create({'name': 'Test', 'email_from': 'ignasse@example.com'})
def test_post_no_subscribe_author(self):
original = self.test_record.message_follower_ids
        self.test_record.with_user(self.user_employee).with_context({'mail_create_nosubscribe': True}).message_post(
body='Test Body', message_type='comment', subtype_xmlid='mail.mt_comment')
self.assertEqual(self.test_record.message_follower_ids.mapped('partner_id'), original.mapped('partner_id'))
@mute_logger('odoo.addons.mail.models.mail_mail')
def test_post_no_subscribe_recipients(self):
original = self.test_record.message_follower_ids
self.test_record.with_user(self.user_employee).with_context({'mail_create_nosubscribe': True}).message_post(
body='Test Body', message_type='comment', subtype_xmlid='mail.mt_comment', partner_ids=[self.partner_1.id, self.partner_2.id])
self.assertEqual(self.test_record.message_follower_ids.mapped('partner_id'), original.mapped('partner_id'))
@mute_logger('odoo.addons.mail.models.mail_mail')
def test_post_subscribe_recipients(self):
original = self.test_record.message_follower_ids
self.test_record.with_user(self.user_employee).with_context({'mail_create_nosubscribe': True, 'mail_post_autofollow': True}).message_post(
body='Test Body', message_type='comment', subtype_xmlid='mail.mt_comment', partner_ids=[self.partner_1.id, self.partner_2.id])
self.assertEqual(self.test_record.message_follower_ids.mapped('partner_id'), original.mapped('partner_id') | self.partner_1 | self.partner_2)
def test_chatter_mail_create_nolog(self):
""" Test disable of automatic chatter message at create """
rec = self.env['mail.test.simple'].with_user(self.user_employee).with_context({'mail_create_nolog': True}).create({'name': 'Test'})
self.flush_tracking()
self.assertEqual(rec.message_ids, self.env['mail.message'])
rec = self.env['mail.test.simple'].with_user(self.user_employee).with_context({'mail_create_nolog': False}).create({'name': 'Test'})
self.flush_tracking()
self.assertEqual(len(rec.message_ids), 1)
def test_chatter_mail_notrack(self):
""" Test disable of automatic value tracking at create and write """
rec = self.env['mail.test.track'].with_user(self.user_employee).create({'name': 'Test', 'user_id': self.user_employee.id})
self.flush_tracking()
self.assertEqual(len(rec.message_ids), 1,
"A creation message without tracking values should have been posted")
self.assertEqual(len(rec.message_ids.sudo().tracking_value_ids), 0,
"A creation message without tracking values should have been posted")
rec.with_context({'mail_notrack': True}).write({'user_id': self.user_admin.id})
self.flush_tracking()
self.assertEqual(len(rec.message_ids), 1,
"No new message should have been posted with mail_notrack key")
rec.with_context({'mail_notrack': False}).write({'user_id': self.user_employee.id})
self.flush_tracking()
self.assertEqual(len(rec.message_ids), 2,
"A tracking message should have been posted")
self.assertEqual(len(rec.message_ids.sudo().mapped('tracking_value_ids')), 1,
"New tracking message should have tracking values")
def test_chatter_tracking_disable(self):
""" Test disable of all chatter features at create and write """
rec = self.env['mail.test.track'].with_user(self.user_employee).with_context({'tracking_disable': True}).create({'name': 'Test', 'user_id': self.user_employee.id})
self.flush_tracking()
self.assertEqual(rec.sudo().message_ids, self.env['mail.message'])
self.assertEqual(rec.sudo().mapped('message_ids.tracking_value_ids'), self.env['mail.tracking.value'])
rec.write({'user_id': self.user_admin.id})
self.flush_tracking()
self.assertEqual(rec.sudo().mapped('message_ids.tracking_value_ids'), self.env['mail.tracking.value'])
rec.with_context({'tracking_disable': False}).write({'user_id': self.user_employee.id})
self.flush_tracking()
self.assertEqual(len(rec.sudo().mapped('message_ids.tracking_value_ids')), 1)
rec = self.env['mail.test.track'].with_user(self.user_employee).with_context({'tracking_disable': False}).create({'name': 'Test', 'user_id': self.user_employee.id})
self.flush_tracking()
self.assertEqual(len(rec.sudo().message_ids), 1,
"Creation message without tracking values should have been posted")
self.assertEqual(len(rec.sudo().mapped('message_ids.tracking_value_ids')), 0,
"Creation message without tracking values should have been posted")
def test_cache_invalidation(self):
""" Test that creating a mail-thread record does not invalidate the whole cache. """
# make a new record in cache
record = self.env['res.partner'].new({'name': 'Brave New Partner'})
self.assertTrue(record.name)
# creating a mail-thread record should not invalidate the whole cache
self.env['res.partner'].create({'name': 'Actual Partner'})
self.assertTrue(record.name)
class TestDiscuss(TestMailCommon, TestRecipients):
@classmethod
def setUpClass(cls):
super(TestDiscuss, cls).setUpClass()
cls.test_record = cls.env['mail.test.simple'].with_context(cls._test_context).create({
'name': 'Test',
'email_from': 'ignasse@example.com'
})
@mute_logger('openerp.addons.mail.models.mail_mail')
def test_mark_all_as_read(self):
def _employee_crash(*args, **kwargs):
""" If employee is test employee, consider he has no access on document """
recordset = args[0]
if recordset.env.uid == self.user_employee.id and not recordset.env.su:
if kwargs.get('raise_exception', True):
raise exceptions.AccessError('Hop hop hop Ernest, please step back.')
return False
return DEFAULT
with patch.object(MailTestSimple, 'check_access_rights', autospec=True, side_effect=_employee_crash):
with self.assertRaises(exceptions.AccessError):
self.env['mail.test.simple'].with_user(self.user_employee).browse(self.test_record.ids).read(['name'])
employee_partner = self.env['res.partner'].with_user(self.user_employee).browse(self.partner_employee.ids)
# mark all as read clear needactions
msg1 = self.test_record.message_post(body='Test', message_type='comment', subtype_xmlid='mail.mt_comment', partner_ids=[employee_partner.id])
self._reset_bus()
with self.assertBus(
[(self.cr.dbname, 'res.partner', employee_partner.id)],
message_items=[{
'type': 'mail.message/mark_as_read',
'payload': {
'message_ids': [msg1.id],
'needaction_inbox_counter': 0,
},
}]):
employee_partner.env['mail.message'].mark_all_as_read(domain=[])
na_count = employee_partner._get_needaction_count()
self.assertEqual(na_count |
SaikWolf/gnuradio | grc/core/utils/odict.py | Python | gpl-3.0 | 3,418 | 0.001755 | """
Copyright 2008-2015 Free Software Foundation, Inc.
This file is part of GNU Radio
GNU Radio Companion is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
GNU Radio Companion is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
"""
from UserDict import DictMixin
class odict(DictMixin):
def __init__(self, d={}):
self._keys = list(d.keys())
        self._data = dict(d.copy())
def __setitem__(self, key, value):
if key not in self._data:
self._keys.append(key)
self._data[key] = value
def __getitem__(self, key):
return self._data[key]
def __delitem__(self, key):
del self._data[key]
self._keys.remove(key)
def keys(self):
return list(self._keys)
def copy(self):
        copy_dict = odict()
copy_dict._data = self._data.copy()
copy_dict._keys = list(self._keys)
return copy_dict
def insert_after(self, pos_key, key, val):
"""
Insert the new key, value entry after the entry given by the position key.
If the positional key is None, insert at the end.
Args:
pos_key: the positional key
key: the key for the new entry
val: the value for the new entry
"""
index = (pos_key is None) and len(self._keys) or self._keys.index(pos_key)
if key in self._keys:
raise KeyError('Cannot insert, key "{0}" already exists'.format(str(key)))
self._keys.insert(index+1, key)
self._data[key] = val
def insert_before(self, pos_key, key, val):
"""
Insert the new key, value entry before the entry given by the position key.
If the positional key is None, insert at the beginning.
Args:
pos_key: the positional key
key: the key for the new entry
val: the value for the new entry
"""
index = (pos_key is not None) and self._keys.index(pos_key) or 0
if key in self._keys:
raise KeyError('Cannot insert, key "{0}" already exists'.format(str(key)))
self._keys.insert(index, key)
self._data[key] = val
def find(self, key):
"""
Get the value for this key if exists.
Args:
key: the key to search for
Returns:
the value or None
"""
if key in self:
return self[key]
return None
def findall(self, key):
"""
Get a list of values for this key.
Args:
key: the key to search for
Returns:
a list of values or empty list
"""
obj = self.find(key)
if obj is None:
obj = list()
if isinstance(obj, list):
return obj
return [obj]
def clear(self):
self._data.clear()
del self._keys[:] |
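A quick demonstration of the ordering guarantees and positional helpers defined above (Python 2, like the module itself):
d = odict()
d['a'] = 1
d['c'] = 3
d.insert_after('a', 'b', 2)  # place 'b' right after 'a'
print(d.keys())              # ['a', 'b', 'c']
print(d.findall('b'))        # [2] -- findall always returns a list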
email2liyang/pypoll | mysite/books/forms.py | Python | mit | 445 | 0.008989 | from django import forms
class ContactForm(forms.Form):
subject = forms.CharField(required=True,max_length=4)
    email = forms.EmailField(required=False)
message = forms.CharField(required=True,widget=forms.Textarea)
def clean_message(self):
message = self.cleaned_data['message']
num_words = len(message)
if num_words < 3:
            raise forms.ValidationError("not enough words")
return message |
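Exercising the validation inside a configured Django project (field values are made up):
form = ContactForm(data={'subject': 'Hi', 'email': '', 'message': 'only two'})
print(form.is_valid())            # False -- fewer than three words
print(form.errors.get('message'))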
mhugent/Quantum-GIS | python/plugins/processing/algs/gdal/sieve.py | Python | gpl-2.0 | 3,202 | 0.000312 | # -*- coding: utf-8 -*-
"""
***************************************************************************
sieve.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from PyQt4 import QtGui, QtCore
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.parameters.ParameterRaster import ParameterRaster
from processing.parameters.ParameterSelection import ParameterSelection
from processing.parameters.Param | eterNumber import ParameterNumber
from processing.outputs.OutputRaster import OutputRaster
from processing.tools.system import *
from processing.algs.gdal.GdalUtils import GdalUtils
class sieve(GeoAlgorithm):
INPUT = 'INPUT'
THRESHOLD = 'THRESHOLD'
CONNECTIONS = 'CONNECTIONS'
OUTPUT = 'OUTPUT'
PIXEL_CONNECTIONS = ['4', '8']
def getIcon(self):
filepath = os.path.dirname(__file__) + '/icons/sieve.png'
return QtGui.QIcon(filepath)
def defineCharacteristics(self):
self.name = 'Sieve'
self.group = '[GDAL] Analysis'
self.addParameter(ParameterRaster(self.INPUT, 'Input layer', False))
self.addParameter(ParameterNumber(self.THRESHOLD, 'Threshold', 0,
9999, 2))
self.addParameter(ParameterSelection(self.CONNECTIONS,
'Pixel connection', self.PIXEL_CONNECTIONS, 0))
self.addOutput(OutputRaster(self.OUTPUT, 'Output layer'))
def processAlgorithm(self, progress):
output = self.getOutputValue(self.OUTPUT)
arguments = []
arguments.append('-st')
arguments.append(str(self.getParameterValue(self.THRESHOLD)))
arguments.append('-' +
self.PIXEL_CONNECTIONS[self.getParameterValue(
self.CONNECTIONS)])
arguments.append('-of')
arguments.append(GdalUtils.getFormatShortNameFromFilename(output))
arguments.append(self.getParameterValue(self.INPUT))
arguments.append(output)
commands = []
if isWindows():
commands = ['cmd.exe', '/C ', 'gdal_sieve.bat',
GdalUtils.escapeAndJoin(arguments)]
else:
commands = ['gdal_sieve.py', GdalUtils.escapeAndJoin(arguments)]
GdalUtils.runGdal(commands, progress)
|
xuleiboy1234/autoTitle | tensorflow/tensorflow/examples/speech_commands/train.py | Python | mit | 16,617 | 0.007041 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Simple speech recognition to spot a limited number of keywords.
This is a self-contained example script that will train a very basic audio
recognition model in TensorFlow. It can download the necessary training data,
and runs with reasonable defaults to train within a few hours even only using a
CPU. For more information see http://tensorflow.org/tutorials/audio_recognition.
It is intended as an introduction to using neural networks for audio
recognition, and is not a full speech recognition system. For more advanced
speech systems, I recommend looking into Kaldi. This network uses a keyword
detection style to spot discrete words from a small vocabulary, consisting of
"yes", "no", "up", "down", "left", "right", "on", "off", "stop", and "go".
To run the training process, use:
bazel run tensorflow/examples/speech_commands:train
This will write out checkpoints to /tmp/speech_commands_train/, and will
download over 1GB of open source training data, so you'll need enough free space
and a good internet connection. The default data is a collection of thousands of
one-second .wav files, each containing one spoken word. This data set is
collected from https://aiyprojects.withgoogle.com/open_speech_recording, please
consider contributing to help improve this and other models!
As training progresses, it will print out its accuracy metrics, which should
rise above 90% by the end. Once it's complete, you can run the freeze script to
get a binary GraphDef that you can easily deploy on mobile applications.
If you want to train on your own data, you'll need to create .wavs with your
recordings, all at a consistent length, and then arrange them into subfolders
organized by label. For example, here's a possible file structure:
my_wavs >
up >
audio_0.wav
audio_1.wav
down >
audio_2.wav
audio_3.wav
other>
audio_4.wav
audio_5.wav
You'll also need to tell the script what labels to look for, using the
`--wanted_words` argument. In this case, 'up,down' might be what you want, and
the audio in the 'other' folder would be used to train an 'unknown' category.
To pull this all together, you'd run:
bazel run tensorflow/examples/speech_commands:train -- \
--data_dir=my_wavs --wanted_words=up,down
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os.path
import sys
import numpy as np
import tensorflow as tf
import input_data
import models
from tensorflow.python.platform import gfile
FLAGS = None
def main(_):
# We want to see all the logging messages for this tutorial.
tf.logging.set_verbosity(tf.logging.INFO)
# Start a new TensorFlow session.
sess = tf.InteractiveSession()
# Begin by making sure we have the training data we need. If you already have
# training data of your own, use `--data_url= ` on the command line to avoid
# downloading.
model_settings = models.prepare_model_settings(
len(input_data.prepare_words_list(FLAGS.wanted_words.split(','))),
FLAGS.sample_rate, FLAGS.clip_duration_ms, FLAGS.window_size_ms,
FLAGS.window_stride_ms, FLAGS.dct_coefficient_count)
audio_processor = input_data.AudioProcessor(
FLAGS.data_url, FLAGS.data_dir, FLAGS.silence_percentage,
FLAGS.unknown_percentage,
FLAGS.wanted_words.split(','), FLAGS.validation_percentage,
FLAGS.testing_percentage, model_settings)
fingerprint_size = model_settings['fingerprint_size']
  label_count = model_settings['label_count']
time_shift_samples = int((FLAGS.time_shift_ms * FLAGS.sample_rate) / 1000)
# Figure out the learning rates for each training phase. Since it's often
# effective to have high learning rates at the start of training, followed by
# lower levels towards the end, the number of steps and learning rates can be
# specified as comma-separated lists to define the rate at each stage. For
  # example --how_many_training_steps=10000,3000 --learning_rate=0.001,0.0001
# will run 13,000 training loops in total, with a rate of 0.001 for the first
# 10,000, and 0.0001 for the final 3,000.
training_steps_list = map(int, FLAGS.how_many_training_steps.split(','))
learning_rates_list = map(float, FLAGS.learning_rate.split(','))
if len(training_steps_list) != len(learning_rates_list):
raise Exception(
'--how_many_training_steps and --learning_rate must be equal length '
'lists, but are %d and %d long instead' % (len(training_steps_list),
len(learning_rates_list)))
fingerprint_input = tf.placeholder(
tf.float32, [None, fingerprint_size], name='fingerprint_input')
logits, dropout_prob = models.create_model(
fingerprint_input,
model_settings,
FLAGS.model_architecture,
is_training=True)
# Define loss and optimizer
ground_truth_input = tf.placeholder(
tf.float32, [None, label_count], name='groundtruth_input')
# Optionally we can add runtime checks to spot when NaNs or other symptoms of
# numerical errors start occurring during training.
control_dependencies = []
if FLAGS.check_nans:
checks = tf.add_check_numerics_ops()
control_dependencies = [checks]
# Create the back propagation and training evaluation machinery in the graph.
with tf.name_scope('cross_entropy'):
cross_entropy_mean = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(
labels=ground_truth_input, logits=logits))
tf.summary.scalar('cross_entropy', cross_entropy_mean)
with tf.name_scope('train'), tf.control_dependencies(control_dependencies):
learning_rate_input = tf.placeholder(
tf.float32, [], name='learning_rate_input')
train_step = tf.train.GradientDescentOptimizer(
learning_rate_input).minimize(cross_entropy_mean)
predicted_indices = tf.argmax(logits, 1)
expected_indices = tf.argmax(ground_truth_input, 1)
correct_prediction = tf.equal(predicted_indices, expected_indices)
confusion_matrix = tf.confusion_matrix(expected_indices, predicted_indices)
evaluation_step = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
tf.summary.scalar('accuracy', evaluation_step)
global_step = tf.contrib.framework.get_or_create_global_step()
increment_global_step = tf.assign(global_step, global_step + 1)
saver = tf.train.Saver(tf.global_variables())
# Merge all the summaries and write them out to /tmp/retrain_logs (by default)
merged_summaries = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/train',
sess.graph)
validation_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/validation')
tf.global_variables_initializer().run()
start_step = 1
if FLAGS.start_checkpoint:
models.load_variables_from_checkpoint(sess, FLAGS.start_checkpoint)
start_step = global_step.eval(session=sess)
tf.logging.info('Training from step: %d ', start_step)
# Save graph.pbtxt.
tf.train.write_graph(sess.graph_def, FLAGS.train_dir,
FLAGS.model_architecture + '.pbtxt')
# Save list of words.
with gfile.GFile(
os.path.join(FLAGS.train_dir, FLAGS.model_architecture + '_labels.txt'),
'w') as f:
f.write('\n'.join(audio_processor.words_list))
# Training loop.
training_steps_max = np.sum(training_steps_list)
for training_step in xrange(start_step, training_steps_max + 1):
# Figure out what the current learning |
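The comma-separated schedule described in the docstring and comment above parses like this (values copied from that comment):
steps = list(map(int, '10000,3000'.split(',')))
rates = list(map(float, '0.001,0.0001'.split(',')))
print(sum(steps))               # 13000 training loops in total
print(list(zip(steps, rates)))  # [(10000, 0.001), (3000, 0.0001)]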
diogo149/treeano | examples/mnist_ml_l2.py | Python | apache-2.0 | 3,309 | 0.000302 | from __future__ import division, absolute_import
from __future__ import print_function, unicode_literals
import itertools
import numpy as np
import sklearn.datasets
import sklearn.cross_validation
import sklearn.metrics
import theano
import theano.tensor as T
import treeano
import treeano.nodes as tn
import canopy
import canopy.sandbox.datasets
fX = theano.config.floatX
# ############################### prepare data ###############################
train, valid, test = canopy.sandbox.datasets.mnist()
# ############################## prepare model ##############################
# architecture:
# - fully connected 512 units
# - ReLU
# - 50% dropout
# - fully connected 512 units
# - ReLU
# - 50% dropout
# - fully connected 10 units
# - softmax
# - the batch size can be provided as `None` to make the network
# work for multiple different batch sizes
model = tn.HyperparameterNode(
"model",
tn.SequentialNode(
"seq",
[tn.InputNode("x", shape=(None, 1, 28, 28)),
tn.DenseNode("fc1"),
tn.ReLUNode("relu1"),
tn.DropoutNode("do1"),
tn.DenseNode("fc2"),
tn.ReLUNode("relu2"),
tn.DropoutNode("do2"),
tn.DenseNode("fc3", num_units=10),
tn.SoftmaxNode("pred"),
tn.AuxiliaryCostNode(
"cls_cost",
{"target": tn.InputNode("y", shape=(None,), dtype="int32")}),
]),
num_units=512,
dropout_probability=0.5,
inits=[treeano.inits.XavierNormalInit()],
)
model = tn.L2PenaltyNode(
"l2_cost",
model,
l2_weight=0.0001,
)
with_updates = tn.HyperparameterNode(
"with_updates",
tn.AdamNode(
"adam",
{"subtree": model,
"cost": tn.InputElementwiseSumNode("cost")}),
cost_function=treeano.utils.categorical_crossentropy_i32,
cost_reference="cost",
)
network = with_updates.network()
network.build() # build eagerly to share weights
BATCH_SIZE = 500
valid_fn = canopy.handled_fn(
    network,
[canopy.handlers.time_call(key="valid_time"),
canopy.handlers.override_hyperparameters(dropout_probability=0),
canopy.handlers.chunk_variables(batch_size=BATCH_SIZE,
variables=["x", "y"])],
{"x": "x", "y": "y"},
{"cost": "cost", "pred": "pred"})
def validate(in_dict, results_dict):
valid_out = valid_fn(valid)
probabilities = valid_out["pred"]
predicted_classes = np.argmax(probabilities, axis=1)
results_dict["valid_cost"] = valid_out["cost"]
results_dict["valid_time"] = valid_out["valid_time"]
results_dict["valid_accuracy"] = sklearn.metrics.accuracy_score(
valid["y"], predicted_classes)
train_fn = canopy.handled_fn(
network,
[canopy.handlers.time_call(key="total_time"),
canopy.handlers.call_after_every(1, validate),
canopy.handlers.time_call(key="train_time"),
canopy.handlers.chunk_variables(batch_size=BATCH_SIZE,
variables=["x", "y"])],
{"x": "x", "y": "y"},
{"train_cost": "cost"},
include_updates=True)
# ################################# training #################################
print("Starting training...")
canopy.evaluate_until(fn=train_fn,
gen=itertools.repeat(train),
max_iters=25)
|
mitsuhiko/pip | tests/functional/test_requests.py | Python | mit | 366 | 0.010929 | import pytest
@pytest.mark.skipif
def test_timeout(script):
result = script.pip("--timeout", "0.01", "install", "-vvv", "INITools",
expect_error=True,
)
assert "Could not fetch URL https://pypi.python. | org/simple/INITools/: timed out" in result.stdout
assert "Could not fetch URL https://pypi.python.org/simple/: timed out" in result.stdout
|
giorgioladu/Rapa | portal.py | Python | apache-2.0 | 987 | 0 | # -*- coding: utf-8 -*-
#
# portal.py # # # # # # # # # #
#
# Copyright 2016 Giorgio Ladu <giorgio.ladu >at< gmail.com> #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# # # # # # #
import config
print 'Status: 302 Found'
print 'Location: http://' + config.custom_url
print ''
|
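The script expects a sibling config module defining the redirect target, e.g. (the URL is a placeholder):
# config.py -- hypothetical
custom_url = 'www.example.org'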
andree1320z/deport-upao-web | deport_upao/apps/productos/urls.py | Python | mit | 176 | 0 | from django.conf.urls import url
from .views import list_products
app_name = "producto"
urlpatterns = [
url(r'list-productos/$', list_products, name='list_products'),
]
|
rahulvgmail/TripMapR | TripMapR/travelogue/migrations/0005_auto_20150420_1747.py | Python | bsd-3-clause | 770 | 0.002597 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('travelogue', '0004_auto_20150418_1655'),
]
operations = [
migrations.AddField(
model_name='tripnote',
name='date_taken',
            field=models.DateTimeField(verbose_name='date note captured by user on the field', null=True, editable=False, blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='tripnote',
name='view_count',
field=models.PositiveIntegerField(default=0, verbose_name='view count', editable=False),
preserve_default=True,
),
]
|
resamsel/dbmanagr | src/tests/test_options.py | Python | gpl-3.0 | 4,176 | 0 | # -*- coding: utf-8 -*-
#
# Copyright © 2014 René Samselnig
#
# This file is part of Database Navigator.
#
# Database Navigator is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Database Navigator is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Database Navigator. If not, see <http://www.gnu.org/licenses/>.
#
from datetime import datetime
from tests.testcase import DbTestCase
from tests.mock.sources import MockSource
from dbmanagr import options
class OptionsTestCase(DbTestCase):
def test_escape_keyword(self):
"""Tests the options.escape_keyword function"""
self.assertEqual(
'a',
options.escape_keyword('a'))
self.assertEqual(
'"user"',
options.escape_keyword('user'))
self.assertEqual(
'"table"',
options.escape_keyword('table'))
def test_format_value(self):
"""Tests the options.format_value function"""
DbTestCase.connection.close()
DbTestCase.connection = MockSource().list()[0]
DbTestCase.connection.connect()
con = DbTestCase.connection
user = con.table('user')
user2 = con.table('user2')
article = con.table('article')
now = datetime.now()
self.assertEqual(
u'null',
options.format_value(None, None)
)
self.assertEqual(
            '1',
options.format_value(None, 1)
)
self.assertEqual(
"'d'",
options.format_value(None, 'd')
)
self.assertEqual(
u'7',
options.format_value(user.column('id'), 7)
)
self.assertEqual(
u"'a'",
            options.format_value(user.column('id'), 'a')
)
self.assertEqual(
'null',
options.format_value(user.column('id'), None)
)
self.assertEqual(
'true',
options.format_value(user2.column('deleted'), True)
)
self.assertEqual(
'3.141500',
options.format_value(user2.column('score'), 3.1415)
)
self.assertEqual(
"'3.14.15'",
options.format_value(user2.column('score'), '3.14.15')
)
self.assertEqual(
"'{}'".format(str(now)),
options.format_value(article.column('created'), now)
)
self.assertEqual(
"('a', 'b')",
options.format_value(article.column('id'), ['a', 'b'])
)
self.assertEqual(
u"'[BLOB]'",
options.format_value(article.column('id'), buffer('abc'))
)
def test_restriction(self):
"""Tests the options.restriction function"""
con = DbTestCase.connection
user = con.table('user')
self.assertEqual(
u'a.id is null',
options.restriction('a', user.column('id'), '=', None))
self.assertEqual(
u'id is null',
options.restriction(None, user.column('id'), '=', None))
self.assertEqual(
u'a.id = 7',
options.restriction('a', user.column('id'), '=', 7))
self.assertEqual(
u"a.id = 'a'",
options.restriction('a', user.column('id'), '=', 'a'))
self.assertEqual(
u"id = 'a'",
options.restriction(None, user.column('id'), '=', 'a'))
self.assertRaises(
Exception,
options.restriction,
None, None, None, None
)
def test_options_parser(self):
"""Tests the options OptionsParser class"""
self.assertEqual(
None,
options.OptionsParser().parse(None)
)
|
ripiuk/fant_sizer | setup.py | Python | mit | 1,096 | 0 | from setuptools import setup, find_packages
from os.path import join, dirname
setup(
name="fant_sizer",
version="0.7",
author="Rypiuk Oleksandr",
author_email="ripiuk96@gmail.com",
description="fant_sizer command-line file-information",
url="https://github.com/ripiuk/fant_sizer",
keywords="file command-line information size tool recursively",
license="MIT",
classifiers=[
'Topic :: Utilities',
        'Environment :: Console',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
        'Intended Audience :: Developers',
'Intended Audience :: Information Technology',
'Development Status :: 5 - Production/Stable',
'Programming Language :: Python :: 3.6'
],
packages=find_packages(),
long_description=open(join(dirname(__file__), "README.rst")).read(),
entry_points={
"console_scripts":
['fant_sizer = fant_sizer.fant_sizer:_main'],
},
)
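# After installing the package (for example with "pip install ."), the entry
# point declared above exposes a console command; an illustrative invocation
# would be:
#
#   fant_sizer --help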
|
tomzx/decision-trees | clothes.py | Python | mit | 145 | 0.075862 | from enum import Enum
class Clothes(Enum):
Underwear = 1
Pants = 2
Shorts = 3
Socks = 4
    Shoes = 5
    Shirt = 6
Coat = 7
Hat = 8
Gloves = 9 |
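# A quick usage sketch (not part of the original file): Enum members can be
# iterated in definition order and carry their integer values, which is how a
# decision tree could consume them.
if __name__ == '__main__':
    for item in Clothes:
        print('%s = %d' % (item.name, item.value))  # e.g. "Underwear = 1"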
lele94218/social_network_paper | facebook/getdata/old_clustering_example.py | Python | gpl-2.0 | 1,361 | 0.012491 | # -*- coding: utf-8 -*-
"""
Created on Mon Aug 03 10:16:52 2015
@author: Keine
"""
import sqlite3
cx = sqlite3.connect("../text2DB/get_data.db")
distxy = [([0.0] * 49) for i in range(49)]
cu = cx.cursor()
for i in range(49):
for j in range(49):
if i == j:
            distxy[i][j] = 0.0  # diagonal: distance of a node to itself is zero
else:
print i
print j
sql = cu.execute("""select similarity from old_similarity where id1 = ? and id2 = ?""", (i,j))
if sql.fetchall() == []:
sim = 0
else:
sql = cu.execute("""select similarity from old_similarity where id1 = ? and id2 = ?""", (i,j))
sim = float(sql.fetchone()[0])
distxy[i][j] = sim
cx.close();
#print distxy[49-1][48-1]
#from scipy.cluster.hierarchy import linkage, dendrogram
#R = dendrogram(linkage(distxy, method='complete'))
#suptitle('Cluster Dendrogram', fontweight='bold', fontsize=14);
import numpy as np
import matplotlib.pyplot as plt
from scipy.spatial.distance import pdist, squareform
from scipy.cluster.hierarchy import linkage, dendrogram
data_dist = pdist(distxy) # computing the distance
data_link = linkage(data_dist) # computing the linkage
dendrogram(data_link)
plt.xlabel('User_ID')
plt.ylabel('Similarity ratio')
plt.suptitle('Hierarchy Clustering', fontweight='bold', fontsize=14);
|
nicko96/Chrome-Infra | infra/services/builder_alerts/__main__.py | Python | bsd-3-clause | 12,085 | 0.009516 | #!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import contextlib
import cStringIO
import datetime
import gzip
import json
import logging
import multiprocessing
import os
import sys
import traceback
import requests
import requests_cache
from infra_libs import logs
from infra.libs.service_utils import outer_loop
from infra.services.builder_alerts import analysis
from infra.services.builder_alerts import buildbot
from infra.services.builder_alerts import gatekeeper_extras
from infra.services.builder_alerts import alert_builder
import infra
infra_module_path = os.path.dirname(os.path.abspath(infra.__file__))
infra_dir = os.path.dirname(infra_module_path)
top_dir = os.path.dirname(infra_dir)
build_scripts_dir = os.path.join(top_dir, 'build', 'scripts')
sys.path.insert(0, build_scripts_dir)
# Our sys.path hacks are too bursting with chest-hair for pylint's little brain.
from slave import gatekeeper_ng_config # pylint: disable=F0401
CACHE_PATH = 'build_cache'
# We have 13 masters. No point in spawning more processes
PARALLEL_TASKS = 13
CONCURRENT_TASKS = 16
class SubProcess(object):
def __init__(self, cache, old_alerts, builder_filter, jobs):
super(SubProcess, self).__init__()
self._cache = cache
self._old_alerts = old_alerts
self._builder_filter = builder_filter
self._jobs = jobs
def __call__(self, master_url):
try:
master_json = buildbot.fetch_master_json(master_url)
if not master_json:
return (None, None, None, master_url)
master_alerts, stale_master_alert = alert_builder.alerts_for_master(
self._cache, master_url, master_json, self._old_alerts,
self._builder_filter, self._jobs)
      # FIXME: The builder info doesn't really belong here. The builder
# revisions tool uses this and we happen to have the builder json cached
# at this point so it's cheap to compute, but it should be moved
# to a different feed.
data, stale_builder_alerts = (
buildbot.latest_builder_info_and_alerts_for_master(
self._cache, master_url, master_json))
if stale_master_alert:
        stale_builder_alerts.append(stale_master_alert)
return (master_alerts, data, stale_builder_alerts, master_url)
except:
# Put all exception text into an exception and raise that so it doesn't
# get eaten by the multiprocessing code.
raise Exception(''.join(traceback.format_exception(*sys.exc_info())))
def query_findit(findit_api_url, alerts):
"""Get analysis results from Findit for failures in the given alerts.
Args:
findit_api_url (str): The URL to findit's api for build failure analysis.
alerts (list): A non-empty list of failure alerts.
Returns:
A list of analysis results in the following form (could be an empty list):
[
{
"master_url": "https://build.chromium.org/p/chromium.chromiumos",
"builder_name": "Linux ChromiumOS GN",
"build_number": 6146,
"step_name": "compile",
"first_known_failed_build_number": 6146,
"is_sub_test": false,
"suspected_cls": [
{
"repo_name": "chromium",
"revision": "ed1e90f4f980709cef6a8a9c7e0f64cfe5578cdd",
"commit_position": 311460,
}
]
},
{
"master_url": "https://build.chromium.org/p/chromium.linux",
"builder_name": "Linux Tests",
"build_number": 1234,
"step_name": "browser_tests",
"first_known_failed_build_number": 1232,
"is_sub_test": true,
"test_name": "TestSuite.TestName",
"suspected_cls": [
{
"repo_name": "chromium",
"revision": "another_git_hash",
"commit_position": 23456,
}
]
}
]
"""
# Alerts are per-step or per-reason, but analysis of build failures by Findit
# is per-build. Thus use a dict to de-duplicate.
builds = {}
for alert in alerts:
master_url = alert['master_url']
builder_name = alert['builder_name']
build_number = alert['last_failing_build']
key = '%s-%s-%d' % (master_url, builder_name, build_number)
if key not in builds:
builds[key] = {
'master_url': master_url,
'builder_name': builder_name,
'build_number': build_number,
'failed_steps': [alert['step_name']],
}
elif alert['step_name'] not in builds[key]['failed_steps']:
builds[key]['failed_steps'].append(alert['step_name'])
try:
headers = {'Content-type': 'application/json'}
data_json = {'builds': builds.values()}
logging.debug('Request to findit:\n%s', json.dumps(data_json, indent=2))
start_time = datetime.datetime.utcnow()
response = requests.post(findit_api_url, data=json.dumps(data_json),
headers=headers, timeout=60)
logging.info('Query Findit took: %s seconds.',
(datetime.datetime.utcnow() - start_time).total_seconds())
if response.status_code != 200:
logging.error('Findit response status code:%d, content:%s',
response.status_code, response.text)
return []
response_json = response.json()
logging.debug(
'Response from findit:\n%s', json.dumps(response_json, indent=2))
return response_json.get('results', [])
except (requests.Timeout, ValueError, Exception):
# TODO(crbug.com/468161): remove the "Exception" from the list above.
# For now, it is to make sure any break on Findit side won't impact the
# rest of builder_alerts.
logging.exception('Failed to incorporate result from Findit.')
return []
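# Illustrative call (the endpoint URL is a placeholder; the alert dicts carry
# the keys this function actually reads -- master_url, builder_name,
# last_failing_build and step_name):
#
#   results = query_findit(
#       'https://findit.example.com/api/buildfailure',
#       [{'master_url': 'https://build.chromium.org/p/chromium.linux',
#         'builder_name': 'Linux Tests',
#         'last_failing_build': 1234,
#         'step_name': 'browser_tests'}])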
def inner_loop(args):
if not args.data_url:
logging.warn('No /data url passed, will write to builder_alerts.json')
if args.use_cache:
requests_cache.install_cache('failure_stats')
else:
requests_cache.install_cache(backend='memory')
# FIXME: gatekeeper_config should find gatekeeper.json for us.
gatekeeper_path = os.path.abspath(args.gatekeeper)
logging.debug('Processsing gatekeeper json: %s', gatekeeper_path)
gatekeeper = gatekeeper_ng_config.load_gatekeeper_config(gatekeeper_path)
gatekeeper_trees_path = os.path.abspath(args.gatekeeper_trees)
logging.debug('Processing gatekeeper trees json: %s', gatekeeper_trees_path)
gatekeeper_trees = gatekeeper_ng_config.load_gatekeeper_tree_config(
gatekeeper_trees_path)
master_urls = gatekeeper_extras.fetch_master_urls(gatekeeper, args)
start_time = datetime.datetime.utcnow()
cache = buildbot.DiskCache(CACHE_PATH)
old_alerts = {}
if args.data_url:
try:
old_alerts_raw = requests.get(args.data_url[0]).json()
except ValueError:
logging.debug('No old alerts found.')
else:
# internal-alerts will have a redirect instead of alerts if you're
# signed in.
if 'alerts' in old_alerts_raw:
for alert in old_alerts_raw['alerts']:
master = alert['master_url']
builder = alert['builder_name']
step = alert['step_name']
reason = alert['reason']
alert_key = alert_builder.generate_alert_key(
master, builder, step, reason)
if alert_key in old_alerts:
logging.critical(
'Incorrectly overwriting an alert reason from the'
' old alert data. master: %s, builder: %s, step: %s, reason:'
' %s' % (master, builder, step, reason))
old_alerts[alert_key] = alert
latest_builder_info = {}
stale_builder_alerts = []
missing_masters = []
alerts = []
suspected_cls = []
pool = multiprocessing.Pool(processes=args.processes)
master_datas = pool.map(SubProcess(cache, old_alerts, args.builder_filter,
args.jobs), master_urls)
pool.close()
pool.join()
for data in master_datas:
# TODO(ojan): We should put an alert in the JSON for this master so
# we can show that the master is down in the sheriff-o-matic UI.
if not data[0 |
yangshun/cs4243-project | app/surface.py | Python | mit | 3,220 | 0.000932 | import numpy as np
class Surface(object):
def __init__(self, image, edge_points3d, edge_points2d):
"""
Constructor for a surface defined by a texture image and
4 boundary points. Choose the first point as the origin
of the surface's coordinate system.
:param image: image array
:param edge_points3d: array of 3d coordinates of 4 corner points in clockwise direction
:param edge_points2d: array of 2d coordinates of 4 corner points in clockwise direction
"""
assert len(edge_points3d) == 4 and len(edge_points2d) == 4
self.image = image
self.edge_points3d = edge_points3d
self.edge_points2d = np.float32(edge_points2d) # This is required for using cv2's getPerspectiveTransform
self.normal = self._get_normal_vector()
def top_left_corner3d(self):
return self.edge_points3d[0]
def top_right_corner3d(self):
return self.edge_points3d[1]
def bottom_right_corner3d(self):
return self.edge_points3d[2]
def bottom_left_corner3d(self):
return self.edge_points3d[3]
def distance_to_point(self, point):
point_to_surface = point - self.top_left_corner3d()
distance_to_surface = self.normal.dot(point_to_surface)
return distance_to_surface
def _get_normal_vector(self):
"""
        :return: the unit normal vector of the surface. It determines the
        front side of the surface (it is normalized before being returned)
"""
p0 = self.edge_points3d[0]
p1 = self.edge_points3d[1]
p3 = self.edge_points3d[3]
v1 = p3 - p0
v2 = p1 - p0
normal = np.cross(v1, v2)
norm = np.linalg.norm(normal)
return normal / norm
class Polyhedron(object):
def __init__(self, surfaces):
self.surfaces = surfaces
class Space(object):
def __init__(self, models=None):
self.models = models or []
def add_model(self, model):
        assert isinstance(model, Polyhedron)
self.models.append(model)
class Line2D(object):
    def __init__(self, point1, point2):
"""
Using the line equation a*x + b*y + c = 0 with b >= 0
:param point1: starting point
:param point2: ending point
:return: a Line object
"""
assert len(point1) == 2 and len(point2) == 2
self.a = point2[1] - point1[1]
self.b = point1[0] - point2[0]
self.c = point1[1] * point2[0] - point1[0] * point2[1]
if self.b < 0:
self.a = -self.a
self.b = -self.b
self.c = -self.c
def is_point_on_left(self, point):
return self.a * point[0] + self.b * point[1] + self.c > 0
def is_point_on_right(self, point):
return self.a * point[0] + self.b * point[1] + self.c < 0
def is_point_on_line(self, point):
return self.a * point[0] + self.b * point[1] + self.c == 0
def get_y_from_x(self, x):
if self.b == 0:
return 0.0
return 1.0 * (-self.c - self.a * x) / self.b
def get_x_from_y(self, y):
if self.a == 0:
return 0.0
return 1.0 * (-self.c - self.b * y) / self.a
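# Usage sketch (illustrative, not from the original project): classify a point
# against the directed line running from (0, 0) to (1, 0).
if __name__ == '__main__':
    line = Line2D((0, 0), (1, 0))
    print(line.is_point_on_left((0.5, 1.0)))  # True -- the point lies above the +x axis
    print(line.get_y_from_x(0.25))            # 0.0 -- the line itself is y = 0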
|
Rusk85/pyload | module/plugins/hoster/VeehdCom.py | Python | gpl-3.0 | 2,311 | 0.001731 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
from module.plugins.Hoster import Hoster
class VeehdCom(Hoster):
__name__ = 'VeehdCom'
__type__ = 'hoster'
__pattern__ = r'http://veehd\.com/video/\d+_\S+'
__config__ = [
('filename_spaces', 'bool', "Allow spaces in filename", 'False'),
('replacement_char', 'str', "Filename replacement character", '_'),
]
__version__ = '0.23'
__description__ = """Veehd.com Download Hoster"""
__author_name__ = ('cat')
__author_mail__ = ('cat@pyload')
def _debug(self, msg):
self.logDebug('[%s] %s' % (self.__name__, msg))
def setup(self):
self.html = None
self.multiDL = True
self.req.canContinue = True
def process(self, pyfile):
self.download_html()
if not self.file_exists():
self.offline()
pyfile.name = self.get_file_name()
self.download(self.get_file_url())
def download_html(self):
url = self.pyfile.url
self._debug("Requesting page: %s" % (repr(url),))
        self.html = self.load(url)
def file_exists(self):
if self.html is None:
self.download_html()
if '<title>Veehd</title>' in self.html:
return False
return True
def get_file_name(self):
if self.html is None:
self.download_html()
match = re.search(r'<title[^>]*>([^<]+) on Veehd</title>', self.html)
if not match:
            self.fail("video title not found")
name = match.group(1)
# replace unwanted characters in filename
if self.getConfig('filename_spaces'):
pattern = '[^0-9A-Za-z\.\ ]+'
else:
pattern = '[^0-9A-Za-z\.]+'
name = re.sub(pattern, self.getConfig('replacement_char'),
name)
return name + '.avi'
def get_file_url(self):
""" returns the absolute downloadable filepath
"""
if self.html is None:
self.download_html()
match = re.search(r'<embed type="video/divx" src="(http://([^/]*\.)?veehd\.com/dl/[^"]+)"',
self.html)
if not match:
self.fail("embedded video url not found")
file_url = match.group(1)
return file_url
|
rolandgeider/wger | wger/manager/tests/test_schedule.py | Python | agpl-3.0 | 23,910 | 0.001046 | # This file is part of wger Workout Manager.
#
# wger Workout Manager is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# wger Workout Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Standard Library
import datetime
import logging
# Django
from django.contrib.auth.models import User
from django.urls import reverse
# wger
from wger.core.tests import api_base_test
from wger.core.tests.base_testcase import (
STATUS_CODES_FAIL,
WgerAddTestCase,
WgerDeleteTestCase,
WgerEditTestCase,
WgerTestCase
)
from wger.manager.models import (
Schedule,
ScheduleStep,
Workout
)
from wger.utils.helpers import make_token
logger = logging.getLogger(__name__)
class ScheduleShareButtonTestCase(WgerTestCase):
"""
Test that the share button is correctly displayed and hidden
"""
def test_share_button(self):
workout = Workout.objects.get(pk=2)
response = self.client.get(workout.get_absolute_url())
self.assertFalse(response.context['show_shariff'])
self.user_login('admin')
response = self.client.get(workout.get_absolute_url())
self.assertTrue(response.context['show_shariff'])
self.user_login('test')
response = self.client.get(workout.get_absolute_url())
self.assertFalse(response.context['show_shariff'])
class ScheduleAccessTestCase(WgerTestCase):
"""
Test accessing the workout page
"""
def test_access_shared(self):
"""
Test accessing the URL of a shared workout
"""
workout = Schedule.objects.get(pk=2)
self.user_login('admin')
response = self.client.get(workout.get_absolute_url())
self.assertEqual(response.status_code, 200)
self.user_login('test')
response = self.client.get(workout.get_absolute_url())
self.assertEqual(response.status_code, 200)
self.user_logout()
response = self.client.get(workout.get_absolute_url())
self.assertEqual(response.status_code, 200)
def test_access_not_shared(self):
"""
Test accessing the URL of a private workout
"""
        workout = Schedule.objects.get(pk=1)
self.user_login('admin')
response = self.client.get(workout.get_absolute_url())
self.assertEqual(response.status_code, 403)
self.user_login('test')
response = self.client.get(workout.get_absolute_url())
        self.assertEqual(response.status_code, 200)
self.user_logout()
response = self.client.get(workout.get_absolute_url())
self.assertEqual(response.status_code, 403)
class ScheduleRepresentationTestCase(WgerTestCase):
"""
Test the representation of a model
"""
def test_representation(self):
"""
Test that the representation of an object is correct
"""
self.assertEqual("{0}".format(Schedule.objects.get(pk=1)),
'my cool schedule that i found on the internet')
class CreateScheduleTestCase(WgerAddTestCase):
"""
Tests adding a schedule
"""
object_class = Schedule
url = 'manager:schedule:add'
user_success = 'test'
user_fail = False
data = {'name': 'My cool schedule',
'start_date': datetime.date.today(),
'is_active': True,
'is_loop': True}
class DeleteScheduleTestCase(WgerDeleteTestCase):
"""
Tests deleting a schedule
"""
object_class = Schedule
url = 'manager:schedule:delete'
pk = 1
user_success = 'test'
user_fail = 'admin'
class EditScheduleTestCase(WgerEditTestCase):
"""
Tests editing a schedule
"""
object_class = Schedule
url = 'manager:schedule:edit'
pk = 3
data = {'name': 'An updated name',
'start_date': datetime.date.today(),
'is_active': True,
'is_loop': True}
class ScheduleTestCase(WgerTestCase):
"""
Other tests
"""
def schedule_detail_page(self):
"""
Helper function
"""
response = self.client.get(reverse('manager:schedule:view', kwargs={'pk': 2}))
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'This schedule is a loop')
schedule = Schedule.objects.get(pk=2)
schedule.is_loop = False
schedule.save()
response = self.client.get(reverse('manager:schedule:view', kwargs={'pk': 2}))
self.assertEqual(response.status_code, 200)
self.assertNotContains(response, 'This schedule is a loop')
def test_schedule_detail_page_owner(self):
"""
Tests the schedule detail page as the owning user
"""
self.user_login()
self.schedule_detail_page()
def test_schedule_overview(self):
"""
Tests the schedule overview
"""
self.user_login()
response = self.client.get(reverse('manager:schedule:overview'))
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context['schedules']), 3)
self.assertTrue(response.context['schedules'][0].is_active)
schedule = Schedule.objects.get(pk=4)
schedule.is_active = False
schedule.save()
response = self.client.get(reverse('manager:schedule:overview'))
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context['schedules']), 3)
for i in range(0, 3):
self.assertFalse(response.context['schedules'][i].is_active)
def test_schedule_active(self):
"""
Tests that only one schedule can be active at a time (per user)
"""
def get_schedules():
schedule1 = Schedule.objects.get(pk=2)
schedule2 = Schedule.objects.get(pk=3)
schedule3 = Schedule.objects.get(pk=4)
return (schedule1, schedule2, schedule3)
self.user_login()
(schedule1, schedule2, schedule3) = get_schedules()
self.assertTrue(schedule3.is_active)
schedule1.is_active = True
schedule1.save()
(schedule1, schedule2, schedule3) = get_schedules()
self.assertTrue(schedule1.is_active)
self.assertFalse(schedule2.is_active)
self.assertFalse(schedule3.is_active)
schedule2.is_active = True
schedule2.save()
(schedule1, schedule2, schedule3) = get_schedules()
self.assertFalse(schedule1.is_active)
self.assertTrue(schedule2.is_active)
self.assertFalse(schedule3.is_active)
def start_schedule(self, fail=False):
"""
Helper function
"""
schedule = Schedule.objects.get(pk=2)
self.assertFalse(schedule.is_active)
self.assertNotEqual(schedule.start_date, datetime.date.today())
response = self.client.get(reverse('manager:schedule:start', kwargs={'pk': 2}))
schedule = Schedule.objects.get(pk=2)
if fail:
self.assertIn(response.status_code, STATUS_CODES_FAIL)
self.assertFalse(schedule.is_active)
self.assertNotEqual(schedule.start_date, datetime.date.today())
else:
self.assertEqual(response.status_code, 302)
self.assertTrue(schedule.is_active)
self.assertEqual(schedule.start_date, datetime.date.today())
def test_start_schedule_owner(self):
"""
Tests starting a schedule as the owning user
"""
self.user_login()
self.start_schedule()
def test_start_schedule_other(self):
"""
Tests starting a schedule as a different user
"""
self.user_login('test')
self.start_schedule(fail=True)
def test_start |
frankrousseau/weboob | modules/arte/browser.py | Python | agpl-3.0 | 9,423 | 0.001804 | # -*- coding: utf-8 -*-
# Copyright(C) 2010-2011 Romain Bignon
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
import re
import datetime
import time
import urllib
from weboob.capabilities import NotAvailable
from weboob.capabilities.image import BaseImage
from weboob.tools.json import json as simplejson
from weboob.deprecated.browser import Browser
from weboob.deprecated.browser.decorators import id2url
from .pages import ArteLivePage, ArteLiveVideoPage
from .video import ArteVideo, ArteLiveVideo
__all__ = ['ArteBrowser']
class ArteBrowser(Browser):
DOMAIN = u'videos.arte.tv'
ENCODING = None
PAGES = {r'http://concert.arte.tv/\w+': ArteLivePage,
r'http://concert.arte.tv/(?P<id>.+)': ArteLiveVideoPage,
}
LIVE_LANG = {'F': 'fr',
'D': 'de'
}
API_URL = 'http://arte.tv/papi/tvguide'
def __init__(self, lang, quality, order, *args, **kwargs):
self.lang = lang
self.quality = quality
self.order = order
Browser.__init__(self, *args, **kwargs)
@id2url(ArteVideo.id2url)
def get_video(self, url, video=None):
response = self.openurl('%s/ALL.json' % url)
result = simplejson.loads(response.read(), self.ENCODING)
if video is None:
video = self.create_video(result['video'])
try:
video.url = self.get_m3u8_link(result['video']['VSR'][0]['VUR'])
video.ext = u'm3u8'
except:
video.url, video.ext = NotAvailable, NotAvailable
return video
def get_m3u8_link(self, url):
r = self.openurl(url)
baseurl = url.rpartition('/')[0]
links_by_quality = []
for line in r.readlines():
if not line.startswith('#'):
links_by_quality.append(u'%s/%s' % (baseurl, line.replace('\n', '')))
if len(links_by_quality):
try:
return links_by_quality[self.quality[1]]
except:
return links_by_quality[0]
return NotAvailable
@id2url(ArteLiveVideo.id2url)
def get_live_video(self, url, video=None):
self.location(url)
assert self.is_on_page(ArteLiveVideoPage)
json_url, video = self.page.get_video(video)
return self.fill_live_video(video, json_url)
def fill_live_video(self, video, json_url):
response = self.openurl(json_url)
result = simplejson.loads(response.read(), self.ENCODING)
quality = None
if 'VTI' in result['videoJsonPlayer']:
video.title = u'%s' % result['videoJsonPlayer']['VTI']
if 'VSR' in result['videoJsonPlayer']:
for item in result['videoJsonPlayer']['VSR']:
if self.quality[0] in item:
quality = item
break
if not quality:
url = result['videoJsonPlayer']['VSR'][0]['url']
ext = result['videoJsonPlayer']['VSR'][0]['mediaType']
else:
url = result['videoJsonPlayer']['VSR'][quality]['url']
ext = result['videoJsonPlayer']['VSR'][quality]['mediaType']
video.url = u'%s' % url
video.ext = u'%s' % ext
if 'VDA' in result['videoJsonPlayer']:
date_string = result['videoJsonPlayer']['VDA'][:-6]
try:
video.date = datetime.datetime.strptime(date_string, '%d/%m/%Y %H:%M:%S')
except TypeError:
video.date = datetime.datetime(*(time.strptime(date_string, '%d/%m/%Y %H:%M:%S')[0:6]))
if 'VDU' in result['videoJsonPlayer'].keys():
video.duration = int(result['videoJsonPlayer']['VDU'])
        if 'IUR' in result['videoJsonPlayer']['VTU'].keys():
            video.thumbnail = BaseImage(result['videoJsonPlayer']['VTU']['IUR'])
video.thumbnail.url = video.thumbnail.id
return video
def home(self):
self.location('http://videos.arte.tv/%s/videos/toutesLesVideos' % self.lang)
def get_video_from_program_id(self, _id):
class_name = 'epg'
method_name = 'program'
level = 'L2'
url = self.API_URL \
+ '/' + class_name \
+ '/' + method_name \
+ '/' + self.lang \
+ '/' + level \
+ '/' + _id \
+ '.json'
response = self.openurl(url)
result = simplejson.loads(response.read(), self.ENCODING)
if 'VDO' in result['abstractProgram'].keys():
video = self.create_video(result['abstractProgram']['VDO'])
return self.get_video(video.id, video)
def search_videos(self, pattern):
class_name = 'videos/plus7'
method_name = 'search'
level = 'L1'
cluster = 'ALL'
channel = 'ALL'
limit = '10'
offset = '0'
url = self.create_url_plus7(class_name, method_name, level, cluster, channel, limit, offset, pattern)
response = self.openurl(url)
result = simplejson.loads(response.read(), self.ENCODING)
return self.create_video_from_plus7(result['videoList'])
def create_video_from_plus7(self, result):
for item in result:
yield self.create_video(item)
def create_video(self, item):
video = ArteVideo(item['VID'])
if 'VSU' in item:
video.title = u'%s : %s' % (item['VTI'], item['VSU'])
else:
video.title = u'%s' % (item['VTI'])
video.rating = int(item['VRT'])
if 'programImage' in item:
url = u'%s' % item['programImage']
video.thumbnail = BaseImage(url)
video.thumbnail.url = video.thumbnail.id
video.duration = datetime.timedelta(seconds=int(item['videoDurationSeconds']))
video.set_empty_fields(NotAvailable, ('url',))
if 'VDE' in item:
video.description = u'%s' % item['VDE']
if 'VDA' in item:
m = re.match('(\d{2})\s(\d{2})\s(\d{4})(.*?)', item['VDA'])
if m:
dd = int(m.group(1))
mm = int(m.group(2))
yyyy = int(m.group(3))
video.date = datetime.date(yyyy, mm, dd)
return video
def create_url_plus7(self, class_name, method_name, level, cluster, channel, limit, offset, pattern=None):
url = self.API_URL \
+ '/' + class_name \
+ '/' + method_name \
+ '/' + self.lang \
+ '/' + level
if pattern:
url += '/' + urllib.quote(pattern.encode('utf-8'))
url += '/' + channel \
+ '/' + cluster \
+ '/' + '-1' \
+ '/' + self.order \
+ '/' + limit \
+ '/' + offset \
+ '.json'
return url
def get_arte_programs(self):
class_name = 'epg'
method_name = 'clusters'
url = self.API_URL \
+ '/' + class_name \
+ '/' + method_name \
+ '/' + self.lang \
+ '/0/ALL.json'
response = self.openurl(url)
result = simplejson.loads(response.read(), self.ENCODING)
return result['configClusterList']
def program_videos(self, program):
class_name = 'epg'
method_name = 'cluster'
url = self.API_URL \
+ '/' + class_name \
+ '/' + method_name \
+ '/' + self.lang \
+ '/' + program \
+ '.json'
response = self |
jmchilton/lwr | galaxy/jobs/metrics/collectl/cli.py | Python | apache-2.0 | 5,442 | 0.018927 | from string import Template
import subprocess
import logging
log = logging.getLogger( __name__ )
COMMAND_LINE_TEMPLATE = Template(
"$collectl_path $destination_arg $mode_arg $subsystems_arg $interval_arg $procfilt_arg $flush_arg $sep_arg"
)
MODE_RECORD = "record"
MODE_PLAYBACK = "playback"
class CollectlCli( object ):
""" Abstraction over (some of) the command-line arguments of collectl.
Ideally this will be useful for building up command line arguments for
    remote execution as well as running directly on the local host.
This is meant to be a fairly generic utility - for interfacing with
collectl CLI - logic more directly related to the Galaxy job metric plugin
plugin should be placed in other modules.
Keyword Arguments:
collectl_path: Path to collectl executable (defaults to collectl - i.e.
search the PATH).
playback_path (defaults to None): If this is None collectl will run in
        record mode, else it will play back the specified file.
Playback Mode Options:
sep : Separator used in playback mode (set to 9 to produce tsv)
(defaults to None).
Record Mode Options (some of these may work in playback mode also):
destination_path: Location of path files to write to (defaults to None
and collectl will just use cwd). Really this is just to prefix -
collectl will append hostname and datetime to file.
interval: Setup polling interval (secs) for most subsystems (defaults
to None and when unspecified collectl will use default of 1 second).
interval2: Setup polling interval (secs) for process information
        (defaults to None and when unspecified collectl will use default of
60 seconds).
interval3: Setup polling interval (secs) for environment information
        (defaults to None and when unspecified collectl will use default of
300 seconds).
procfilt: Optional argument to procfilt. (defaults to None).
flush : Optional flush interval (defaults to None).
"""
def __init__( self, **kwargs ):
command_args = {}
command_args[ "collectl_path" ] = kwargs.get( "collectl_path", "collectl" )
playback_path = kwargs.get( "playback_path", None )
self.mode = MODE_RECORD if not playback_path else MODE_PLAYBACK
if self.mode == MODE_RECORD:
mode_arg = ""
elif self.mode == MODE_PLAYBACK:
mode_arg = "-P -p '%s'" % playback_path
else:
raise Exception( "Invalid mode supplied to CollectlCli - %s" % self.mode )
command_args[ "mode_arg" ] = mode_arg
command_args[ "interval_arg" ] = self.__interval_arg( kwargs )
destination = kwargs.get( "destination_path", None )
if destination:
destination_arg = "-f '%s'" % destination
else:
destination_arg = ""
command_args[ "destination_arg" ] = destination_arg
procfilt = kwargs.get( "procfilt", None )
command_args[ "procfilt_arg" ] = "" if not procfilt else "--procfilt %s" % procfilt
        command_args[ "subsystems_arg" ] = self.__subsystems_arg( kwargs.get( "subsystems", [] ) )
flush = kwargs.get( "flush", None )
        command_args[ "flush_arg" ] = "--flush %s" % flush if flush else ""
sep = kwargs.get( "sep", None )
command_args[ "sep_arg" ] = "--sep=%s" % sep if sep else ""
self.command_args = command_args
def __subsystems_arg( self, subsystems ):
if subsystems:
return "-s%s" % "".join( [ s.command_line_arg for s in subsystems ] )
else:
return ""
def __interval_arg( self, kwargs ):
if self.mode != MODE_RECORD:
return ""
interval = kwargs.get( "interval", None )
if not interval:
return ""
self.__validate_interval_arg( interval )
interval_arg = "-i %s" % interval
interval2 = kwargs.get( "interval2", None )
if not interval2:
return interval_arg
self.__validate_interval_arg( interval2, multiple_of=int( interval ) )
interval_arg = "%s:%s" % ( interval_arg, interval2 )
interval3 = kwargs.get( "interval3", None )
if not interval3:
return interval_arg
self.__validate_interval_arg( interval3, multiple_of=int( interval ) )
interval_arg = "%s:%s" % ( interval_arg, interval3 )
return interval_arg
def __validate_interval_arg( self, value, multiple_of=None ):
if value and not str(value).isdigit():
raise Exception( "Invalid interval argument supplied, must be integer %s" % value )
if multiple_of:
if int( value ) % multiple_of != 0:
raise Exception( "Invalid interval argument supplied, must multiple of %s" % multiple_of )
def build_command_line( self ):
return COMMAND_LINE_TEMPLATE.substitute( **self.command_args )
def run( self, stdout=subprocess.PIPE, stderr=subprocess.PIPE ):
command_line = self.build_command_line()
log.info( "Executing %s" % command_line )
proc = subprocess.Popen( command_line, shell=True, stdout=stdout, stderr=stderr )
return_code = proc.wait()
if return_code:
raise Exception( "Problem running collectl command." )
__all__ = [ 'CollectlCli' ]
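# Example usage (a sketch based on the docstring above; the paths and
# intervals are illustrative):
#
#   cli = CollectlCli(
#       destination_path='/tmp/metrics',
#       interval='10',
#       interval2='60',   # must be a multiple of interval
#       flush='11',
#   )
#   print(cli.build_command_line())
#   cli.run()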
|
libvmi/libvmi | tools/windows-offset-finder/rekall_offset_finder.py | Python | lgpl-3.0 | 5,390 | 0.001299 | #!/usr/bin/env python3
"""
Rekall offset finder.
Usage:
rekall_offset_finder.py [options] <domain> [<url>]
Options:
-d --debug Enable debug output
-u URI, --uri=URI Specify Libvirt URI [Default: qemu:///system]
-o --old Use the old config format
-h --help Show this screen.
--version Show version.
"""
import sys
import os
import logging
import json
import stat
from io import StringIO
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
import libvirt
from docopt import docopt
from rekall import plugins, session
NT_KRNL_PDB = ['ntkrnlmp.pdb', 'ntkrpamp.pdb']
SCRIPT_DIR = str(Path(__file__).resolve().parent)
def find_ntoskrnl(version_modules):
for entry in version_modules:
e_type = entry[0]
if e_type == 'r':
e_data = entry[1]
if e_data['pdb'] in NT_KRNL_PDB:
return (e_data['pdb'], e_data['guid'])
raise RuntimeError('Cannot find {} with version_modules '
'plugin'.format(NT_KRNL_PDB))
def extract_offsets(domain, url):
s = session.Session(
filename=url,
autodetect=["rsds"],
logger=logging.getLogger(),
autodetect_build_local='none',
format='data',
profile_path=[
"http://profiles.rekall-forensic.com"
])
strio = StringIO()
s.RunPlugin("version_modules", output=strio)
version_modules = json.loads(strio.getvalue())
pdbase = s.profile.get_obj_offset('_KPROCESS', 'DirectoryTableBase')
tasks = s.profile.get_obj_offset('_EPROCESS', 'ActiveProcessLinks')
name = s.profile.get_obj_offset('_EPROCESS', 'ImageFileName')
pid = s.profile.get_obj_offset('_EPROCESS', 'UniqueProcessId')
# find ntoskrnl guid
ntos_pdb, ntos_guid = find_ntoskrnl(version_modules)
ntos_module = Path(ntos_pdb).stem
rekall_profile_path = os.path.join(SCRIPT_DIR,
"{}-profile.json".format(domain))
# create a new session with a text format
# allowing us to write files
s = session.Session(
filename=url,
autodetect=["rsds"],
logger=logging.getLogger(),
autodetect_build_local='none',
format='text',
profile_path=[
"http://profiles.rekall-forensic.com"
])
# build the Rekall JSON profile from PDB
s.RunPlugin("build_local_profile", module_name=ntos_module,
guid=ntos_guid, dumpfile=rekall_profile_path)
config = {
"ostype": "Windows",
"win_pdbase": pdbase,
"win_pid": pid,
"win_tasks": tasks,
"win_pname": name,
"rekall_profile": rekall_profile_path
}
return config
def format_config(domain, config, old_format=False):
if not old_format:
formatted_config = """
%s {
ostype = "Windows";
rekall_profile = "%s";
}
""" % (domain, config['rekall_profile'])
else:
formatted_config = """
%s {
ostype = "Windows";
win_pdbase = %s;
win_pid = %s;
win_tasks = %s;
win_pname = %s;
}
""" % (domain,
hex(config['win_pdbase']),
hex(config['win_pid']),
hex(config['win_tasks']),
hex(config['win_pname'])
)
return formatted_config
def main(args):
# delete rekall's BasicConfig
# we want to configure the root logger
for handler in logging.root.handlers[:]:
logging.root.removeHandler(handler)
debug = args['--debug']
# configure root logger
log_level = logging.INFO
if debug:
log_level = logging.DEBUG
logging.basicConfig(level=log_level)
logging.debug(args)
domain_name = args['<domain>']
uri = args['--uri']
old_format = args['--old']
url = args['<url>']
config = None
if not url:
# take temporary memory dump
# we need to create our own tmp_dir
# otherwise the dumpfile will be owned by libvirt
# and we don't have the permission to remove it in /tmp
with TemporaryDirectory() as tmp_dir:
with NamedTemporaryFile(dir=tmp_dir) as ram_dump:
# chmod to be r/w by everyone
# before libvirt takes ownership
os.chmod(ram_dump.name,
stat.S_IRUSR | stat.S_IWUSR |
stat.S_IRGRP | stat.S_IWGRP |
stat.S_IROTH | stat.S_IWOTH)
con = libvirt.open(uri)
                domain = con.lookupByName(domain_name)
# take dump
logging.info('Dumping %s physical memory to %s', domain.name(),
ram_dump.name)
                flags = libvirt.VIR_DUMP_MEMORY_ONLY
dumpformat = libvirt.VIR_DOMAIN_CORE_DUMP_FORMAT_RAW
domain.coreDumpWithFormat(ram_dump.name, dumpformat, flags)
ram_dump.flush()
# extract offsets
config = extract_offsets(domain.name(), ram_dump.name)
else:
config = extract_offsets(domain_name, url)
formatted_config = format_config(domain_name, config, old_format)
logging.info(formatted_config)
if __name__ == '__main__':
args = docopt(__doc__)
exit_code = main(args)
sys.exit(exit_code)
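# Illustrative invocations (domain names and paths are placeholders):
#
#   python3 rekall_offset_finder.py win7-sp1
#   python3 rekall_offset_finder.py --uri qemu:///system --old win7-sp1
#   python3 rekall_offset_finder.py win7-sp1 /tmp/win7.raw   # reuse an existing dump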
|
roadmapper/ansible | test/units/plugins/lookup/test_lastpass.py | Python | gpl-3.0 | 6,829 | 0.001464 | # (c)2016 Andrew Zenk <azenk@umn.edu>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from argparse import ArgumentParser
from units.compat import unittest
from units.compat.mock import patch
from ansible.errors import AnsibleError
from ansible.module_utils import six
from ansible.plugins.lookup.lastpass import LookupModule, LPass, LPassException
MOCK_ENTRIES = [{'username': 'user',
'name': 'Mock Entry',
'password': 't0pS3cret passphrase entry!',
'url': 'https://localhost/login',
'notes': 'Test\nnote with multiple lines.\n',
'id': '0123456789'}]
class MockLPass(LPass):
_mock_logged_out = False
_mock_disconnected = False
def _lookup_mock_entry(self, key):
for entry in MOCK_ENTRIES:
if key == entry['id'] or key == entry['name']:
return entry
def _run(self, args, stdin=None, expected_rc=0):
# Mock behavior of lpass executable
base_options = ArgumentParser(add_help=False)
base_options.add_argument('--color', default="auto", choices=['auto', 'always', 'never'])
p = ArgumentParser()
sp = p.add_subparsers(help='command', dest='subparser_name')
logout_p = sp.add_parser('logout', parents=[base_options], help='logout')
show_p = sp.add_parser('show', parents=[base_options], help='show entry details')
field_group = show_p.add_mutually_exclusive_group(required=True)
for field in MOCK_ENTRIES[0].keys():
field_group.add_argument("--{0}".format(field), default=False, action='store_true')
field_group.add_argument('--field', default=None)
show_p.add_argument('selector', help='Unique Name or ID')
args = p.parse_args(args)
def mock_exit(output='', error='', rc=0):
if rc != expected_rc:
raise LPassException(error)
return output, error
if args.color != 'never':
return mock_exit(error='Error: Mock only supports --color=never', rc=1)
if args.subparser_name == 'logout':
if self._mock_logged_out:
return mock_exit(error='Error: Not currently logged in', rc=1)
logged_in_error = 'Are you sure you would like to log out? [Y/n]'
if stdin and stdin.lower() == 'n\n':
return mock_exit(output='Log out: aborted.', error=logged_in_error, rc=1)
elif stdin and stdin.lower() == 'y\n':
return mock_exit(output='Log out: complete.', error=logged_in_error, rc=0)
else:
return mock_exit(error='Error: aborted response', rc=1)
if args.subparser_name == 'show':
if self._mock_logged_out:
return mock_exit(error='Error: Could not find decryption key.' +
' Perhaps you need to login with `lpass login`.', rc=1)
if self._mock_disconnected:
return mock_exit(error='Error: Couldn\'t resolve host name.', rc=1)
mock_entry = self._lookup_mock_entry(args.selector)
if args.field:
return mock_exit(output=mock_entry.get(args.field, ''))
elif args.password:
return mock_exit(output=mock_entry.get('password', ''))
elif args.username:
return mock_exit(output=mock_entry.get('username', ''))
elif args.url:
return mock_exit(output=mock_entry.get('url', ''))
elif args.name:
return mock_exit(output=mock_entry.get('name', ''))
elif args.id:
return mock_exit(output=mock_entry.get('id', ''))
elif args.notes:
return mock_exit(output=mock_entry.get('notes', ''))
raise LPassException('We should never get here')
class DisconnectedMockLPass(MockLPass):
_mock_disconnected = True
class LoggedOutMockLPass(MockLPass):
_mock_logged_out = True
class TestLPass(unittest.TestCase):
def test_lastpass_cli_path(self):
lp = MockLPass(path='/dev/null')
self.assertEqual('/dev/null', lp.cli_path)
def test_lastpass_build_args_logout(self):
lp = MockLPass()
self.assertEqual(['logout', '--color=never'], lp._build_args("logout"))
    def test_lastpass_logged_in_true(self):
lp = MockLPass()
self.assertTrue(lp.logged_in)
def test_lastpass_logged_in_false(self):
lp = LoggedOutMockLPass()
self.assertFalse(lp.logged_in)
def test_lastpass_show_disconnected(self):
        lp = DisconnectedMockLPass()
with self.assertRaises(LPassException):
lp.get_field('0123456789', 'username')
def test_lastpass_show(self):
lp = MockLPass()
for entry in MOCK_ENTRIES:
entry_id = entry.get('id')
for k, v in six.iteritems(entry):
self.assertEqual(v.strip(), lp.get_field(entry_id, k))
class TestLastpassPlugin(unittest.TestCase):
@patch('ansible.plugins.lookup.lastpass.LPass', new=MockLPass)
def test_lastpass_plugin_normal(self):
lookup_plugin = LookupModule()
for entry in MOCK_ENTRIES:
entry_id = entry.get('id')
for k, v in six.iteritems(entry):
self.assertEqual(v.strip(),
lookup_plugin.run([entry_id], field=k)[0])
@patch('ansible.plugins.lookup.lastpass.LPass', LoggedOutMockLPass)
def test_lastpass_plugin_logged_out(self):
lookup_plugin = LookupModule()
entry = MOCK_ENTRIES[0]
entry_id = entry.get('id')
with self.assertRaises(AnsibleError):
lookup_plugin.run([entry_id], field='password')
@patch('ansible.plugins.lookup.lastpass.LPass', DisconnectedMockLPass)
def test_lastpass_plugin_disconnected(self):
lookup_plugin = LookupModule()
entry = MOCK_ENTRIES[0]
entry_id = entry.get('id')
with self.assertRaises(AnsibleError):
lookup_plugin.run([entry_id], field='password')
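# These tests are self-contained thanks to the MockLPass hierarchy (no real
# lpass binary or network access is needed); an illustrative invocation from
# an Ansible checkout would be:
#
#   python -m pytest test/units/plugins/lookup/test_lastpass.py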
|
GhostshipSoftware/avaloria | game/gamesrc/commands/world/character_commands.py | Python | bsd-3-clause | 7,180 | 0.004457 | from ev import Command, CmdSet
from src.commands.default.muxcommand import MuxCommand
class CmdAttack(Command):
"""
Begin to fight a target, typically an npc enemy.
Usage:
attack target-name
"""
key = 'attack'
aliases = ['kill']
help_category = "Combat"
locks = "cmd:all()"
def parse(self):
self.what = self.args.strip()
def func(self):
caller = self.caller
mob = caller.search(self.what, global_search=False)
if mob is None:
return
caller.begin_combat(mob)
class CmdTalk(Command):
"""
Attempt to talk to the given object, typically an npc who is able to respond
to you. Will return gracefully if the particular object does not support
having a conversation with the character.
Usage:
talk to <npc|object name> <whatever yer message is>
NOTE: Just because you can talk to an npc does not mean that they care about
or know about what you are discussing with them. Typically the control words
will be very easy to spot.
"""
key = 'talk'
aliases = ['talk to', 't']
help_category = 'general'
locks = "cmd:all()"
def parse(self):
if len(self.args) < 1:
print "usage: talk to <npc> <message>"
return
args = self.args.split()
self.npc = args[0]
args.remove(self.npc)
self.message = ' '.join(args)
def func(self):
if self.caller.db.in_combat:
self.caller.msg("{RCan't talk to people while in combat!")
return
if len(self.args) < 1:
self.caller.msg("usage: talk to <npc> <message>")
return
npc = self.caller.search(self.npc, global_search=False)
if hasattr(npc, "combatant"):
self.caller.msg("You can't talk to that, are you mad?")
else:
if npc is not None:
self.caller.msg("{mYou tell %s: %s{n" % (npc.name, self.message))
npc.dictate_action(self.caller, self.message)
else:
                self.caller.msg("I do not see anyone around by that name.")
return
class CmdDisplaySheet(Command):
"""
Display your character sheet.
Usage:
stats
"""
key = 'stats'
aliases = ['points', 'sheet']
help_category = "General"
locks = "cmd:all()"
    def func(self):
        caller = self.caller
        caller.display_character_sheet()
class CmdLoot(Command):
"""
Pass a corpse name to this command to loot the corpse.
Usage:
loot <corpse>
"""
key = 'loot'
help_category = "General"
locks = "cmd:all()"
def parse(self):
self.what = self.args.strip()
def func(self):
obj = self.caller.search(self.what, global_search=False)
if obj is None:
return
if obj.db.corpse:
if len(obj.contents) == 0:
self.caller.msg("That corpse is empty.")
obj.db.destroy_me = True
return
for i in obj.contents:
if i.db.attributes['lootable']:
i.move_to(self.caller, quiet=True)
self.caller.msg("{CYou have looted a: %s{n" % i.name)
obj.db.destroy_me = True
else:
self.caller.msg("{RThat is not a corpse.{n")
class CmdEquip(Command):
key = 'equip'
help_category = "General"
locks = "cmd:all()"
def parse(self):
self.what = self.args.strip()
def func(self):
        e = self.caller.db.equipment
        obj = self.caller.search(self.what, global_search=False)
        print e
        print obj
        if obj is None:
            return
        oa = obj.db.attributes
        e['%s' % oa['item_slot']] = obj
        self.caller.msg("{CYou have equipped: %s as your weapon.{n" % obj.name)
class CmdLook(MuxCommand):
"""
look at location or object
Usage:
look
look <obj>
look *<player>
Observes your location or objects in your vicinity.
"""
key = "look"
aliases = ["l", "ls"]
locks = "cmd:all()"
arg_regex = r"\s.*?|$"
def func(self):
"""
Handle the looking.
"""
caller = self.caller
args = self.args
if args:
# Use search to handle duplicate/nonexistant results.
if self.caller.location.db.decor_objects is not None and len(self.caller.location.db.decor_objects) > 0:
for k in self.caller.location.db.decor_objects:
print k
if k.lower() == args.lower():
caller.msg("%s" % self.caller.location.db.decor_objects[k])
return
looking_at_obj = caller.search(args, use_nicks=True)
if not looking_at_obj:
return
else:
looking_at_obj = caller.location
if not looking_at_obj:
caller.msg("You have no location to look at!")
return
if not hasattr(looking_at_obj, 'return_appearance'):
# this is likely due to us having a player instead
looking_at_obj = looking_at_obj.character
if not looking_at_obj.access(caller, "view"):
caller.msg("Could not find '%s'." % args)
return
# get object's appearance
caller.msg(looking_at_obj.return_appearance(caller))
# the object's at_desc() method.
looking_at_obj.at_desc(looker=caller)
class CmdEquip(Command):
"""
    This attempts to equip items on the character. If no arguments are
given, then it picks the first item for each slot it finds and equips
those items in their respective slots.
usage:
equip <item to equip>
aliases: wield, equip item, e
"""
key = 'equip'
aliases = ['equip item', 'wield', 'e']
help_category = "General"
locks = "cmd:all()"
def parse(self):
if len(self.args) < 1:
self.what = None
else:
self.what = self.args.strip()
def func(self):
if self.caller.db.in_combat:
self.caller.msg("{RCan't equip while in combat!")
if len(self.args) < 1:
self.caller.msg("What did you want to equip? equip <item to equip>")
return
if self.what is not None:
obj = self.caller.search(self.what, global_search=False)
if not obj:
self.caller.msg("Are you sure you are carrying the item you are trying to equip?")
else:
self.caller.equip_item(ite=obj, slot=obj.db.attributes['item_slot'])
obj.on_equip()
else:
self.caller.equip_item(ite=None,slot=None)
class CharacterCmdSet(CmdSet):
key = "CharacterClassCommands"
def at_cmdset_creation(self):
self.add(CmdAttack())
self.add(CmdLoot())
self.add(CmdDisplaySheet())
self.add(CmdEquip())
self.add(CmdTalk())
self.add(CmdLook())
self.add(CmdEquip())
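        # Note: CmdEquip is defined twice in this module, so by the time this
        # cmdset is built the name refers to the second definition -- both
        # self.add(CmdEquip()) calls above therefore register that class.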
|
jmadajian/CinemaFlowWorks | Examples/kitchen_sink.py | Python | gpl-3.0 | 41,711 | 0.000479 | # -*- coding: utf-8 -*-
import kivymd.snackbar as Snackbar
from kivy.app import App
from kivy.lang import Builder
from kivy.metrics import dp
from kivy.properties import ObjectProperty
from kivy.uix.image import Image
from kivymd.bottomsheet import MDListBottomSheet, MDGridBottomSheet
from kivymd.button import MDIconButton
from kivymd.label import MDLabel
from kivymd.list import ILeftBody, ILeftBodyTouch, IRightBodyTouch
from kivymd.navigationdrawer import NavigationDrawer
from kivymd.selectioncontrols import MDCheckbox
from kivymd.theming import ThemeManager
from kivymd.dialog import MDDialog
from kivymd.time_picker import MDTimePicker
from kivymd.date_picker import MDDatePicker
from kivymd.material_resources import DEVICE_TYPE
main_widget_kv = '''
#:import Toolbar kivymd.toolbar.Toolbar
#:import ThemeManager kivymd.theming.ThemeManager
#:import NavigationDrawer kivymd.navigationdrawer.NavigationDrawer
#:import MDCheckbox kivymd.selectioncontrols.MDCheckbox
#:import MDSwitch kivymd.selectioncontrols.MDSwitch
#:import MDList kivymd.list.MDList
#:import OneLineListItem kivymd.list.OneLineListItem
#:import TwoLineListItem kivymd.list.TwoLineListItem
#:import ThreeLineListItem kivymd.list.ThreeLineListItem
#:import OneLineAvatarListItem kivymd.list.OneLineAvatarListItem
#:import OneLineIconListItem kivymd.list.OneLineIconListItem
#:import OneLineAvatarIconListItem kivymd.list.OneLineAvatarIconListItem
#:import SingleLineTextField kivymd.textfields.SingleLineTextField
#:import MDSpinner kivymd.spinner.MDSpinner
#:import MDCard kivymd.card.MDCard
#:import MDSeparator kivymd.card.MDSeparator
#:import MDDropdownMenu kivymd.menu.MDDropdownMenu
#:import get_color_from_hex kivy.utils.get_color_from_hex
#:import colors kivymd.color_definitions.colors
#:import SmartTile kivymd.grid.SmartTile
#:import MDSlider kivymd.slider.MDSlider
#:import MDTabbedPanel kivymd.tabs.MDTabbedPanel
#:import MDTab kivymd.tabs.MDTab
#:import MDProgressBar kivymd.progressbar.MDProgressBar
#:import MDAccordion kivymd.accordion.MDAccordion
#:import MDAccordionItem kivymd.accordion.MDAccordionItem
#:import MDThemePicker kivymd.theme_picker.MDThemePicker
#:import MDBottomNavigation kivymd.tabs.MDBottomNavigation
#:import MDBottomNavigationItem kivymd.tabs.MDBottomNavigationItem
BoxLayout:
orientation: 'vertical'
Toolbar:
id: toolbar
title: 'KivyMD Kitchen Sink'
background_color: app.theme_cls.primary_color
background_palette: 'Primary'
background_hue: '500'
left_action_items: [['menu', lambda x: app.nav_drawer.toggle()]]
right_action_items: [['dots-vertical', lambda x: app.nav_drawer.toggle()]]
ScreenManager:
id: scr_mngr
Screen:
name: 'bottomsheet'
MDRaisedButton:
text: "Open List Bottom Sheet"
opposite_colors: True
size_hint: None, None
size: 4 * dp(48), dp(48)
pos_hint: {'center_x': 0.5, 'center_y': 0.6}
on_release: app.show_example_bottom_sheet()
MDRaisedButton:
text: "Open grid bottom sheet"
opposite_colors: True
size_hint: None, None
size: 4 * dp(48), dp(48)
pos_hint: {'center_x': 0.5, 'center_y': 0.3}
on_release: app.show_example_grid_bottom_sheet()
Screen:
name: 'button'
BoxLayout:
size_hint: None, None
size: '88dp', '48dp'
padding: '12dp'
pos_hint: {'center_x': 0.75, 'center_y': 0.8}
MDLabel:
font_style: 'Body1'
theme_text_color: 'Primary'
text: "Disable buttons"
size_hint_x:None
width: '56dp'
MDCheckbox:
id: disable_the_buttons
MDIconButton:
icon: 'sd'
pos_hint: {'center_x': 0.25, 'center_y': 0.8}
disabled: disable_the_buttons.active
MDFlatButton:
text: 'MDFlatButton'
pos_hint: {'center_x': 0.5, 'center_y': 0.6}
disabled: disable_the_buttons.active
MDRaisedButton:
text: "MDRaisedButton"
elevation_normal: 2
opposite_colors: True
pos_hint: {'center_x': 0.5, 'center_y': 0.4}
disabled: disable_the_buttons.active
MDFloatingActionButton:
id: float_act_btn
icon: 'plus'
opposite_colors: True
elevation_normal: 8
pos_hint: {'center_x': 0.5, 'center_y': 0.2}
disabled: disable_the_buttons.active
Screen:
name: 'card'
MDCard:
size_hint: None, None
size: dp(320), dp(180)
pos_hint: {'center_x': 0.5, 'center_y': 0.7}
            MDCard:
                size_hint: None, None
size: dp(320), dp(180)
pos_hint: {'center_x': 0.5, 'center_y': 0.3}
BoxLayout:
orientation:'vertical'
padding: dp(8)
MDLabel:
text: 'Title'
theme_text_color: 'Secondary'
font_style:"Title"
size_hint_y: None
height: dp(36)
MDSeparator:
height: dp(1)
MDLabel:
text: 'Body'
theme_text_color: 'Primary'
Screen:
name: 'slider'
BoxLayout:
MDSlider:
id: hslider
min:0
max:100
value: 10
MDSlider:
id: vslider
orientation:'vertical'
min:0
max:100
value: hslider.value
Screen:
name: 'dialog'
MDRaisedButton:
text: "Open dialog"
size_hint: None, None
size: 3 * dp(48), dp(48)
pos_hint: {'center_x': 0.5, 'center_y': 0.5}
opposite_colors: True
on_release: app.show_example_dialog()
Screen:
name: 'grid'
ScrollView:
do_scroll_x: False
GridLayout:
cols: 3
row_default_height: (self.width - self.cols*self.spacing[0])/self.cols
row_force_default: True
size_hint_y: None
height: 8 * dp(100) # /1 * self.row_default_height
padding: dp(4), dp(4)
spacing: dp(4)
SmartTileWithLabel:
mipmap: True
source: './assets/african-lion-951778_1280.jpg'
text: "African Lion"
SmartTile:
mipmap: True
source: './assets/beautiful-931152_1280.jpg'
SmartTile:
mipmap: True
source: './assets/african-lion-951778_1280.jpg'
SmartTile:
mipmap: True
source: './assets/guitar-1139397_1280.jpg'
SmartTile:
mipmap: True
source: './assets/robin-944887_1280.jpg'
SmartTile:
mipmap: True
source: './assets/kitten-1049129_1280.jpg'
SmartTile:
mipmap: True
source: './assets/light-bulb-1042480_1280.jpg'
SmartTile:
mipmap: True
source: './assets/tangerines-1111529_1280.jpg'
Screen:
name: 'list'
|
elfnor/sverchok | nodes/modifier_make/polygons_adaptative.py | Python | gpl-3.0 | 5,472 | 0.001102 | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
import bpy
from bpy.props import FloatProperty
import bmesh
from sverchok.node_tree import SverchCustomTreeNode
from sverchok.data_structure import (updateNode, Vector_generate,
                                     Vector_degenerate, match_long_repeat)
from sverchok.utils.sv_bmesh_utils import bmesh_from_pydata
# "coauthor": "Alessandro Zomparelli (sketchesofcode)"
class AdaptivePolsNode(bpy.types.Node, SverchCustomTreeNode):
    ''' Adaptively spread a donor mesh over the polygons of a recipient mesh (matrices are not applied, so apply scale/rot/loc with Ctrl+A first) '''
bl_idname = 'AdaptivePolsNode'
bl_label = 'Adaptive Polygons'
bl_icon = 'OUTLINER_OB_EMPTY'
width_coef = FloatProperty(name='width_coef',
                               description='width coefficient for sverchok adaptivepols donor size',
default=1.0, max=3.0, min=0.5,
update=updateNode)
def sv_init(self, context):
self.inputs.new('VerticesSocket', "VersR", "VersR")
self.inputs.new('StringsSocket', "PolsR", "PolsR")
self.inputs.new('VerticesSocket', "VersD", "VersD")
self.inputs.new('StringsSocket', "PolsD", "PolsD")
self.inputs.new('StringsSocket', "Z_Coef", "Z_Coef")
self.outputs.new('VerticesSocket', "Vertices", "Vertices")
self.outputs.new('StringsSocket', "Poligons", "Poligons")
def draw_buttons(self, context, layout):
layout.prop(self, "width_coef", text="donor width")
def lerp(self, v1, v2, v3, v4, v):
v12 = v1 + (v2-v1)*v[0] + ((v2-v1)/2)
v43 = v4 + (v3-v4)*v[0] + ((v3-v4)/2)
return v12 + (v43-v12)*v[1] + ((v43-v12)/2)
def lerp2(self, v1, v2, v3, v4, v, x, y):
v12 = v1 + (v2-v1)*v[0]*x + ((v2-v1)/2)
v43 = v4 + (v3-v4)*v[0]*x + ((v3-v4)/2)
return v12 + (v43-v12)*v[1]*y + ((v43-v12)/2)
def lerp3(self, v1, v2, v3, v4, v, x, y, z):
loc = self.lerp2(v1.co, v2.co, v3.co, v4.co, v, x, y)
nor = self.lerp(v1.normal, v2.normal, v3.normal, v4.normal, v)
nor.normalize()
#print (loc, nor, v[2], z)
return loc + nor*v[2]*z
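    # The three lerp helpers above implement a bilinear interpolation over the
    # recipient quad (v1..v4) in the donor's local (x, y) coordinates; the +0.5
    # terms recenter the donor around the quad's midpoint. lerp3 then offsets
    # the interpolated location along the (normalized) interpolated vertex
    # normal, scaled by the donor's local z and the z coefficient.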
def process(self):
        # fetch the two sockets - vertices and polygons
if all(s.is_linked for s in self.inputs[:-1]):
if self.inputs['Z_Coef'].is_linked:
z_coef = self.inputs['Z_Coef'].sv_get()[0]
else:
z_coef = []
polsR = self.inputs['PolsR'].sv_get()[0] # recipient one object [0]
versR = self.inputs['VersR'].sv_get()[0] # recipient
polsD = self.inputs['PolsD'].sv_get() # donor many objects [:]
versD_ = self.inputs['VersD'].sv_get() # donor
versD = Vector_generate(versD_)
polsR, polsD, versD = match_long_repeat([polsR, polsD, versD])
bm = bmesh_from_pydata(versR, [], polsR, normal_update=True)
bm.verts.ensure_lookup_table()
new_ve = bm.verts
vers_out = []
pols_out = []
i = 0
for vD, pR in zip(versD, polsR):
# part of donor to make limits
j = i
pD = polsD[i]
xx = [x[0] for x in vD]
x0 = (self.width_coef) / (max(xx)-min(xx))
yy = [y[1] for y in vD]
y0 = (self.width_coef) / (max(yy)-min(yy))
zz = [z[2] for z in vD]
zzz = (max(zz)-min(zz))
if zzz:
z0 = 1 / zzz
else:
z0 = 0
            # part of recipient polygons to receive the donor
last = len(pR)-1
            vs = [new_ve[v] for v in pR]  # new_ve - temporary data
            if z_coef and j < len(z_coef):
                z1 = z0 * z_coef[j]
            else:
                z1 = z0
new_vers = []
new_pols = []
for v in vD:
new_vers.append(self.lerp3(vs[0], vs[1], vs[2], vs[last], v, x0, y0, z1))
            for p in pD:
                new_pols.append(list(p))  # copy the polygon's index list
pols_out.append(new_pols)
vers_out.append(new_vers)
i += 1
bm.free()
output = Vector_degenerate(vers_out)
self.outputs['Vertices'].sv_set(output)
self.outputs['Poligons'].sv_set(pols_out)
def register():
bpy.utils.register_class(AdaptivePolsNode)
def unregister():
bpy.utils.unregister_class(AdaptivePolsNode)
#if __name__ == '__main__':
# register()
|
Pexego/PXGO_00053_2013_VT | project-addons/sale_product_customize/wizard/mrp_create_prod.py | Python | agpl-3.0 | 2,350 | 0.000426 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2014 Pexego All Rights Reserved
# $Jesús Ventosinos Mayor <jesus@pexego.es>$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields, api, exceptions, _
class CreateMountedProd(models.TransientModel):
_name = "mrp.mounted.product.create.wizard"
mount_product = fields.Many2one('product.product',
'Mount product', required=True)
mounted_product = fields.Many2one('product.product',
                                      'mounted product', required=True)
@api.multi
def create_product(self):
if not self.mount_product.default_code or not \
self.mounted_product.default_code:
raise exceptions.except_orm(_('Code error'),
_('One of the products not have ref.'))
prod_created = self.env['product.product'].search(
[('default_code', '=',
self.mount_product.default_code +
self.mounted_product.default_code)])
if prod_created:
raise exceptions.except_orm(_('Product error'),
                                        _('The mounted product already exists'))
self.env['product.product'].create_mounted_product(
self.mount_product, self.mounted_product)
        if self.mounted_product not in self.mount_product.can_mount_ids:
            self.mount_product.can_mount_ids = [(4, self.mounted_product.id)]
return True
|
nrjl/GPN | active_statruns.py | Python | mit | 7,118 | 0.00562 | # Simple 1D GP classification example
import time
import numpy as np
import matplotlib.pyplot as plt
import GPpref
import plot_tools as ptt
from active_learners import ActiveLearner, UCBLatent, PeakComparitor, LikelihoodImprovement, ABSThresh, UCBAbsRel
import test_data
import pickle
class Learner(object):
def __init__(self, model_type, obs_arguments):
self.model_type = model_type
self.obs_arguments = obs_arguments
def build_model(self, training_data):
self.model = self.model_type(**training_data)
def wrms(y_true, y_est, weight=True):
if weight:
w = y_true
else:
w = 1.0
return np.sqrt(np.mean(((y_true - y_est)*w)**2))
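# wrms is an (optionally) weighted RMS error: with weight=True the residuals
# are scaled by y_true, so mis-estimates in high-valued regions count for more.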
nowstr = time.strftime("%Y_%m_%d-%H_%M")
plt.rc('font',**{'family':'serif','sans-serif':['Computer Modern Roman']})
plt.rc('text', usetex=True)
# log_hyp = np.log([0.1,0.5,0.1,10.0]) # length_scale, sigma_f, sigma_probit, v_beta
# log_hyp = np.log([0.07, 0.75, 0.25, 1.0, 28.1])
log_hyp = np.log([0.05, 1.5, 0.09, 2.0, 50.0])
np.random.seed(10)
n_rel_train = 1
n_abs_train = 0
rel_sigma = 0.02
delta_f = 1e-5
beta_sigma = 0.8
beta_v = 100.0
n_xtest = 101
n_best_points = 15
n_mcsamples = 1000
n_ysamples = 101
n_trials = 100
n_rel_samples = 5
n_queries = 20
# Define polynomial function to be modelled
random_wave = test_data.VariableWave([0.6, 1.0], [5.0, 10.0], [0.0, 1.0], [10.0, 20.0])
nowstr = time.strftime("%Y_%m_%d-%H_%M")
data_dir = 'data/' + nowstr + '/'
ptt.ensure_dir(data_dir)
print "Data will be saved to: {0}".format(data_dir)
# True function
x_plot = np.linspace(0.0,1.0,n_xtest,dtype='float')
x_test = np.atleast_2d(x_plot).T
# Construct active learner object
learners = [Learner(ActiveLearner, {'p_rel': 0.5, 'n_rel_samples': n_rel_samples}), # 'Random (rel and abs)',
Learner(ActiveLearner, {'p_rel': 1.0, 'n_rel_samples': n_rel_samples}), # 'Random (rel)',
Learner(ActiveLearner, {'p_rel': 0.0, 'n_rel_samples': n_rel_samples}), # 'Random (abs)',
Learner(UCBLatent, {'gamma': 2.0, 'n_test': 100}), # 'UCBLatent'
Learner(UCBAbsRel, { 'n_test': 100, 'p_rel': 0.5, 'n_rel_samples': n_rel_samples, 'gamma': 2.0, 'tau':5.0}), # 'UCBCombined',
# Learner(ABSThresh, {'n_test': 100, 'p_thresh': 0.7}), # 'ABSThresh'
# Learner(PeakComparitor, {'gamma': 2.0, 'n_test': 50, 'n_rel_samples': n_rel_samples}), # 'PeakComparitor'
# Learner(LikelihoodImprovement, {'req_improvement': 0.60, 'n_test': 50, 'gamma': 2.0, 'n_rel_samples': n_rel_samples, 'p_thresh': 0.7}) # 'LikelihoodImprovement'
]
names = ['Random (rel and abs)', 'Random (rel)', 'Random (abs)', 'UCBLatent (abs)', 'UCBCombined (rel and abs)']
n_learners = len(learners)
obs_array = [{'name': name, 'obs': []} for name in names]
wrms_results = np.zeros((n_learners, n_queries+1, n_trials))
true_pos_results = np.zeros((n_learners, n_queries+1, n_trials), dtype='int')
selected_error = np.zeros((n_learners, n_queries+1, n_trials))
for trial_number in range(n_trials):
print 'Trial {0}'.format(trial_number)
random_wave.randomize(print_vals=True)
rel_obs_fun = GPpref.RelObservationSampler(random_wave.out, GPpref.PrefProbit(sigma=rel_sigma))
abs_obs_fun = GPpref.AbsObservationSampler(random_wave.out, GPpref.AbsBoundProbit(sigma=beta_sigma, v=beta_v))
f_true = abs_obs_fun.f(x_test)
y_abs_true = abs_obs_fun.mean_link(x_test)
best_points = np.argpartition(y_abs_true.flatten(), -n_best_points)[-n_best_points:]
best_points_set = set(best_points)
abs_y_samples = np.atleast_2d(np.linspace(0.01, 0.99, n_ysamples)).T
p_abs_y_true = abs_obs_fun.observation_likelihood_array(x_test, abs_y_samples)
p_rel_y_true = rel_obs_fun.observation_likelihood_array(x_test)
# Initial data
x_rel, uvi_rel, uv_rel, y_rel, fuv_rel = rel_obs_fun.generate_n_observations(n_rel_train, n_xdim=1)
x_abs, y_abs, mu_abs = abs_obs_fun.generate_n_observations(n_abs_train, n_xdim=1)
training_data = {'x_rel': x_rel, 'uvi_rel': uvi_rel, 'x_abs': x_abs, 'y_rel': y_rel, 'y_abs': y_abs,
                     'delta_f': delta_f, 'rel_likelihood': GPpref.PrefProbit(),
'abs_likelihood': GPpref.AbsBoundProbit()}
# Get initial solution
for nl, learner in enumerate(learners):
learner.build_model(training_data)
learner.model.set_hyperparameters(log_hyp)
f = learner.model.solve_laplace()
        fhat, vhat = learner.model.predict_latent(x_test)
y_abs_est = learner.model.abs_posterior_mean(x_test, fhat, vhat)
wrms_results[nl, 0, trial_number] = wrms(y_abs_true, y_abs_est)
for obs_num in range(n_queries):
learners[4].obs_arguments['p_rel'] = max(0.0, (20-obs_num)/20.0)
for nl, learner in enumerate(learners):
next_x = learner.model.select_observation(**learner.obs_arguments)
if next_x.shape[0] == 1:
next_y, next_f = abs_obs_fun.generate_observations(next_x)
learner.model.add_observations(next_x, next_y)
# print 'Abs: x:{0}, y:{1}'.format(next_x[0], next_y[0])
else:
next_y, next_uvi, next_fx = rel_obs_fun.cheat_multi_sampler(next_x)
next_fuv = next_fx[next_uvi][:,:,0]
fuv_rel = np.concatenate((fuv_rel, next_fuv), 0)
learner.model.add_observations(next_x, next_y, next_uvi)
# print 'Rel: x:{0}, best_index:{1}'.format(next_x.flatten(), next_uvi[0, 1])
f = learner.model.solve_laplace()
fhat, vhat = learner.model.predict_latent(x_test)
y_abs_est = learner.model.abs_posterior_mean(x_test, fhat, vhat)
best_points_est = set(np.argpartition(y_abs_est.flatten(), -n_best_points)[-n_best_points:])
true_pos_results[nl, obs_num+1, trial_number] = len(best_points_set.intersection(best_points_est))
wrms_results[nl, obs_num+1, trial_number] = wrms(y_abs_true, y_abs_est)
selected_error[nl, obs_num + 1, trial_number] = wrms(y_abs_true[best_points], y_abs_est[best_points], weight=False)
print true_pos_results[:, obs_num+1, trial_number]
print wrms_results[:, obs_num+1, trial_number]
for nl, learner in enumerate(learners):
obs_tuple = learner.model.get_observations()
obs_array[nl]['obs'].append(ObsObject(*obs_tuple))
with open(data_dir+'wrms.pkl', 'wb') as fh:
pickle.dump(wrms_results, fh)
with open(data_dir+'true_pos.pkl', 'wb') as fh:
pickle.dump(true_pos_results, fh)
with open(data_dir+'selected_error.pkl', 'wb') as fh:
pickle.dump(selected_error, fh)
with open(data_dir+'obs.pkl', 'wb') as fh:
pickle.dump(obs_array, fh)
f0, ax0 = plt.subplots()
hl = ax0.plot(np.arange(n_queries+1), np.mean(wrms_results, axis=2).T)
f0.legend(hl, names)
f1, ax1 = plt.subplots()
hl1 = ax1.plot(np.arange(n_queries+1), np.mean(true_pos_results, axis=2).T)
f1.legend(hl1, names)
f2, ax2 = plt.subplots()
hl2 = ax2.plot(np.arange(n_queries+1), np.mean(selected_error, axis=2).T)
f2.legend(hl2, names)
plt.show() |
L0st1nC0d3/TextAdventure | todb.py | Python | epl-1.0 | 1,112 | 0.003597 | import sys
import sqlite3
import os
import os.path
def main(dbname):
con = sqlite3.connect(dbname)
con.execute("CREATE TABLE IF NOT EXISTS rooms(id INTEGER PRIMARY KEY, name TEXT NOT NULL,"
" description TEXT NOT NULL)")
con.commit()
path = "./rooms"
    for f in os.listdir(path):
f_n = os.path.join(path, f)
base, extension = os.path.splitext(f)
if extension == '.json' and 'r' in base:
with open(f_n, 'r') as fa:
json = fa.read()
            # the room id follows the leading 'r' in the file name, e.g. r12.json -> 12
            s = base[1:]
print("Inserting room {0}". | format(int(s)))
con.execute("INSERT OR REPLACE INTO rooms(id, name, description)"
"VALUES(?, ?, ?);", (int(s), json.decode('utf8'), json.decode('utf8')))
con.commit()
con.close()
if __name__ == "__main__":
if len(sys.argv) != 2:
print('Usage: {0} <database name>'.format(sys.argv[0]))
else:
main(sys.argv[1])
|
senttech/Cura | plugins/PerObjectSettingsTool/__init__.py | Python | agpl-3.0 | 1,195 | 0.005858 | # Copyright (c) 2015 Ultimaker B.V.
# Uranium is released under the terms of the AGPLv3 or higher.
from . import PerObjectSettingsTool
from . import PerObjectSettingVisibilityHandler
from PyQt5.QtQml import qmlRegisterType
from UM.i18n import i18nCatalog
i18n_catalog = i18nCatalog("cura")
def getMetaData():
return {
"plugin": {
"name": i18n_catalog.i18nc("@label", "Per Model Settings Tool"),
"author": "Ultimaker",
"version": "1.0",
"description": i18n_catalog.i18nc("@info:whatsthis", "Provides the Per Model Settings."),
"api": 3
},
"tool": {
"name": i18n_catalog.i18nc("@label", "Per Model Settings"),
"description": i18n_catalog.i18nc("@info:tooltip", "Co | nfigure Per Model Settings"),
"icon": "setting_per_object",
"tool_panel": "PerObjectSettingsPanel.qml",
"weight": 3
},
}
def register(app):
qmlRegisterType(PerObjectSettingVisibilityHandler.PerObjectSettingVisibilityHandler, "Cura", 1, 0,
"PerObjectSettingVisibilityHandler")
return { "tool": PerObjectSettingsTool.PerObjectSetting | sTool() }
|
tobias-froehlich/cryspy | cryspy/lab.py | Python | gpl-3.0 | 6,803 | 0.00588 | import cryspy.numbers
import cryspy.geo
class Goniometer:
def __init__(self, motiontype, axis, direction, parametername):
assert motiontype in ["translation", "rotation"], \
"First parameter for creating a Goniometer " \
"must be one of the strings " \
"'translation' or 'rotation'."
assert axis in ["x", "y", "z"], \
"Second parameter for creating a Goniometer " \
"must be one of the strings 'x', 'y' or 'z'"
if motiontype == "translation":
assert direction in ["positive", "negative"], \
"Third parameter for creating a Goniometer " \
"for translation must be one of the strings " \
"'positive' or 'negative'"
elif motiontype == "rotation":
assert direction in ["clockwise", "counterclockwise"], \
"Third parameter for creating a Goniometer for " \
"rotation must be one of the strings "\
"'clockwise' or 'counterclockwise'"
assert isinstance(parametername, str), \
"Fourth parameter for creating a Goniometer must be " \
"of type str. You can use any string."
self.composed = False
self.motiontype = motiontype
self.axis = axis
self.direction = direction
self.parameternames = [parametername]
def operator(self, parameters):
assert isinstance(parameters, dict), \
"Parameter of cryspy.lab.Goniometer.operator() must be a " \
"dictionary"
if not self.composed:
# assert len(parameters) == 1, \
# "A Goniometer which is not composed can have only one " \
# "parameter."
# parametername = list(parameters.keys())[0]
assert self.parameternames[0] in parameters.keys(), \
"You must specify the parameter called '%s'."\
%(self.parameternames[0])
parameter = parameters[self.parameternames[0]]
if self.motiontype == "translation":
if self.direction == "negative":
parameter = -parameter
if self.axis == "x":
return cryspy.geo.Operator(
cryspy.numbers.Matrix(
[[1, 0, 0, parameter],
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]
]
)
)
if self.axis == "y":
return cryspy.geo.Operator(
cryspy.numbers.Matrix(
[[1, 0, 0, 0],
[0, 1, 0, parameter],
[0, 0, 1, 0],
[0, 0, 0, 1]
]
)
)
if self.axis == "z":
return cryspy.geo.Operator(
cryspy.numbers.Matrix(
[[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, parameter],
[0, 0, 0, 1]
]
)
)
elif self.motiontype == "rotation":
if self.direction == "clockwise":
parameter = -parameter
cos = cryspy.numbers.dcos(parameter)
sin = cryspy.numbers.dsin(parameter)
if self.axis == "x":
return cryspy.geo.Operator(
cryspy.numbers.Matrix(
[[1, 0, 0, 0],
[0, cos, -sin, 0],
[0, sin, cos, 0],
[0, 0, 0, 1]
]
)
)
if self.axis == "y":
return cryspy.geo.Operator(
cryspy.numbers.Matrix(
[[ cos, 0, sin, 0],
[ 0, 1, 0, 0],
[-sin, 0, cos, 0],
[ 0, 0, 0, 1]
]
)
)
if self.axis == "z":
return cryspy.geo.Operator(
cryspy.numbers.Matrix(
[[cos, -sin, 0, 0],
[sin, cos, 0, 0],
[ 0, 0, 1, 0],
[ 0, 0, 0, 1]
]
)
)
else:
return cryspy.geo.Operator(
self.lower_gonio.operator(parameters).value
* self.upper_gonio.operator(parameters).value
)
def __str__(self):
if not self.composed:
if self.motiontype == "translation":
return " / translate by \\ \n" \
"| %16s |\n" \
"| along |\n" \
"| %s-axis |\n" \
" \\ %8s / "\
%(self.parameternames[0], self.axis, self.direction)
elif self.motiontype == "rotation":
return " / rotate by \\ \n" \
"| | %16s |\n" \
"| around |\n" \
"| %s-axis |\n" \
" \\ %16s / "\
%(self.parameternames[0], self.axis, self.direction)
else:
return cryspy.blockprint.block([[str(self.lower_gonio), " \n \n*\n \n", str(self.upper_gonio)]])
    def __mul__(self, right):
if isinstance(right, Goniometer):
for parametername in right.parameternames:
assert parametername not in self.parameternames, \
"Cannot multiply two Goniometers which have " \
"both the parameter '%s'."%(parametername)
result = Goniometer("translation", "x", "positive", "dummy")
result.composed = True
result.motiontype = None
result.axis = None
result.direction = None
result.parameternames = self.parameternames + right.parameternames
result.lower_gonio = self
result.upper_gonio = right
return result
else:
return NotImplemented
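# A minimal usage sketch (illustrative parameter names; not part of the
# module): compose a rotation stage with a translation stage and evaluate the
# combined operator by supplying both parameters at once.
if __name__ == "__main__":
    omega_circle = Goniometer("rotation", "z", "counterclockwise", "omega")
    z_stage = Goniometer("translation", "z", "positive", "tz")
    gonio = omega_circle * z_stage
    op = gonio.operator({"omega": 90, "tz": 2})  # a cryspy.geo.Operator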
|
GluuFederation/community-edition-setup | install.py | Python | mit | 4,334 | 0.004845 | #!/usr/bin/python3
import sys
sys.path.append('/usr/lib/python3.6/gluu-packaged/')
import site
import re
import glob
import os
import subprocess
import argparse
import time
import zipfile
import shutil
import distutils
import requests
from urllib.parse import urljoin
run_time = time.strftime("%Y-%m-%d_%H-%M-%S")
ces_dir = '/install/community-edition-setup'
app_dir = '/opt/dist/app'
parser = argparse.ArgumentParser(description="This script extracts community-edition-setup package and runs setup.py without arguments")
parser.add_argument('-o', help="download latest package from github and override current community-edition-setup", action='store_true')
parser.add_argument('--args', help="Arguments to be passed to setup.py")
parser.add_argument('-b', help="Github branch name, e.g. version_4.0.b4")
argsp = parser.parse_args()
npyscreen_package = '/opt/dist/app/npyscreen-master.zip'
if argsp.o:
for cep in glob.glob('/opt/dist/gluu/community-edition-setup*.zip'):
os.remove(cep)
if os.path.exists(ces_dir):
back_dir = ces_dir+'.back.'+run_time
print("Backing up", ces_dir, "to", back_dir)
os.rename(ces_dir, back_dir)
github_base_url = 'https://github.com/GluuFederation/community-edition-setup/archive/'
archive_name = 'master.zip'
if argsp.b:
    archive_name = argsp.b + '.zip'
download_link = urljoin(github_base_url, archive_name)
ces_list = glob.glob('/opt/dist/gluu/community-edition-setup*.zip')
if not ces_list:
if not argsp.o:
print("community-edition-setup package was not found")
dl = input("Download from github? (Y/n) ")
else:
dl = 'y'
if not dl.strip() or dl.lower()[0]=='y':
print("Downloading ", download_link)
result = requests.get(download_link, allow_redirects=True)
with open('/opt/dist/gluu/community-edition-setup.zip', 'wb') as w:
w.write(result.content)
        ces_list = [os.path.join('/opt/dist/gluu', archive_name)]
else:
print("Exiting...")
sys.exit()
ces = max(ces_list)
ces_zip = zipfile.ZipFile(ces)
parent_dir = ces_zip.filelist[0].filename
target_dir = '/tmp/ces_tmp'
ces_zip.extractall(target_dir)
if not os.path.exists(ces_dir):
os.makedirs(ces_dir)
print("Extracting community-edition-setup package")
source_dir = os.path.join(target_dir, parent_dir)
ces_zip.close()
if not os.path.exists(source_dir):
sys.exit("Unzip failed. Exting")
cmd = 'cp -r -f {}* /install/community-edition-setup'.format(source_dir)
os.system(cmd)
os.system('rm -r -f '+source_dir)
shutil.rmtree(target_dir)
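# Note: 33261 == 0o100755, i.e. a regular file with rwxr-xr-x permissions.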
os.chmod('/install/community-edition-setup/setup.py', 33261)
post_setup = '/install/community-edition-setup/post-setup-add-components.py'
if os.path.exists(post_setup):
os.chmod(post_setup, 33261)
gluu_install = '/install/community-edition-setup/gluu_install.py'
if os.path.exists(gluu_install):
os.remove(gluu_install)
if argsp.o:
npy_download_link = 'https://github.com/npcole/npyscreen/archive/master.zip'
result = requests.get(npy_download_link, allow_redirects=True)
with open(npyscreen_package, 'wb') as w:
w.write(result.content)
if os.path.exists(npyscreen_package):
site_libdir = site.getsitepackages()[0]
dest_dir = os.path.join(site_libdir, 'npyscreen')
if not os.path.exists(dest_dir):
print("Extracting npyscreen to", dest_dir)
npyzip = zipfile.ZipFile(npyscreen_package)
parent_dir = npyzip.filelist[0].filename
target_dir = '/tmp/npyscreen_tmp'
npyzip.extractall(target_dir)
npyzip.close()
shutil.copytree(
os.path.join(target_dir, parent_dir, 'npyscreen'),
dest_dir
)
shutil.rmtree(target_dir)
print("Extracting sqlalchemy")
sqlalchemy_fn = os.path.join(app_dir, 'sqlalchemy.zip')
sqlalchemy_zip = zipfile.ZipFile(sqlalchemy_fn)
sqlalchemy_parent_dir = sqlalchemy_zip.filelist[0].filename
target_dir = '/tmp/sqlalchemy_tmp'
if os.path.exists(target_dir):
shutil.rmtree(target_dir)
sqlalchemy_zip.extractall(target_dir)
sqlalchemy_zip.close()
sqlalchemy_dir = os.path.join(ces_dir, 'setup_app/pylib/sqlalchemy')
shutil.copytree(
os.path.join(target_dir, sqlalchemy_parent_dir, 'lib/sqlalchemy'),
sqlalchemy_dir
)
shutil.rmtree(target_dir)
|
google/clusterfuzz | src/clusterfuzz/_internal/chrome/build_info.py | Python | apache-2.0 | 6,483 | 0.009409 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for fetching build info from OmahaProxy."""
import json
import re
from clusterfuzz._internal.base import utils
from clusterfuzz._internal.metrics import logs
from clusterfuzz._internal.system import environment
BUILD_INFO_PATTERN = ('([a-z]+),([a-z]+),([0-9.]+),'
'[^,]*,[^,]*,[^,]*,[^,]*,[^,]*,'
'([0-9a-f]+),.*')
BUILD_INFO_URL = 'https://omahaproxy.appspot.com/all?csv=1'
BUILD_INFO_URL_CD = ('https://chromiumdash.appspot.com/fetch_releases?'
'num=1&platform={platform}')
class BuildInfo(object):
"""BuildInfo holds build metadata pulled from OmahaProxy."""
def __init__(self, platform, build_type, version, revision):
self.platform = platform
self.build_type = build_type
self.version = version
self.revision = revision
def _convert_platform_to_omahaproxy_platform(platform):
"""Converts platform to omahaproxy platform for use in
get_production_builds_info."""
platform_lower = platform.lower()
if platform_lower == 'windows':
return 'win'
return platform_lower
def _convert_platform_to_chromiumdash_platform(platform):
"""Converts platform to Chromium Dash platform.
Note that Windows in Chromium Dash is win64 and we only want win32."""
platform_lower = platform.lower()
if platform_lower == 'windows':
return 'Win32'
return platform_lower.capitalize()
def _fetch_releases_from_chromiumdash(platform, channel=None):
"""Makes a Call to chromiumdash's fetch_releases api,
and returns its json array response."""
chromiumdash_platform = _convert_platform_to_chromiumdash_platform(platform)
query_url = BUILD_INFO_URL_CD.format(platform=chromiumdash_platform)
if channel:
query_url = query_url + '&channel=' + channel
build_info = utils.fetch_url(query_url)
if not build_info:
logs.log_error('Failed to fetch build info from %s' % query_url)
return []
try:
build_info_json = json.loads(build_info)
if not build_info_json:
logs.log_error('Empty response from %s' % query_url)
return []
except Exception:
logs.log_error('Malformed response from %s' % query_url)
return []
return build_info_json
def get_production_builds_info(platform):
"""Gets the build information for production builds.
Omits platforms containing digits, namely, win64.
Omits channels containing underscore, namely, canary_asan.
Platform is e.g. ANDROID, LINUX, MAC, WIN.
"""
builds_metadata = []
omahaproxy_platform = _convert_platform_to_omahaproxy_platform(platform)
build_info = utils.fetch_url(BUILD_INFO_URL)
if not build_info:
logs.log_error('Failed to fetch build info from %s' % BUILD_INFO_URL)
return []
for line in build_info.splitlines():
match = re.match(BUILD_INFO_PATTERN, line)
if not match:
continue
platform_type = match.group(1)
if platform_type != omahaproxy_platform:
continue
build_type = match.group(2)
version = match.group(3)
revision = match.group(4)
builds_metadata.append(BuildInfo(platform, build_type, version, revision))
return builds_metadata
def get_production_builds_info_from_cd(platform):
"""Gets the build information from Chromium Dash for production builds.
Omits platforms containing digits, namely, win64.
Omits channels containing underscore, namely, canary_asan.
Platform is e.g. ANDROID, LINUX, MAC, WINDOWS.
"""
builds_metadata = []
build_info_json = _fetch_releases_from_chromiumdash(platform)
for info in build_info_json:
build_type = info['channel'].lower()
if build_type == 'extended':
build_type = 'extended_stable'
version = info['version']
revision = info['hashes']['chromium']
builds_metadata.append(BuildInfo(platform, build_type, version, revision))
# Hack: pretend Windows extended stable info to be Linux extended stable info.
# Because Linux doesn't have extended stable channel.
if platform.lower() == 'linux':
es_info = _fetch_releases_from_chromiumdash(
'WINDOWS', channel='Extended')[0]
builds_metadata.append(
BuildInfo(platform, 'extended_stable', es_info['version'],
es_info['hashes']['chromium']))
return builds_metadata
def get_release_milestone(build_type, platform):
"""Return milestone for a particular release."""
if build_type == 'head':
actual_build_type = 'canary'
else:
actual_build_type = build_type
builds_metadata = get_production_builds_info_from_cd(platform)
for build_metadata in builds_metadata:
if build_metadata.build_type == actual_build_type:
version_parts = build_metadata.version.split('.')
milestone = version_parts[0]
if milestone and milestone.isdigit():
return int(milestone)
if actual_build_type == 'canary':
# If there is no canary for that platform, just return canary from windows.
return get_release_milestone('canary', 'windows')
return None
def get_build_to_revision_mappings(platform=None):
"""Gets the build information."""
if not platform:
platform = environment.platform()
result = {}
build_info_json = _fetch_releases_from_chromiumdash(platform)
for info in build_info_json:
build_type = info['channel'].lower()
if build_type == 'extended':
build_type = 'extended_stable'
version = info['version']
revision = str(info['chromium_main_branch_position'])
result[build_type] = {'revision': revision, 'version': version}
# Hack: pretend Windows extended stable info to be Linux extended stable info.
# Because Linux doesn't have extended stable channel.
if platform.lower() == 'linux':
es_info = _fetch_releases_from_chromiumdash(
'WINDOWS', channel='Extended')[0]
result['extended_stable'] = {
'revision': str(es_info['chromium_main_branch_position']),
'version': es_info['version']
}
return result
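# Illustrative return shape for get_build_to_revision_mappings (keys sketched
# from the code above; actual channel names and values come from Chromium Dash):
#   {'stable': {'revision': '<branch position>', 'version': '<version>'}, ...}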
|
Apreche/Wikibloks | allauth/utils.py | Python | mit | 2,568 | 0.001558 | from django.conf import settings
from django.template.defaultfilters import slugify
from django.contrib.auth.models import User
from django.core.validators import validate_email, ValidationError
from django.db.models import EmailField
from emailconfirmation.models import EmailAddress
def get_login_redirect_url(request):
"""
Returns a url to redirect to after the login
"""
if 'next' in request.session:
next = request.session['next']
del request.session['next']
return next
elif 'next' in request.GET:
return request.GET.get('next')
elif 'next' in request.POST:
        return request.POST.get('next')
else:
return getattr(settings, 'LOGIN_REDIRECT_URL', '/')
def generate_unique_username(txt):
username = slugify(txt.split('@')[0])
max_length = User._meta.get_field('username').max_length
i = 0
while True:
try:
if i:
pfx = str(i+1)
else:
pfx = ''
ret = username[0:max_length-len(pfx)] + pfx
User.objects.get(username=ret)
i += 1
except User.DoesNotExist:
return ret
def valid_email_or_none(email):
ret = None
try:
if email:
validate_email(email)
if len(email) <= EmailField().max_length:
ret = email
except ValidationError:
pass
return ret
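# e.g. valid_email_or_none('not-an-email') -> None, while a syntactically valid
# address that fits within EmailField's max_length is returned unchanged.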
def get_email_address(email, exclude_user=None):
"""
Returns an EmailAddress instance matching the given email. Both
User.email and EmailAddress.email are considered candidates. This
was done to deal gracefully with inconsistencies that are inherent
due to the duplication of the email field in User and
EmailAddress. In case a User.email match is found the result is
returned in a temporary EmailAddress instance.
"""
try:
emailaddresses = EmailAddress.objects
if exclude_user:
emailaddresses = emailaddresses.exclude(user=exclude_user)
ret = emailaddresses.get(email__iexact=email)
except EmailAddress.DoesNotExist:
try:
users = User.objects
if exclude_user:
                users = users.exclude(pk=exclude_user.pk)  # User has no 'user' field; exclude by pk
usr = users.get(email__iexact=email)
ret = EmailAddress(user=usr,
email=email,
verified=False,
primary=True)
except User.DoesNotExist:
ret = None
return ret
|
24OI/CodeStack | cpplint.py | Python | gpl-2.0 | 241,962 | 0.008853 | #!/usr/bin/env python
#
# Copyright (c) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Does google-lint on c++ files.
The goal of this script is to identify places in the code that *may*
be in non-compliance with google style. It does not attempt to fix
up these problems -- the point is to educate. It does also not
attempt to find all problems, or to ensure that everything it does
find is legitimately a problem.
In particular, we can get very confused by /* and // inside strings!
We do a small hack, which is to ignore //'s with "'s after them on the
same line, but it is far from perfect (in either direction).
"""
import codecs
import copy
import getopt
import math # for log
import os
import re
import sre_compile
import string
import sys
import unicodedata
_USAGE = """
Syntax: cpplint.py [--verbose=#] [--output=vs7] [--filter=-x,+y,...]
[--counting=total|toplevel|detailed] [--root=subdir]
[--linelength=digits]
<file> [file] ...
The style guidelines this tries to follow are those in
http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml
Every problem is given a confidence score from 1-5, with 5 meaning we are
certain of the problem, and 1 meaning it could be a legitimate construct.
This will miss some errors, and is not a substitute for a code review.
To suppress false-positive errors of a certain category, add a
'NOLINT(category)' comment to the line. NOLINT or NOLINT(*)
suppresses errors of all categories on that line.
The files passed in will be linted; at least one file must be provided.
Default linted extensions are .cc, .cpp, .cu, .cuh and .h. Change the
extensions with the --extensions flag.
Flags:
output=vs7
By default, the output is formatted to ease emacs parsing. Visual Studio
compatible output (vs7) may also be used. Other formats are unsupported.
verbose=#
Specify a number 0-5 to restrict errors to certain verbosity levels.
filter=-x,+y,...
Specify a comma-separated list of category-filters to apply: only
error messages whose category names pass the filters will be printed.
(Category names are printed with the message and look like
"[whitespace/indent]".) Filters are evaluated left to right.
"-FOO" and "FOO" means "do not print categories that start with FOO".
"+FOO" means "do print categories that start with FOO".
Examples: --filter=-whitespace,+whitespace/braces
--filter=whitespace,runtime/printf,+runtime/printf_format
--filter=-,+build/include_what_you_use
To see a list of all the categories used in cpplint, pass no arg:
--filter=
counting=total|toplevel|detailed
The total number of errors found is always printed. If
'toplevel' is provided, then the count of errors in each of
the top-level categories like 'build' and 'whitespace' will
    also be printed. If 'detailed' is provided, then a count
is provided for each category like 'build/class'.
root=subdir
    The root directory used for deriving header guard CPP variable.
By default, the header guard CPP variable is calculated as the relative
path to the directory that contains .git, .hg, or .svn. When this flag
is specified, the relative path is calculated from the specified
directory. If the specified directory does not exist, this flag is
ignored.
Examples:
Assuming that src/.git exists, the header guard CPP variables for
src/chrome/browser/ui/browser.h are:
No flag => CHROME_BROWSER_UI_BROWSER_H_
--root=chrome => BROWSER_UI_BROWSER_H_
--root=chrome/browser => UI_BROWSER_H_
linelength=digits
This is the allowed line length for the project. The default value is
80 characters.
Examples:
--linelength=120
extensions=extension,extension,...
The allowed file extensions that cpplint will check
Examples:
--extensions=hpp,cpp
cpplint.py supports per-directory configurations specified in CPPLINT.cfg
files. CPPLINT.cfg file can contain a number of key=value pairs.
Currently the following options are supported:
set noparent
filter=+filter1,-filter2,...
exclude_files=regex
linelength=80
"set noparent" option prevents cpplint from traversing directory tree
upwards looking for more .cfg files in parent directories. This option
is usually placed in the top-level project directory.
The "filter" option is similar in function to --filter flag. It specifies
message filters in addition to the |_DEFAULT_FILTERS| and those specified
through --filter command-line flag.
"exclude_files" allows to specify a regular expression to be matched against
a file name. If the expression matches, the file is skipped and not run
through liner.
"linelength" allows to specify the allowed line length for the project.
CPPLINT.cfg has an effect on files in the same directory and all
sub-directories, unless overridden by a nested configuration file.
Example file:
filter=-build/include_order,+build/include_alpha
exclude_files=.*\.cc
The above example disables build/include_order warning and enables
build/include_alpha as well as excludes all .cc from being
processed by linter, in the current directory (where the .cfg
file is located) and all sub-directories.
"""
# We categorize each error message we print. Here are the categories.
# We want an explicit list so we can list them all in cpplint --filter=.
# If you add a new error message with a new category, add it to the list
# here! cpplint_unittest.py should tell you if you forget to do this.
_ERROR_CATEGORIES = [
'build/class',
'build/c++11',
'build/deprecated',
'build/endif_comment',
'build/explicit_make_pair',
'build/forward_decl',
'build/header_guard',
'build/include',
'build/include_alpha',
'build/include_order',
'build/include_what_you_use',
'build/namespaces',
'build/printf_format',
'build/storage_class',
'legal/copyright',
'readability/alt_tokens',
'readability/braces',
'readability/casting',
'readability/check',
'readability/constructors',
'readability/fn_size',
'readability/function',
'readability/inheritance',
'readability/multiline_comment',
'readability/multiline_string',
'readability/namespace',
'readability/nolint',
'readability/nul',
'readability/strings',
'readability/todo',
'readability/utf8',
'runtime/arrays',
'runtime/casting',
'runtime/exp |
oopsno/arena | src/arena/cmd.py | Python | bsd-3-clause | 1,864 | 0.000536 | import subprocess
class CmdArgument(object):
_PREFIX = None
_KEY = None
def __init__(self, value):
self._value = value
@property
def value(self):
return self._value
@property
def prefix(self):
return self._PREFIX
@property
def key(self):
return self._KEY
def format(self) -> str:
raise NotImplementedError
    def __str__(self):
try:
return self.format()
except NotImplementedError:
return '{}{} "{}"'.format(self._PREFIX, self._KEY, self._value)
class Cmd(object):
    def __init__(self, cmd: str):
self._cmd = cmd
self._args = []
def config(self, *args: [CmdArgument]):
self.clear_args()
self.add_arg(args)
def add_arg(self, *arg: [CmdArgument]):
self._args.extend(arg)
def clear_args(self):
self._args.clear()
@property
def command(self) -> str:
return self._cmd
@property
def arguments(self) -> [CmdArgument]:
return self._args
@property
def options(self) -> str:
raise NotImplementedError
def _dump_args(self):
return ' '.join(map(str, self._args))
def __str__(self):
return '{} {}'.format(self._cmd, self._dump_args())
def run(self, **kwargs):
subprocess.run(str(self), **kwargs)
def su_run(self, **kwargs):
raise NotImplementedError("DO NOT execute {} as root".format(self._cmd))
def cmd_argument(key: str, prefix: str = None):
    def _cmd_argument(cls: CmdArgument):
        if not issubclass(cls, CmdArgument):
            raise TypeError('{!r} must derive from CmdArgument'.format(cls))
        cls._KEY = key
        if prefix:
            cls._PREFIX = prefix
        else:
            cls._PREFIX = '-'
        return cls
    return _cmd_argument
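# Minimal usage sketch (illustrative names only; not part of the module):
if __name__ == '__main__':
    @cmd_argument('n')
    class LineCount(CmdArgument):
        def format(self) -> str:
            return '{}{} {}'.format(self.prefix, self.key, self.value)

    head = Cmd('head')
    head.add_arg(LineCount(5))
    print(str(head))  # -> head -n 5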
|
ncbray/pystream | bin/decompiler/destacker/maindestacker.py | Python | apache-2.0 | 20,089 | 0.030514 | # Copyright 2011 Nicholas Bray
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from util.typedispatch import *
import collections
from language.python.ast import *
from .. import errors
from . import pythonstack
from .. import flowblocks
from . import instructiontranslator
from language.python.annotations import codeOrigin
from application.errors import TemporaryLimitation
PythonStack = pythonstack.PythonStack
def appendSuite(current, next):
# Only append if the next value is worthwhile
if next and next.significant():
if not current or not current.significant():
# First block
if not isinstance(next, Suite):
next = Suite([next])
current = next
elif not isinstance(current, Suite):
# Second block
current = Suite([current, next])
else:
# All subsequent blocks
current.append(next)
return current
class SSADefinitions(object):
def __init__(self):
self.defn = {}
def define(self, local, defn, merge=False):
assert not local in self.defn, "Attempt to redefine %r" % local
assert local != defn
# Reach for the definition
while defn in self.defn:
assert defn != self.defn[defn]
defn = self.defn[defn]
self.defn[local] = defn
#print local, "->", defn
return local
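    # e.g. after define(b, a) then define(c, b), definition(c) resolves
    # directly to a, because define() chases existing chains eagerly.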
def reachTrivial(self, expr):
defn = self.definition(expr)
if defn.isReference():
return defn
else:
return expr
def definition(self, local):
return self.defn.get(local, local)
class DestackVisitor(TypeDispatcher):
__namedispatch__ = True # HACK emulates old visitor
def __init__(self, code, mname, compiler, callback, trace=False):
TypeDispatcher.__init__(self)
self.ssa = SSADefinitions()
self.code = code
self.moduleName = mname
self.locals = {}
self.defns = collections.defaultdict(dict)
self.compiler = compiler
self.callback = callback
self.trace = trace
def getDefns(self, block):
if block in self.defns:
return self.defns[block]
elif isinstance(block, Suite):
assert block.blocks
return self.getDefns(block.blocks[0])
else:
return {}
def visitReturn(self, block, stack):
assert isinstance(stack, PythonStack)
assert stack.size() >= 1
arg = stack.pop()
defn = self.ssa.definition(arg)
# Special case: returning None
if isinstance(defn, Existing) and defn.object.isConstant() and defn.object.pyobj == None:
arg = defn
outblock = Return([arg])
return outblock, None
def visitRaise(self, block, stack):
assert isinstance(stack, PythonStack)
assert block.nargs <= stack.size()
exception = None
parameter = None
traceback = None
# The order is revered compared to the exception handlers? (!)
if block.nargs >= 3:
traceback = stack.pop()
if block.nargs >= 2:
parameter = stack.pop()
if block.nargs >= 1:
exception = stack.pop()
outblock = Raise(exception, parameter, traceback)
return outblock, None
def visitBreak(self, block, stack):
assert isinstance(stack, PythonStack)
outblock = Break()
return outblock, None
def visitContinue(self, block, stack):
assert isinstance(stack, PythonStack)
outblock = Continue()
return outblock, None
def visitNormalExit(self, block, stack):
assert isinstance(stack, PythonStack)
return None, stack
def visitNormalEntry(self, block, stack):
assert isinstance(stack, PythonStack)
return None, stack
def visitLinear(self, block, stack):
assert isinstance(stack, PythonStack), stack
        # Insurance, but probably unnecessary
stack = stack.duplicate()
t = instructiontranslator.InstructionTranslator(self.code, self.moduleName, self.ssa, self.locals, self.compiler, self.callback, self.trace)
inst, defn, stack = t.translate(block.instructions, stack)
outblock = Suite(inst)
self.defns[outblock] = defn
return outblock, stack
def visitSuiteRegion(self, block, stack):
assert isinstance(stack, PythonStack), stack
return self.handleLinearRegion(block, stack)
def visitMerge(self, block, stack):
assert isinstance(stack, PythonStack), stack
assert len(block.incoming) == 1 and block.numEntries()==1, block.incoming
return None, stack
def handleLinearRegion(self, region, stack):
assert isinstance(stack, PythonStack), stack
        block = region.entry()
output = None
newdefns = {}
while block:
outblock, stack = self(block, stack)
if outblock != None and outblock in self.defns:
for k, v in self.defns[outblock].iteritems():
                    assert not k in newdefns, (k, newdefns)
newdefns[k] = v
assert isinstance(stack, PythonStack) or stack==None, block
output = appendSuite(output, outblock)
assert block.numExits() <= 1
if block.numExits() == 0:
# Stack exists if this is a normal exit
break
elif stack == None:
break
else:
#assert stack != None or not block.next, (block, region, output)
block = block.next
self.defns[output] = newdefns
return output, stack
def getTOS(self, block, stack):
assert block.origin, block
lcl = stack.peek()
defn = self.ssa.definition(lcl)
if defn.alwaysReturnsBoolean():
# Don't convert if we can infer that it's a boolean value
conditional = lcl
else:
conditional = ConvertToBool(lcl)
conditional.rewriteAnnotation(origin=block.origin)
# Predict what the resulting value can be
maybeTrue = True
maybeFalse = True
if isinstance(defn, Existing) and defn.object.isConstant():
# TODO wrap with exception handling mechanism
b = bool(defn.object.pyobj)
maybeTrue = b
maybeFalse = not b
return conditional, stack, (maybeTrue, maybeFalse)
def handleCond(self, cond, stack):
assert isinstance(stack, PythonStack), stack
if isinstance(cond, flowblocks.Linear):
block, stack = self(cond, stack)
conditional, framset, (maybeTrue, maybeFalse) = self.getTOS(cond, stack)
temp = Local(None)
assert isinstance(conditional, Expression), conditional
assign = Assign(conditional, [temp])
block.append(assign)
condition = Condition(block, temp)
#condition = Condition(block, conditional)
tstack = stack.duplicate()
fstack = stack.duplicate()
else:
condition, tstack, fstack, (maybeTrue, maybeFalse) = self(cond, stack)
assert isinstance(condition, Condition), condition
return condition, tstack, fstack, (maybeTrue, maybeFalse)
def visitCheckStack(self, block, stack):
conditional, framset, (maybeTrue, maybeFalse) = self.getTOS(block, stack)
assert isinstance(conditional, Expression), conditional
temp = Local(None)
assign = Assign(conditional, [temp])
block = Suite([assign])
condition = Condition(block, temp)
#condition = Condition(Suite(), conditional)
tstack = stack.duplicate()
fstack = stack.duplicate()
return condition, tstack, fstack, (maybeTrue, maybeFalse)
def visitShortCircutOr(self, block, stack):
assert isinstance(stack, PythonStack), stack
terms = []
stacks = []
def accumulate(condition, stack):
# TODO assert no abnormal exits?
assert isinstance(stack, PythonStack), stack
terms.append(condition)
stacks.append(stack)
maybeTrue = False
maybeFalse = True
fstack = stack
for term in block.terms:
condition, tstack, fstack, (termMaybeTrue, termMaybeFalse) = self.handleCond(term, fstack)
accumulate(condition, tstack)
maybeTrue |= termMaybeTrue
if not termMaybeFalse:
maybeFalse = False
break
#tstack = pythonstack.mergeStacks(stacks, [term.onExit for term in terms])
# HACK
tstack = pythonstack.mergeStacks(stacks, [[] for term in terms])
if len(terms) == 1:
return terms[0], tstack, fstack, (maybeTrue, maybeFalse)
condition = ShortCircutOr(terms)
# Convert into a condition.
lcl = Local(N |
tamasgal/km3pipe | examples/network/udp_dispatcher.py | Python | mit | 1,173 | 0.000853 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Tamas Gal <tgal@km3net.de>
# License: MIT
# vim: ts=4 sw=4 et
"""
=============================
UDP Forwarder for ControlHost
=============================
A simple UDP forwarder for ControlHost messages.
This application is used to forward monitoring channel data from Ligier
to a given UDP address.
"""
import socket
import sys
import km3pipe as kp
__author__ = "Tamas Gal"
__email__ = "tgal@km3net.de"
class UDPForwarder(kp.Module):
def configure(self):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.counter = 0
def process(self, blob):
if str(blob["CHPrefix"].tag) == "IO_MONIT":
self.sock.sendto(blob["CHData"], ("127.0.0.1", 56017))
if self.counter % 100 == 0:
sys.stdout.write(".")
sys.stdout.flush()
self.counter += 1
return blob
pipe = kp.Pipeline()
pipe.attach(
kp.io.CHPump,
host="localhost",
port=5553,
tags="IO_MONIT",
timeout=60 * 60 * 24 * 7,
max_queue=1000,
timeit=True,
)
pipe.attach(UDPForwarder)
pipe.drain()
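# For reference, a minimal receiver for the forwarded datagrams could look like
# this (a sketch, assuming the 127.0.0.1:56017 target used above):
#
#   import socket
#   sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#   sock.bind(("127.0.0.1", 56017))
#   while True:
#       data, addr = sock.recvfrom(65535)  # one monitoring payload per datagram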
|
Jwely/cdo-api-py | docs/example/dc_weather_data.py | Python | mit | 2,346 | 0.002984 | from cdo_api_py import Client
import pandas as pd
from datetime import datetime
from pprint import pprint
# initialize a client with a developer token ,
# note 5 calls per second and 1000 calls per day limit for each token
token = "my token here!"
my_client = Client(token, default_units=None, default_limit=1000)
# the other valid option for units is 'standard', and default_limit maxes out at 1000
# first lets see what endpoints are associated with the API
# you can read more about this from NOAAs NCDC at
# https://www.ncdc.noaa.gov/cdo-web/webservices/v2#gettingStarted
pprint(my_client.list_endpoints())
# request a list of available datasets (about 11) with
pprint(my_client.list_datasets())
# there are more than 1000 datatypes, but you can see them all with
pprint(my_client.list_datatypes())
# define the extent we are interested in. in this case the DC metro area.
extent = {
"north": 39.14,
"south": 38.68,
"east": -76.65,
"west": -77.35,
}
# lets define the date range we're interested in as well, December 2016
startdate = datetime(2016, 12, 1)
enddate = datetime(2016, 12, 31)
# after examining the available datasets, we decided 'GHCND' is the one we want,
# and that we really want daily min and max temperatures
datasetid='GHCND'
datatypeid=['TMIN', 'TMAX', 'PRCP']
# lets find stations that meet all our criteria
stations = my_client.find_stations(
datasetid=datasetid,
extent=extent,
startdate=startdate,
enddate=enddate,
datatypeid=datatypeid,
return_dataframe=True)
pprint(stations)
# we can get big lists of station data with
big_df = pd.DataFrame()
for rowid, station in stations.iterrows(): # remember this is a pandas dataframe!
station_data = my_client.get_data_by_station(
datasetid=datasetid,
stationid=station['id'], # remember this is a pandas dataframe
startdate=startdate,
        enddate=enddate,
return_dataframe=True, # this defaults to True
include_station_meta=True # flatten station metadata with ghcnd readings
)
pprint(station_data)
big_df = pd.concat([big_df, station_data], sort=False)
# Now we can do whatever we want with our big dataframe. Lets sort it by date and save it
print(big_df)
big_df = big_df.sort_values(by='date').reset_index()
big_df.to_csv('dc_ghcnd_example_output.csv')
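# A possible next step (a sketch; the 'datatype'/'value' column names follow
# the CDO v2 data schema returned by get_data_by_station above):
# daily = big_df.pivot_table(index='date', columns='datatype', values='value')
# print(daily[['TMIN', 'TMAX', 'PRCP']].describe())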
|
telminov/django-voximplant | voximplant/migrations/0009_auto_20160516_0649.py | Python | mit | 813 | 0.00123 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-05-16 06:49
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('voximplant', '0008_auto_20160514_0800'),
]
operations = [
migrations.RemoveField(
model_name='calllist',
name='completed',
),
        migrations.AddField(
model_name='calllist',
name='downloaded',
field=models.DateTimeField(blank=True, help_text='Last datetime of checking state from VoxImplant', null=True),
),
migrations.AlterField(
model_name='calllistphone',
            name='completed',
field=models.DateTimeField(blank=True, null=True),
),
]
|
clarkperkins/stackdio | stackdio/core/viewsets.py | Python | apache-2.0 | 13,461 | 0.002303 | # -*- coding: utf-8 -*-
# Copyright 2017, Digital Reasoning
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import unicode_literals
import logging
from django.conf import settings
from django.contrib.auth.models import Group
from django.http import Http404
from guardian.shortcuts import get_groups_with_perms, get_users_with_perms, remove_perm
from rest_framework import viewsets
from rest_framework.serializers import ListField, SlugRelatedField, ValidationError
from stackdio.api.users.models import get_user_queryset
from stackdio.core import fields, mixins, serializers
from stackdio.core.config import StackdioConfigException
from stackdio.core.permissions import StackdioPermissionsModelPermissions
from stackdio.core.shortcuts import get_groups_with_model_perms, get_users_with_model_perms
try:
from django_auth_ldap.backend import LDAPBackend
except ImportError:
LDAPBackend = None
logger = logging.getLogger(__name__)
def _filter_perms(available_perms, perms):
ret = []
for perm in perms:
if perm in available_perms:
ret.append(perm)
return ret
class UserSlugRelatedField(SlugRelatedField):
def to_internal_value(self, data):
try:
return super(UserSlugRelatedField, self).to_internal_value(data)
except ValidationError:
if settings.LDAP_ENABLED:
if LDAPBackend is None:
raise StackdioConfigException('LDAP is enabled, but django_auth_ldap isn\'t '
'installed. Please install django_auth_ldap')
# Grab the ldap user and try again
user = LDAPBackend().populate_user(data)
if user is not None:
return super(UserSlugRelatedField, self).to_internal_value(data)
# Nothing worked, just re-raise the exception
raise
class StackdioBasePermissionsViewSet(mixins.BulkUpdateModelMixin, viewsets.ModelViewSet):
"""
Viewset for creating permissions endpoints
"""
user_or_group = None
model_or_object = None
lookup_value_regex = r'[\w.@+-]+'
parent_lookup_field = 'pk'
parent_lookup_url_kwarg = None
def get_model_name(self):
raise NotImplementedError('`get_model_name()` must be implemented.')
def get_app_label(self):
raise NotImplementedError('`get_app_label()` must be implemented.')
def get_serializer_class(self):
user_or_group = self.get_user_or_group()
model_or_object = self.get_model_or_object()
model_name = self.get_model_name()
app_label = self.get_app_label()
super_cls = self.switch_model_object(serializers.StackdioModelPermissionsSerializer,
serializers.StackdioObjectPermissionsSerializer)
default_parent_lookup_url_kwarg = 'parent_{}'.format(self.parent_lookup_field)
url_field_kwargs = {
'view_name': 'api:{0}:{1}-{2}-{3}-permissions-detail'.format(
app_label,
model_name,
model_or_object,
user_or_group
),
'permission_lookup_field': self.lookup_field,
'permission_lookup_url_kwarg': self.lookup_url_kwarg or self.lookup_field,
'lookup_field': self.parent_lookup_field,
'lookup_url_kwarg': self.parent_lookup_url_kwarg or default_parent_lookup_url_kwarg,
}
url_field_cls = self.switch_model_object(
fields.HyperlinkedModelPermissionsField,
fields.HyperlinkedObjectPermissionsField,
)
# Create a class
class StackdioUserPermissionsSerializer(super_cls):
user = UserSlugRelatedField(slug_field='username', queryset=get_user_queryset())
url = url_field_cls(**url_field_kwargs)
permissions = ListField()
class Meta(super_cls.Meta):
update_lookup_field = 'user'
class StackdioGroupPermissionsSerializer(super_cls):
group = SlugRelatedField(slug_field='name', queryset=Group.objects.all())
url = url_field_cls(**url_field_kwargs)
permissions = ListField()
class Meta(super_cls.Meta):
update_lookup_field = 'group'
return self.switch_user_group(StackdioUserPermissionsSerializer,
StackdioGroupPermissionsSerializer)
def get_user_or_group(self):
assert self.user_or_group in ('user', 'group'), (
"'%s' should include a `user_or_group` attribute that is one of 'user' or 'group'."
% self.__class__.__name__
)
return self.user_or_group
def switch_user_group(self, if_user, if_group):
return {
'user': if_user,
'group': if_group,
}.get(self.get_user_or_group())
def get_model_or_object(self):
assert self.model_or_object in ('model', 'object'), (
"'%s' should include a `model_or_object` attribute that is one of 'model' or 'object'."
% self.__class__.__name__
)
return self.model_or_object
def switch_model_object(self, if_model, if_object):
return {
'model': if_model,
'object': if_object,
}.get(self.get_model_or_object())
def _transform_perm(self, model_name):
        def do_transform(item):
            # pylint: disable=unused-variable
            perm, sep, empty = item.partition('_' + model_name)
            return perm
        return do_transform
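    # e.g. with model_name == 'stack', 'delete_stack' is transformed to 'delete'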
def get_object(self):
queryset = self.get_queryset()
url_kwarg = self.lookup_url_kwarg or self.lookup_field
name_attr = self.switch_user_group('username', 'name')
for obj in queryset:
auth_obj = obj[self.get_user_or_group()]
if self.kwargs[url_kwarg] == getattr(auth_obj, name_attr):
return obj
raise Http404('No permissions found for %s' % self.kwargs[url_kwarg])
class StackdioModelPermissionsViewSet(StackdioBasePermissionsViewSet):
model_cls = None
model_or_object = 'model'
permission_classes = (StackdioPermissionsModelPermissions,)
def get_model_cls(self):
assert self.model_cls, (
"'%s' should include a `model_cls` attribute or override the `get_model_cls()` method."
            % self.__class__.__name__
)
return self.model_cls
def get_model_name(self):
return self.get_model_cls()._meta.model_name
def get_app_label(self):
ret = self.get_model_cls()._meta.app_label
if ret == 'auth':
# one-off thing, since users/groups are in the `users` app, not `auth`
return 'users'
return ret
def get_model_permissions(self):
return getattr(self.get_model_cls(),
'model_permissions',
getattr(self, 'model_permissions', ()))
def get_permissions(self):
"""
Instantiates and returns the list of permissions that this view requires.
"""
ret = []
for permission_cls in self.permission_classes:
permission = permission_cls()
# Inject our model_cls into the permission
if isinstance(permission, StackdioPermissionsModelPermissions) \
and permission.model_cls is None:
permission.model_cls = self.model_cls
ret.append(permission)
return ret
def get_queryset(self): # pylint: disable=method-hidden
model_cls = self.get_model_cls()
model_name = mod |
nachandr/cfme_tests | cfme/tests/physical_infrastructure/ui/test_physical_server_details.py | Python | gpl-2.0 | 3,611 | 0.001662 | import pytest
from cfme.physical.provider.lenovo import LenovoProvider
from cfme.utils.appliance.implementations.ui import navigate_to
pytestmark = [pytest.mark.tier(3), pytest.mark.provider([LenovoProvider], scope="module")]
@pytest.fixture(scope="module")
def physical_server(appliance, provider, setup_provider_modscope):
# Get and return the first physical server
physical_servers = appliance.collections.physical_servers.all(provider)
yield physical_servers[0]
def test_physical_server_details(physical_server):
"""Navigate to the physical server details page and verify that the page is displayed
Polarion:
assignee: rhcf3_machine
casecomponent: Infra
initialEstimate: 1/4h
"""
physical_server_view = navigate_to(physical_server, 'Details')
assert physical_server_view.is_displayed
def test_physical_server_details_dropdowns(physical_server):
"""Navigate to the physical server details page and verify that the menus are present
Polarion:
assignee: rhcf3_machine
casecomponent: Infra
initialEstimate: 1/4h
"""
physical_server_view = navigate_to(physical_server, 'Details')
configuration_items = physical_server_view.toolbar.configuration.items
assert "Refresh Relationships and Power States" in configuration_items
power_items = physical_server_view.toolbar.power.items
assert "Power On" in power_items
assert "Power Off" in power_items
assert "Power Off Immediately" in power_items
assert "Restart" in power_items
assert "Restart Immediately" in power_items
assert "Restart to System Setup" in power_items
assert "Restart Management Controller" in power_items
identify_items = physical_server_view.toolbar.identify.items
assert "Blink LED" in identify_items
assert "Turn On LED" in identify_items
assert "Turn Off LED" in identify_items
policy_items = physical_server_view.toolbar.policy.items
assert "Manage Policies" in policy_items
assert "Edit Tags" in policy_items
lifecycle_items = physical_server_view.toolbar.lifecycle.items
assert "Provision Physical Server" in lifecycle_items
monitoring_items = physical_server_view.toolbar.monitoring.items
assert "Timelines" in monitoring_items
def test_network_devices(physical_server):
"""Navigate to the Network Devices page and verify that the page is displayed
Polarion:
assignee: rhcf3_machine
casecomponent: Infra
initialEstimate: 1/4h
"""
num_network_devices = physical_server.num_network_devices()
network_device_view = navigate_to(physical_server, 'NetworkDevices')
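    # The page is expected to render only when the server reports at least
    # one device; the UI returns counts as strings, hence the "0" check.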
assert(network_device_view.is_displayed if num_network_devices != "0" else
not network_device_view.is_displayed)
def test_storage_devices(physical_server):
"""Navigate to the Storage Devices page and verify that the page is displayed
Polarion:
assignee: rhcf3_machine
casecomponent: Infra
initialEstimate: 1/4h
"""
num_storage_devices = physical_server.num_storage_devices()
storage_device_view = navigate_to(physical_server, 'StorageDevices')
assert(storage_device_view.is_displayed if num_storage_d | evices != "0" else
not storage_device_view.is_displayed)
def test_physical_server_details_stats(physical_server):
"""Navigate to the physical server details pag | e and verify that the stats match
Polarion:
assignee: rhcf3_machine
casecomponent: Infra
initialEstimate: 1/4h
"""
physical_server.validate_stats(ui=True)
|
macarthur-lab/xbrowse | seqr/views/apis/users_api.py | Python | agpl-3.0 | 7,234 | 0.003179 | import json
import urllib
from anymail.exceptions import AnymailError
from django.conf import settings
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.auth import login, authenticate
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.views.decorators.csrf import csrf_exempt
from seqr.utils.communication_utils import send_welcome_email
from seqr.views.utils.json_utils import cre | ate_json_response
from seqr.views.utils.orm_to_json_utils import _get_json_for_user, get_json_for_ | project_collaborator_list, \
get_project_collaborators_by_username
from seqr.views.utils.permissions_utils import get_projects_user_can_view, get_project_and_check_permissions, CAN_EDIT
from seqr.model_utils import create_xbrowse_project_collaborator, delete_xbrowse_project_collaborator
from settings import API_LOGIN_REQUIRED_URL
class CreateUserException(Exception):
def __init__(self, error, status_code=400, existing_user=None):
Exception.__init__(self, error)
self.status_code = status_code
self.existing_user = existing_user
@login_required(login_url=API_LOGIN_REQUIRED_URL)
@csrf_exempt
def get_all_collaborators(request):
if request.user.is_staff:
collaborators = {user.username: _get_json_for_user(user) for user in User.objects.exclude(email='')}
else:
collaborators = {}
for project in get_projects_user_can_view(request.user):
collaborators.update(get_project_collaborators_by_username(project, include_permissions=False))
return create_json_response(collaborators)
@login_required(login_url=API_LOGIN_REQUIRED_URL)
@csrf_exempt
def get_all_staff(request):
staff_analysts = {staff.username: _get_json_for_user(staff) for staff in User.objects.filter(is_staff=True)}
return create_json_response(staff_analysts)
@csrf_exempt
def forgot_password(request):
request_json = json.loads(request.body)
if not request_json.get('email'):
return create_json_response({}, status=400, reason='Email is required')
users = User.objects.filter(email__iexact=request_json['email'])
if users.count() != 1:
return create_json_response({}, status=400, reason='No account found for this email')
user = users.first()
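    # The reset token is the URL-quoted password hash, so a link is tied to
    # the password it was issued for.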
email_content = """
Hi there {full_name}--
Please click this link to reset your seqr password:
{base_url}users/set_password/{password_token}?reset=true
""".format(
full_name=user.get_full_name(),
base_url=settings.BASE_URL,
password_token=urllib.quote_plus(user.password),
)
try:
user.email_user('Reset your seqr password', email_content, fail_silently=False)
except AnymailError as e:
return create_json_response({}, status=getattr(e, 'status_code', None) or 400, reason=str(e))
return create_json_response({'success': True})
@csrf_exempt
def set_password(request, username):
user = User.objects.get(username=username)
request_json = json.loads(request.body)
if not request_json.get('password'):
return create_json_response({}, status=400, reason='Password is required')
user.set_password(request_json['password'])
user.first_name = request_json.get('firstName') or ''
user.last_name = request_json.get('lastName') or ''
user.save()
u = authenticate(username=username, password=request_json['password'])
login(request, u)
return create_json_response({'success': True})
@staff_member_required(login_url=API_LOGIN_REQUIRED_URL)
@csrf_exempt
def create_staff_user(request):
try:
_create_user(request, is_staff=True)
except CreateUserException as e:
return create_json_response({'error': e.message}, status=e.status_code, reason=e.message)
return create_json_response({'success': True})
@login_required(login_url=API_LOGIN_REQUIRED_URL)
@csrf_exempt
def create_project_collaborator(request, project_guid):
project = get_project_and_check_permissions(project_guid, request.user, permission_level=CAN_EDIT)
try:
user = _create_user(request)
except CreateUserException as e:
if e.existing_user:
return _update_existing_user(e.existing_user, project, json.loads(request.body))
else:
return create_json_response({'error': e.message}, status=e.status_code, reason=e.message)
project.can_view_group.user_set.add(user)
create_xbrowse_project_collaborator(project, user)
return create_json_response({
'projectsByGuid': {project_guid: {'collaborators': get_json_for_project_collaborator_list(project)}}
})
def _create_user(request, is_staff=False):
request_json = json.loads(request.body)
if not request_json.get('email'):
raise CreateUserException('Email is required')
existing_user = User.objects.filter(email__iexact=request_json['email']).first()
if existing_user:
raise CreateUserException('This user already exists', existing_user=existing_user)
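    # make_random_password() doubles as a random-string generator: usernames
    # are opaque and the email address is the user-facing identifier.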
username = User.objects.make_random_password()
user = User.objects.create_user(
username,
email=request_json['email'],
first_name=request_json.get('firstName') or '',
last_name=request_json.get('lastName') or '',
is_staff=is_staff,
)
try:
send_welcome_email(user, request.user)
except AnymailError as e:
raise CreateUserException(str(e), status_code=getattr(e, 'status_code', None) or 400)
return user
def _update_existing_user(user, project, request_json):
user.first_name = request_json.get('firstName') or ''
user.last_name = request_json.get('lastName') or ''
user.save()
project.can_view_group.user_set.add(user)
if request_json.get('hasEditPermissions'):
project.can_edit_group.user_set.add(user)
else:
project.can_edit_group.user_set.remove(user)
create_xbrowse_project_collaborator(
project, user, collaborator_type='manager' if request_json.get('hasEditPermissions') else 'collaborator')
return create_json_response({
'projectsByGuid': {project.guid: {'collaborators': get_json_for_project_collaborator_list(project)}}
})
@login_required(login_url=API_LOGIN_REQUIRED_URL)
@csrf_exempt
def update_project_collaborator(request, project_guid, username):
project = get_project_and_check_permissions(project_guid, request.user, permission_level=CAN_EDIT)
user = User.objects.get(username=username)
request_json = json.loads(request.body)
return _update_existing_user(user, project, request_json)
@login_required(login_url=API_LOGIN_REQUIRED_URL)
@csrf_exempt
def delete_project_collaborator(request, project_guid, username):
project = get_project_and_check_permissions(project_guid, request.user, permission_level=CAN_EDIT)
user = User.objects.get(username=username)
project.can_view_group.user_set.remove(user)
project.can_edit_group.user_set.remove(user)
delete_xbrowse_project_collaborator(project, user)
return create_json_response({
'projectsByGuid': {project_guid: {'collaborators': get_json_for_project_collaborator_list(project)}}
})
|
cmshobe/landlab | landlab/graph/hex/hex.py | Python | mit | 27,221 | 0.001212 | r"""
Examples
--------
::
* - *
/ \ / \
* - * - *
/ \ / \ / \
* - * - * - *
\ / \ / \ /
* - * - *
\ / \ /
* - *
>>> from landlab.graph import TriGraph
>>> graph = TriGraph((5, 2), node_layout="hex", sort=True)
>>> graph.number_of_nodes
14
>>> graph.x_of_node
array([ 1. , 2. ,
0.5, 1.5, 2.5,
0. , 1. , 2. , 3. ,
0.5, 1.5, 2.5,
1. , 2. ])
>>> graph.number_of_links
29
>>> graph.number_of_patches
16
::
* - * - * - *
\ / \ / \ / \
* - * - * - *
/ \ / \ / \ /
* - * - * - *
>>> from landlab.graph import TriGraph
>>> graph = TriGraph((3, 4), orientation="horizontal", node_layout="rect", sort=True)
>>> graph.number_of_nodes
12
>>> graph.x_of_node.reshape((3, 4))
array([[ 0. , 1. , 2. , 3. ],
[ 0.5, 1.5, 2.5, 3.5],
[ 0. , 1. , 2. , 3. ]])
>>> graph.number_of_links
23
>>> graph.number_of_patches
12
"""
from functools import lru_cache
import numpy as np
from ...core.utils import as_id_array
from ...utils.decorators import cache_result_in_object, make_return_array_immutable
from ..graph import Graph
from ..voronoi.voronoi import DelaunayGraph
class HorizontalRectTriGraphCython:
@staticmethod
def xy_of_node(shape, spacing=1.0, xy_of_lower_left=(0.0, 0.0)):
from .ext.hex import fill_xy_of_node_rect_horizontal
x_of_node = np.empty(shape[0] * shape[1], dtype=float)
y_of_node = np.empty(shape[0] * shape[1], dtype=float)
fill_xy_of_node_rect_horizontal(shape, x_of_node, y_of_node)
x_of_node[:] *= spacing
y_of_node[:] *= spacing * np.sin(np.pi / 3.0)
x_of_node[:] += xy_of_lo | wer_left[0]
y_of_node[:] += xy_of_lower_left[1]
return x_of_node, y_of_node
class VerticalRectTriGraphCython:
@staticmethod
def xy_of_node(shape, spacing=1.0, xy_of_lower_left=(0.0, 0.0)):
from .ext.hex import fill_xy_of_node_rect_vertical
n_rows, n_cols = shape
x_spacing = np.sin(np.pi / 3.0) * spacing
| y_spacing = spacing
x_of_node = np.empty(n_rows * n_cols, dtype=float)
y_of_node = np.empty(n_rows * n_cols, dtype=float)
fill_xy_of_node_rect_vertical(shape, x_of_node, y_of_node)
x_of_node *= x_spacing
y_of_node *= y_spacing
x_of_node += xy_of_lower_left[0]
y_of_node += xy_of_lower_left[1]
return x_of_node, y_of_node
class HorizontalHexTriGraphCython:
@staticmethod
def xy_of_node(shape, spacing=1.0, xy_of_lower_left=(0.0, 0.0)):
from .ext.hex import fill_xy_of_node_hex_horizontal
n_rows, n_cols = shape
n_nodes = n_rows * n_cols + (n_rows // 2) ** 2
x_of_node = np.empty(n_nodes, dtype=float)
y_of_node = np.empty(n_nodes, dtype=float)
fill_xy_of_node_hex_horizontal(shape, x_of_node, y_of_node)
x_of_node[:] *= spacing
y_of_node[:] *= spacing * np.sin(np.pi / 3.0)
x_of_node[:] += xy_of_lower_left[0]
y_of_node[:] += xy_of_lower_left[1]
return x_of_node, y_of_node
class VerticalHexTriGraphCython:
@staticmethod
def xy_of_node(shape, spacing=1.0, xy_of_lower_left=(0.0, 0.0)):
from .ext.hex import fill_xy_of_node_hex_vertical
n_rows, n_cols = shape
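        # Columns lengthen toward the middle of the hexagon, contributing
        # (n_cols // 2) ** 2 nodes beyond the n_rows * n_cols core.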
n_nodes = n_cols * n_rows + (n_cols // 2) ** 2
x_of_node = np.empty(n_nodes, dtype=float)
y_of_node = np.empty(n_nodes, dtype=float)
fill_xy_of_node_hex_vertical(shape, x_of_node, y_of_node)
x_of_node[:] *= spacing * np.sin(np.pi / 3.0)
y_of_node[:] *= spacing
x_of_node[:] += xy_of_lower_left[0]
y_of_node[:] += xy_of_lower_left[1]
return x_of_node, y_of_node
class HorizontalRectTriGraph:
@staticmethod
def number_of_nodes(shape):
n_rows, n_cols = shape
return n_rows * n_cols
@staticmethod
def xy_of_node(shape, spacing=1.0, xy_of_lower_left=(0.0, 0.0)):
n_rows, n_cols = shape
x_spacing, y_spacing = spacing, spacing * np.sin(np.pi / 3.0)
x_of_node, y_of_node = np.meshgrid(
np.arange(n_cols) * x_spacing + xy_of_lower_left[0],
np.arange(n_rows) * y_spacing + xy_of_lower_left[1],
)
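        # Offset every other row by half a spacing to stagger the rows into
        # the triangular packing.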
x_of_node[1::2] += spacing * 0.5
return x_of_node.reshape(-1), y_of_node.reshape(-1)
@staticmethod
def corner_nodes(shape):
"""
Examples
--------
>>> from landlab.graph.hex.hex import HorizontalRectTriGraph
>>> HorizontalRectTriGraph.corner_nodes((3, 4))
(11, 8, 0, 3)
>>> HorizontalRectTriGraph.corner_nodes((3, 2))
(5, 4, 0, 1)
>>> HorizontalRectTriGraph.corner_nodes((7, 1))
(6, 6, 0, 0)
>>> HorizontalRectTriGraph.corner_nodes((1, 3))
(2, 0, 0, 2)
"""
n_rows, n_cols = shape
return (n_rows * n_cols - 1, n_cols * (n_rows - 1), 0, n_cols - 1)
@staticmethod
def number_of_perimeter_nodes(shape):
if 1 in shape:
return np.prod(shape)
return 2 * shape[0] + 2 * (shape[1] - 2)
@staticmethod
def perimeter_nodes(shape):
"""
Examples
--------
>>> from landlab.graph.hex.hex import HorizontalRectTriGraph
>>> HorizontalRectTriGraph.perimeter_nodes((3, 2))
array([1, 3, 5, 4, 2, 0])
"""
return np.concatenate(HorizontalRectTriGraph.nodes_at_edge(shape))
@staticmethod
def nodes_at_edge(shape):
n_rows, n_cols = shape
if n_rows == n_cols == 1:
return (np.array([0]),) + (np.array([], dtype=int),) * 3
(
northeast,
northwest,
southwest,
southeast,
) = HorizontalRectTriGraph.corner_nodes(shape)
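        # Each edge excludes its end corner, so concatenating
        # (east, north, west, south) visits every perimeter node exactly
        # once, counterclockwise from the southeast corner.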
if n_rows > 1:
south = np.arange(southwest, southeast)
else:
south = np.array([southwest], dtype=int)
if n_cols > 1:
west = np.arange(northwest, southwest, -n_cols)
else:
west = np.array([northwest], dtype=int)
return (
np.arange(southeast, northeast, n_cols),
np.arange(northeast, northwest, -1),
west,
south,
)
class VerticalRectTriGraph:
@staticmethod
def number_of_nodes(shape):
n_rows, n_cols = shape
return n_rows * n_cols
@staticmethod
def xy_of_node(shape, spacing=1.0, xy_of_lower_left=(0.0, 0.0)):
n_rows, n_cols = shape
x_spacing, y_spacing = spacing * np.sin(np.pi / 3.0), spacing
x_of_node = np.empty((n_rows, n_cols), dtype=float)
y_of_node = np.empty((n_rows, n_cols), dtype=float)
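        # The first (n_cols + 1) // 2 columns take the even x positions; the
        # remaining columns take the odd x positions, raised half a y spacing.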
x_of_node[:, (n_cols + 1) // 2 :] = (
np.arange(n_cols // 2) * x_spacing * 2.0 + x_spacing + xy_of_lower_left[0]
)
x_of_node[:, : (n_cols + 1) // 2] = (
np.arange((n_cols + 1) // 2) * x_spacing * 2.0 + xy_of_lower_left[0]
)
y_of_node[:, : (n_cols + 1) // 2] = (
np.arange(n_rows) * y_spacing + xy_of_lower_left[1]
).reshape((n_rows, 1))
y_of_node[:, (n_cols + 1) // 2 :] = (
np.arange(n_rows) * y_spacing + xy_of_lower_left[1] + y_spacing * 0.5
).reshape((n_rows, 1))
return x_of_node.reshape(-1), y_of_node.reshape(-1)
@staticmethod
def corner_nodes(shape):
"""
Examples
--------
>>> from landlab.graph.hex.hex import VerticalRectTriGraph
|
sephiroth6/nodeshot | nodeshot/core/layers/migrations/0001_initial.py | Python | gpl-3.0 | 8,891 | 0.008211 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Layer'
db.create_table('layers_layer', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('added', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(2014, 10, 2, 0, 0))),
('updated', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(2014, 10, 2, 0, 0))),
('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=50)),
('slug', self.gf('django.db.models.fields.SlugField')(unique=True, max_length=50)),
('description', self.gf('django.db.models.fields.CharField')(max_length=250, null=True, blank=True)),
('text', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('is_published', self.gf('django.db.models.fields.BooleanField')(default=True)),
('is_external', self.gf('django.db.models.fields.BooleanField')(default=False)),
('center', self.gf('django.contrib.gis.db.models.fields.PointField')(null=True, blank=True)),
('area', self.gf('django.contrib.gis.db.models.fields.PolygonField')(null=True, blank=True)),
('zoom', self.gf('django.db.models.fields.SmallIntegerField')(default=12)),
('organization', self.gf('django.db.models.fields.CharField')(max_length=255)),
('website', self.gf('django.db.models.fields.URLField')(max_length=200, null=True, blank=True)),
('email', self.gf('django.db.models.fields.EmailField')(max_length=75, blank=True)),
('minimum_distance', self.gf('django.db.models.fields.IntegerField')(default=0)),
('new_nodes_allowed', self.gf('django.db.models.fields.BooleanField')(default=True)),
('data', self.gf(u'django_hstore.fields.DictionaryField')(null=True, blank=True)),
))
db.send_create_signal('layers', ['Layer'])
# Adding M2M table for field mantainers on 'Layer'
m2m_table_name = db.shorten_name('layers_layer_mantainers')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('layer', models.ForeignKey(orm['layers.layer'], null=False)),
('profile', models.ForeignKey(orm['profiles.profile'], null=False))
))
db.create_unique(m2m_table_name, ['layer_id', 'profile_id'])
def backwards(self, orm):
# Deleting model 'Layer'
db.delete_table('layers_layer')
# Removing M2M table for field mantainers on 'Layer'
db.delete_table(db.shorten_name('layers_layer_mantainers'))
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'layers.layer': {
'Meta': {'object_name': 'Layer'},
'added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 10, 2, 0, 0)'}),
'area': ('django.contrib.gis.db.models.fields.PolygonField', [], {'null': 'True', 'blank': 'True'}),
'center': ('django.contrib.gis.db.models.fields.PointField', [], {'null': 'True', 'blank': 'True'}),
'data': (u'django_hstore.fields.DictionaryField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_external': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_published': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'mantainers': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['profiles.Profile']", 'symmetrical': 'False', 'blank': 'True'}),
'minimum_distance': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
'new_nodes_allowed': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'organization': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 10, 2, 0, 0)'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'zoom': ('django.db.models.fields.SmallIntegerField', [], {'default': '12'})
},
'profiles.profile': {
'Meta': {'object_name': 'Profile'},
'about': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'address': ('django.db.models.fields.CharField', [], {'max_length': '150', 'blank': 'True'}),
'birth_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 10, 2, 0, 0)'}),
'email': ('django.db.models.fields.EmailField', [], {'db_index': 'True', 'unique': 'True', 'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db. | models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'gender': ('django.db.models.fi | elds.CharField', [], {'max_length': '1', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanFi |
rackerlabs/django-DefectDojo | dojo/unittests/test_bulk_risk_acceptance_api.py | Python | bsd-3-clause | 6,446 | 0.006826 | import datetime
from rest_framework.authtoken.models import Token
from rest_framework.reverse import reverse
from rest_framework.test import APITestCase, APIClient
from dojo.models import Product_Type, Product, Engagement, Product_Type_Member, Test, Finding, User, Test_Type, Role
from dojo.authorization.roles_permissions import Roles
class TestBulkRiskAcceptanceApi(APITestCase):
@classmethod
def setUpTestData(cls):
cls.user = User.objects.create(username='molly', first_name='Molly', last_name='Mocket', is_staff=True)
cls.token = Token.objects.create(user=cls.user)
cls.product_type = Product_Type.objects.create(name='Web App')
cls.product = Product.objects.create(prod_type=cls.product_type, name='Flopper', description='Test product')
Product_Type_Member.objects.create(product_type=cls.product_type, user=cls.user, role=Role.objects.get(id=Roles.Owner))
cls.product_2 = Product.objects.create(prod_type=cls.product_type, name='Flopper2', description='Test product2')
cls.engagement = Engagement.objects.create(product=cls.product, target_start=datetime.date(2000, 1, 1),
target_end=datetime.date(2000, 2, 1))
cls.engagement_2a = Engagement.objects.create(product=cls.product_2, target_start=datetime.date(2000, 1, 1),
target_end=datetime.date(2000, 2, 1))
cls.engagement_2b = Engagement.objects.create(product=cls.product_2, target_start=datetime.date(2000, 1, 1),
target_end=datetime.date(2000, 2, 1))
cls.test_type = Test_Type.objects.create(name='Risk Acceptance Mock Scan', static_tool=True)
cls.test_a = Test.objects.create(engagement=cls.engagement, test_type=cls.test_type,
target_start=datetime.date(2000, 1, 1), target_end=datetime.date(2000, 2, 1))
cls.test_b = Test.objects.create(engagement=cls.engagement, test_type=cls.test_type,
target_start=datetime.date(2000, 1, 1), target_end=datetime.date(2000, 2, 1))
cls.test_c = Test.objects.create(engagement=cls.engagement, test_type=cls.test_type,
target_start=datetime.date(2000, 1, 1), target_end=datetime.date(2000, 2, 1))
cls.test_d = Test.objects.create(engagement=cls.engagement_2a, test_type=cls.test_type,
target_start=datetime.date(2000, 1, 1), target_end=datetime.date(2000, 2, 1))
cls.test_e = Test.objects.create(engagement=cls.engagement_2b, test_type=cls.test_type,
target_start=datetime.date(2000, 1, 1), target_end=datetime.date(2000, 2, 1))
def create_finding(test: Test, reporter: User, cve: str) -> Finding:
return Finding(test=test, title='Finding {}'.format(cve), cve=cve, severity='High',
description='Hello world!', mitigation='Delete system32', impact='Everything',
reporter=reporter, numerical_severity='S1', static_finding=True, dynamic_finding=False)
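        # Offsets 50/51/52 with step 3 interleave the CVE ids so that each
        # id lands in exactly one of the three tests on the first engagement.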
Finding.objects.bulk_create(
map(lambda i: create_finding(cls.test_a, cls.user, 'CVE-1999-{}'.format(i)), range(50, 150, 3)))
Finding.objects.bulk_create(
map(lambda i: create_finding(cls.test_b, cls.user, 'CVE-1999-{}'.format(i)), range(51, 150, 3)))
Finding.objects.bulk_create(
map(lambda i: create_finding(cls.test_c, cls.user, 'CVE-1999-{}'.format(i)), range(52, 150, 3)))
Finding.objects.bulk_create(
map(lambda i: create_finding(cls.test_d, cls.user, 'CVE-2000-{}'.format(i)), range(50, 150, 3)))
Finding.objects.bulk_create(
map(lambda i: create_finding(cls.test_e, cls.user, 'CVE-1999-{}'.format(i)), range(50, 150, 3)))
def setUp(self) -> None:
self.client = APIClient()
self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token.key)
def test_test_accept_risks(self):
accepted_risks = [{'cve': 'CVE-1999-{}'.format(i), 'justification': 'Demonstration purposes',
'accepted_by': 'King of the Internet'} for i in range(100, 150)]
result = self.client.post(reverse('test-accept-risks', kwargs={'pk': self.test_a.id} | ), dat | a=accepted_risks,
format='json')
self.assertEquals(len(result.json()), 17)
self.assertEquals(self.test_a.unaccepted_open_findings.count(), 17)
self.assertEquals(self.test_b.unaccepted_open_findings.count(), 33)
self.assertEquals(self.test_c.unaccepted_open_findings.count(), 33)
self.assertEquals(self.test_d.unaccepted_open_findings.count(), 34)
self.assertEquals(self.engagement_2a.risk_acceptance.count(), 0)
def test_engagement_accept_risks(self):
accepted_risks = [{'cve': 'CVE-1999-{}'.format(i), 'justification': 'Demonstration purposes',
'accepted_by': 'King of the Internet'} for i in range(100, 150)]
result = self.client.post(reverse('engagement-accept-risks', kwargs={'pk': self.engagement.id}),
data=accepted_risks, format='json')
self.assertEquals(len(result.json()), 50)
self.assertEquals(self.engagement.unaccepted_open_findings.count(), 50)
self.assertEquals(self.engagement_2a.risk_acceptance.count(), 0)
self.assertEquals(self.engagement_2a.unaccepted_open_findings.count(), 34)
def test_finding_accept_risks(self):
accepted_risks = [{'cve': 'CVE-1999-{}'.format(i), 'justification': 'Demonstration purposes',
'accepted_by': 'King of the Internet'} for i in range(60, 140)]
result = self.client.post(reverse('finding-accept-risks'), data=accepted_risks, format='json')
self.assertEquals(len(result.json()), 106)
self.assertEquals(Finding.unaccepted_open_findings().count(), 62)
self.assertEquals(self.engagement_2a.risk_acceptance.count(), 0)
self.assertEquals(self.engagement_2a.unaccepted_open_findings.count(), 34)
for ra in self.engagement_2b.risk_acceptance.all():
for finding in ra.accepted_findings.all():
self.assertEquals(self.engagement_2a.product, finding.test.engagement.product)
|
simphony/tornado-webapi | tornadowebapi/renderers/tests/test_json_renderer.py | Python | bsd-3-clause | 287 | 0 | import unittest
from | tornadowebapi.renderers import JSONRenderer
class TestJSONRenderer(unittest.TestCase):
def test_basic_rendering(self):
renderer = JSONRenderer()
self.assertEqual(renderer.render({}), "{}")
self.assertEqual(ren | derer.render(None), None)
|
TeXitoi/navitia | source/jormungandr/jormungandr/modules/v1_routing/v1_routing.py | Python | agpl-3.0 | 9,012 | 0.000555 | # coding=utf-8
# Copyright (c) 2001-2014, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# IRC #navitia on freenode
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from jormungandr.interfaces.v1 import Uri
from jormungandr.interfaces.v1 import Coverage
from jormungandr.interfaces.v1 import Journeys
from jormungandr.interfaces.v1 import Schedules
from jormungandr.interfaces.v1 import Places
from jormungandr.interfaces.v1 import Ptobjects
from jormungandr.interfaces.v1 import Coord
from jormungandr.interfaces.v1 import Disruptions
from jormungandr.interfaces.v1 import Calendars
from jormungandr.interfaces.v1 import converters_collection_type
from jormungandr.interfaces.v1 import Status
from werkzeug.routing import BaseConverter, FloatConverter, PathConverter
from jormungandr.modules_loader import AModule
from resources import Index
class RegionConverter(BaseConverter):
""" The region you want to query"""
def __init__(self, *args, **kwargs):
BaseConverter.__init__(self, *args, **kwargs)
self.type_ = "string"
self.regex = '[^(/;)]+'
class LonConverter(FloatConverter):
""" The longitude of where the coord you want to query"""
def __init__(self, *args, **kwargs):
FloatConverter.__init__(self, *args, **kwargs)
self.type_ = "float"
self.regex = '-?\\d+(\\.\\d+)?'
class LatConverter(FloatConverter):
""" The latitude of where the coord you want to query"""
def __init__(self, *args, **kwargs):
FloatConverter.__init__(self, *args, **kwargs)
self.type_ = "float"
self.regex = '-?\\d+(\\.\\d+)?'
class UriConverter(PathConverter):
"""First part of the uri"""
def __init__(self, *args, **kwargs):
PathConverter.__init__(self, *args, **kwargs)
self.type_ = "string"
class IdConverter(BaseConverter):
"""Id of the object you want to query"""
def __init__(self, *args, **kwargs):
BaseConverter.__init__(self, *args, **kwargs)
self.type_ = "string"
class V1Routing(AModule):
def __init__(self, api, name):
super(V1Routing, self).__init__(api, name,
description='Current version of navitia API',
status='current',
index_endpoint='index')
def setup(self):
self.api.app.url_map.converters['region'] = RegionConverter
self.api.app.url_map.converters['lon'] = LonConverter
self.api.app.url_map.converters['lat'] = LatConverter
self.api.app.url_map.converters['uri'] = UriConverter
self.api.app.url_map.converters['id'] = IdConverter |
self.api.app.url_map.strict_slashes = False
self.module_resources_manager.register_resource(Index.Index())
self.add_resource(Index.Index,
'/',
'',
endpoint='index')
self.module_resources_manager.register_resource(Index.TechnicalStatus())
self.add_resource(Index.TechnicalStatus,
'/status',
| endpoint='technical_status')
coverage = '/coverage/'
region = coverage + '<region:region>/'
coord = coverage + '<lon:lon>;<lat:lat>/'
self.add_resource(Coverage.Coverage,
coverage,
region,
coord,
endpoint='coverage')
self.add_resource(Coord.Coord,
'/coord/<lon:lon>;<lat:lat>',
'/coords/<lon:lon>;<lat:lat>',
endpoint='coord')
collecs = converters_collection_type.collections_to_resource_type.keys()
for collection in collecs:
self.add_resource(getattr(Uri, collection)(True),
region + collection,
coord + collection,
region + '<uri:uri>/' + collection,
coord + '<uri:uri>/' + collection,
endpoint=collection + '.collection')
self.add_resource(getattr(Uri, collection)(False),
region + collection + '/<id:id>',
coord + collection + '/<id:id>',
region + '<uri:uri>/' + collection + '/<id:id>',
coord + '<uri:uri>/' + collection + '/<id:id>',
endpoint=collection + '.id')
collecs = ["routes", "lines", "line_groups", "networks", "stop_areas", "stop_points",
"vehicle_journeys"]
for collection in collecs:
self.add_resource(getattr(Uri, collection)(True),
'/' + collection,
endpoint=collection + '.external_codes')
self.add_resource(Places.Places,
region + 'places',
coord + 'places',
'/places',
endpoint='places')
self.add_resource(Ptobjects.Ptobjects,
region + 'pt_objects',
coord + 'pt_objects',
endpoint='pt_objects')
self.add_resource(Places.PlaceUri,
region + 'places/<id:id>',
coord + 'places/<id:id>',
endpoint='place_uri')
self.add_resource(Places.PlacesNearby,
region + 'places_nearby',
coord + 'places_nearby',
region + '<uri:uri>/places_nearby',
coord + '<uri:uri>/places_nearby',
endpoint='places_nearby')
self.add_resource(Journeys.Journeys,
region + '<uri:uri>/journeys',
coord + '<uri:uri>/journeys',
region + 'journeys',
coord + 'journeys',
'/journeys',
endpoint='journeys')
self.add_resource(Schedules.RouteSchedules,
region + '<uri:uri>/route_schedules',
coord + '<uri:uri>/route_schedules',
'/route_schedules',
endpoint='route_schedules')
self.add_resource(Schedules.NextArrivals,
region + '<uri:uri>/arrivals',
coord + '<uri:uri>/arrivals',
region + 'arrivals',
coord + 'arrivals',
endpoint='arrivals')
self.add_resource(Schedules.NextDepartures,
region + '<uri:uri>/departures',
coord + '<uri:uri>/departures',
region + 'departures',
coord + 'departures',
endpoint='departures')
self.add_resource(Schedules.StopSchedules,
|
Rondineli/django-sso | django_sso/sso_request/urls.py | Python | gpl-2.0 | 242 | 0 | # -*- coding:utf-8 -*-
from django.c | onf.urls import patterns, url
from django.contrib import admin
from views import AssertView
admin.autodiscover()
urlpatterns = patterns(
'',
url(r'^$', | AssertView.as_view(), name='redirect'),
)
|
bbc/ebu-tt-live-toolkit | ebu_tt_live/bindings/_itts.py | Python | bsd-3-clause | 69 | 0 | # -*- coding: utf-8 -*-
from | ebu_tt_live.bind | ings.raw._itts import *
|
bw4sz/MotionMeerkat_Bisque | MotionMeerkat/report.py | Python | gpl-3.0 | 3,667 | 0.011999 | import time
import csv
def report(ob):
#Create log file
log_file_report = ob.file_destination + "/" + "Parameters_Results.log"
    log_report = open(log_file_report, 'a')
#Print parameters
#Batch or single file
log_report.write("\nRun type: %s" % ob.runtype)
if ob.runtype in ["file","pictures"]:
log_report.write("\nInput file path: %s" % ob.inDEST)
else:
log_report.write("\nInput file path: %s" % ob.batchpool)
log_report.write("\nOutput dir: %s" % ob.fileD)
log_report.write("\nAdapt accAvg? %s" % ob.adapt)
if ob.adapt:
log_report.write("\nExpected hitrate: %s" % ob.frameHIT)
log_report.write("\nMinimum accAvg: %s" % ob.floorvalue)
log_report.write("\nThreshold %s" % ob.threshT)
log_report.write("\nMinimum contour area: %s" % ob.minSIZE)
log_report.write("\nBurnin: %s" % ob.burnin)
log_report.write("\nScan frames: %s" % ob.scan)
if ob.frameSET:
log_report.write("\nManual framerate: %s" % ob.frame_rate)
log_report.write("\nSet ROI: %s | " % ob.ROI_include)
log_report.write("\nArea counter?: %s" % ob.set_areacounter)
log_report.write("\nOutput type?: %s\n\n" % ob.makeVID)
#Ending time
end | =time.time()
#total_time()
total_min=(end-ob.start)/60
#processed frames per second
pfps=float(ob.frame_count)/(total_min*60)
##Write to log file
log_report.write("Total run time (min): %.2f \n " % total_min)
log_report.write("Average frames per second: %.2f \n " % pfps)
#End of program, report some statistic to screen and log
#log
log_report.write("\n Thank you for using MotionMeerkat! \n")
log_report.write("Candidate motion events: %.0f \n " % ob.total_count )
log_report.write("Frames skipped due to Threshold: %.0f \n " % ob.nocountr)
log_report.write("Frames skipped due to minSIZE: %.0f \n " % ob.toosmall)
log_report.write("Total frames in files: %.0f \n " % ob.frame_count)
rate=float(ob.total_count)/ob.frame_count*100
log_report.write("Hitrate: %.2f %% \n" % rate)
log_report.write("Exiting")
#print to screen
print("\n\nThank you for using MotionMeerkat! \n")
print("Total run time (min): %.2f \n " % total_min)
print("Average frames processed per second: %.2f \n " % pfps)
print("Candidate motion events: %.0f \n " % ob.total_count )
print("Frames skipped due to AccAvg: %.0f \n " % ob.nodiff)
print("Frames skipped due to Threshold: %.0f \n " % ob.nocountr)
print("Frames skipped due to minSIZE: %.0f \n " % ob.toosmall)
print("Total frames in files: %.0f \n " % ob.frame_count)
rate=float(ob.total_count)/ob.frame_count*100
print("Hitrate: %.2f %% \n" % rate)
#reset frame count if in batch loop
ob.frame_count=0
ob.total_count=0
ob.toosmall=0
ob.nocountr=0
#Write csv of time stamps and frame counts
#file name
time_stamp_report = ob.file_destination + "/" + "Frames.csv"
with open(time_stamp_report, 'wb') as f:
writer = csv.writer(f)
writer.writerows(ob.stamp)
if ob.set_areacounter:
area_report = ob.file_destination + "/" + "AreaCounter.csv"
with open(area_report, 'wb') as f:
writer = csv.writer(f)
writer.writerows(ob.areaC) |
BhallaLab/moose-examples | squid/squid_demo.py | Python | gpl-2.0 | 43,246 | 0.005113 | # -*- coding: utf-8 -*-
# Description: Squid Model
# Author: Subha
# Maintainer: Dilawar Singh <dilawars@ncbs.res.in>
# Created: Mon Jul 9 18:23:55 2012 (+0530)
# Version:
# Last-Updated: Wednesday 12 September 2018 04:23:52 PM IST
# PyQt5 version
import sys
import os
from collections import defaultdict
import time
try:
from PyQt5 import QtGui, QtCore
from PyQt5.QtWidgets import QMainWindow, QApplication, QGroupBox, QSizePolicy
from PyQt5.QtWidgets import QLabel, QLineEdit, QGridLayout, QDockWidget
from PyQt5.QtWidgets import QCheckBox, QTabWidget, QComboBox, QWidget
from PyQt5.QtWidgets import QVBoxLayout, QFrame, QHBoxLayout, QAction
from PyQt5.QtWidgets import QToolButton, QScrollArea, QTextBrowser
from PyQt5.QtWidgets import QMessageBox
except ImportError as e:
print( '[INFO] PyQt5 not found. Quitting...' )
quit()
import numpy
from matplotlib.figure import Figure
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
import moose
from squid import *
from squid_setup import SquidSetup
from electronics import ClampCircuit
tooltip_Nernst = """<h3>Ionic equilibrium potential</h3>
<p/>
The equilibrium potential for ion C is given by Nernst equation:
<p>
E<sub>C</sub> = (RT/zF) * ln([C]<sub>out</sub> / [C]<sub>in</sub>)
</p>
where R is the ideal gas constant (8.3145 J/mol K),<br>
T is absolute temperature,<br>
z is the valence of the ion,<br>
F is Faraday's constant 96480 C/mol,<br>
[C]<sub>out</sub> is concentration of C outside the membrane,<br>
[C]<sub>in</sub> is concentration of C inside the membrane."""
tooltip_Erest = """<h3>Resting membrane potential</h3>
<p/>
The resting membrane potential is determined by the ionic
concentrations inside and outside the cell membrane and is given by
the Goldman-Hodgkin-Katz equation:
<p>
V = (RT/F) * ln((P<sub>K</sub>[K<sup>+</sup>]<sub>out</sub> + P<sub>Na</sub>[Na<sup>+</sup>]<sub>out</sub> + P<sub>Cl</sub>[Cl<sup>-</sup>]<sub>in</sub>) / (P<sub>K</sub>[K<sup>+</sup>]in + P<sub>Na</sub>[Na<sup>+</sup>]<sub>in</sub> + P<sub>Cl</sub>[Cl<sup>-</sup>]<sub>out</sub>))
</p>
where P<sub>C</sub> is the permeability of the membrane to ion C.
"""
tooltip_NaChan = """<h3>Na+ channel conductance</h3>
<p/>
The Na<sup>+</sup> channel conductance in squid giant axon is given by:
<p> G<sub>Na</sub> = Ḡ<sub>Na</sub> * m<sup>3</sup> * h </p>
and the current through this channel is:
<p>
I<sub>Na</sub> = G<sub>Na</sub> * (V - E<sub>Na</sub>) = Ḡ<sub>Na</sub> * m<sup>3</sup> * h * (V - E<sub>Na</sub>)
</p>
where Ḡ<sub>Na</sub> is the peak conductance of Na<sup>+</sup> channel, m is
the fraction of activation gates open and h is the fraction of
deactivation gates open. The transition from open to closed state has
first order kinetics:
<p> dm/dt = α<sub>m</sub> * ( 1 - m) | - β<sub>m</sub> * m </p>
and similarly for h.
The steady state values are:
<p> m<sub>∞</sub> = α<sub>m</sub>/(α<sub>m</sub> + β<sub>m</sub | >) </p>
and time constant for steady state is:
<p>τ<sub>m</sub> = 1/ (α<sub>m</sub> + β<sub>m</sub>) </p>
and similarly for h.
"""
tooltip_KChan = """<h3>K+ channel conductance</h3>
<p/>The K+ channel conductance in squid giant axon is given by:
<p> G<sub>K</sub> = Ḡ<sub>K</sub> * n<sup>4</sup></p>
and the current through this channel is:
<p>
I<sub>K</sub> = G<sub>K</sub> * (V - E<sub>K</sub>) = Ḡ<sub>K</sub> * n<sup>4</sup> * (V - E<sub>K</sub>)
</p>
where Ḡ<sub>K</sub> is the peak conductance of K<sup>+</sup> channel,
n is the fraction of activation gates open. The transition from open
to closed state has first order kinetics: <p> dn/dt = α<sub>n</sub> *
( 1 - n) - β<sub>n</sub> * n </p>.
The steady state values are:
<p>
n<sub>∞</sub> = α<sub>n</sub>/(α<sub>n</sub> + β<sub>n</sub>)
</p>
and time constant for steady state is:
<p>
τ<sub>n</sub> = 1/ (α<sub>n</sub> + β<sub>n</sub>)
</p>
and similarly for h.
"""
tooltip_Im = """<h3>Membrane current</h3>
<p/>
The current through the membrane is given by:
<p>
I<sub>m</sub> = C<sub>m</sub> dV/dt + I<sub>K</sub> + I<sub>Na</sub> + I<sub>L</sub>
</p><p>
= C<sub>m</sub> dV/dt + G<sub>K</sub>(V, t) * (V - E<sub>K</sub>) + G<sub>Na</sub> * (V - E<sub>Na</sub>) + G<sub>L</sub> * (V - E<sub>L</sub>)
</p>
where G<sub>L</sub> is the leak current and E<sub>L</sub> is the leak reversal potential.
"""
default_line_edit_size = QtCore.QSize(80, 25)
def set_default_line_edit_size(widget):
widget.setMinimumSize(default_line_edit_size)
widget.setMaximumSize(default_line_edit_size)
class SquidGui( QMainWindow ):
defaults = {}
defaults.update(SquidAxon.defaults)
defaults.update(ClampCircuit.defaults)
defaults.update({'runtime': 50.0,
'simdt': 0.01,
'plotdt': 0.1,
'vclamp.holdingV': 0.0,
'vclamp.holdingT': 10.0,
'vclamp.prepulseV': 0.0,
'vclamp.prepulseT': 0.0,
'vclamp.clampV': 50.0,
'vclamp.clampT': 20.0,
'iclamp.baseI': 0.0,
'iclamp.firstI': 0.1,
'iclamp.firstT': 40.0,
'iclamp.firstD': 5.0,
'iclamp.secondI': 0.0,
'iclamp.secondT': 0.0,
'iclamp.secondD': 0.0
})
def __init__(self, *args):
QMainWindow.__init__(self, *args)
self.squid_setup = SquidSetup()
self._plotdt = SquidGui.defaults['plotdt']
self._plot_dict = defaultdict(list)
self.setWindowTitle('Squid Axon simulation')
self.setDockNestingEnabled(True)
self._createRunControl()
self.addDockWidget(QtCore.Qt.LeftDockWidgetArea, self._runControlDock)
self._runControlDock.setFeatures(QDockWidget.AllDockWidgetFeatures)
self._createChannelControl()
self._channelCtrlBox.setWindowTitle('Channel properties')
self._channelControlDock.setFeatures(QDockWidget.AllDockWidgetFeatures)
self.addDockWidget(QtCore.Qt.LeftDockWidgetArea, self._channelControlDock)
self._createElectronicsControl()
self._electronicsDock.setFeatures(QDockWidget.AllDockWidgetFeatures)
self._electronicsDock.setWindowTitle('Electronics')
self.addDockWidget(QtCore.Qt.LeftDockWidgetArea, self._electronicsDock)
self._createPlotWidget()
self.setCentralWidget(self._plotWidget)
self._createStatePlotWidget()
self._createHelpMessage()
self._helpWindow.setVisible(False)
self._statePlotWidget.setWindowFlags(QtCore.Qt.Window)
self._statePlotWidget.setWindowTitle('State plot')
self._initActions()
self._createRunToolBar()
self._createPlotToolBar()
def getFloatInput(self, widget, name):
try:
return float(str(widget.text()))
except ValueError:
QMessageBox.critical(self, 'Invalid input', 'Please enter a valid number for {}'.format(name))
raise
def _createPlotWidget(self):
self._plotWidget = QWidget()
self._plotFigure = Figure()
self._plotCanvas = FigureCanvas(self._plotFigure)
self._plotCanvas.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self._plotCanvas.updateGeometry()
self._plotCanvas.setParent(self._plotWidget)
self._plotCanvas.mpl_connect('scroll_event', self._onScroll)
self._plotFigure.set_canvas(self._plotCanvas)
# Vm and command voltage go in the same subplot
self._vm_axes = self._plotFigure.add_subplot(2,2,1, title='Membrane potential')
self._vm_axes.set_ylim(-20.0, 120.0)
# Channel conductances go to the same subplot
self._g_axes = self._plotFigure.add_subplot(2,2,2, title='Channel conductance')
self._g_axes.set_ylim(0.0, 0.5)
# Injection current for Vclamp/Iclamp go to the same subplot
self._im_axes = self._plotFigure.add_subplot(2,2,3, title='Injection current')
self._im_axes.set |
j16r/rust | src/etc/mirror-all-snapshots.py | Python | apache-2.0 | 944 | 0.003178 | #!/usr/bin/env python
# xfail-license
import os, tarfile, hashlib, re, shutil
from snapshot import *
f = open(snapshotfile)
date = None
rev = None
platform = None
snap = None
i = 0
for line in f.readlines():
i += 1
parsed = parse_line(i, line)
if (not parsed): continue
if parsed["type"] == "snapshot":
dat | e = parsed["date"]
rev = parsed["rev"]
elif rev != None and parsed["type"] == "file":
platform = parsed["platform"]
hsh = parsed["hash"]
snap = full_snapshot_name(date, rev, platform, hsh)
dl = os.path.join(download_dir_base, snap)
url = download_url_base + "/" + snap
if (not os.path.exists(dl)):
print("downloading " + url)
get_url_to_file(url, dl)
if (snap_filename_hash_part(snap) == hash_file(dl)):
| print("got download with ok hash")
else:
raise Exception("bad hash on download")
|
TeamEOS/device_xiaomi_cancro | releasetools/releasetools.py | Python | gpl-2.0 | 1,275 | 0.008627 | #
# Copyright (C) 2015 The CyanogenMod Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
def FullOTA_PostValidate(info):
info.script.AppendExtra('run_program("/sbin/e2fsck", "-fy", "/dev/block/platform/msm_sdcc.1/by-name/system");');
info.script.AppendExtra('run_program("/tmp/insta | ll/bin/resize2fs_static", "/dev/block/platform/msm_sdcc.1/by-name/s | ystem");');
info.script.AppendExtra('run_program("/sbin/e2fsck", "-fy", "/dev/block/platform/msm_sdcc.1/by-name/system");');
def FullOTA_InstallEnd(info):
info.script.AppendExtra('mount("ext4", "EMMC", "/dev/block/platform/msm_sdcc.1/by-name/system", "/system", "");');
info.script.AppendExtra('run_program("/tmp/install/bin/nfcchecker.sh");');
info.script.AppendExtra('unmount("/system");');
|
zhunor/tweet_to_geojson | tweet_to_geojson.py | Python | mit | 1,117 | 0.029543 | import json
import pprint
import geojson
from geojson import Feature, Point, FeatureCollection
data = []
geoms = []
tweet_features = []
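# raw.json is assumed to hold one tweet JSON object per line (the Twitter
# streaming API's line-delimited format).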
with open('raw.json') as twtr_hamdata:
for satir in twtr_hamdata:
data.append(json.loads(satir))
for i in range(0,len(data)):
geoms.append(data[i]["geo"]["coordinates"])
print g | eoms[i][0], geoms[i][1]
my_feature = Feature(geometry=Point((float(geoms[i][1]),float(geoms[i][0]))),\
properties={"user_location":data[i]["user"]["location"],\
"user_id": data[i]["id"],\
"user_name":data[i]["user"]["name"],\
"screen_name":data[i]["user"]["screen_name"],\
"followers_count":data[i]["user"]["followers_count"],\
"tweet":data[i]["text"],\
"tweet_ti | me":data[i]["created_at"]})
tweet_features.append(my_feature)
#print tweet_features
tweet_FeatureCollection = FeatureCollection(tweet_features[:])
#print tweet_FeatureCollection["type"]
try:
saveFile = open('tweets.geojson','a')
saveFile.write(json.dumps(tweet_FeatureCollection))
saveFile.close()
except Exception as error:
print "Unable to write %s error"%error |
prasanna08/oppia | core/controllers/voice_artist.py | Python | apache-2.0 | 5,564 | 0.00018 | # coding: utf-8
# Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless r | equired by applicable law or agreed to in w | riting, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Controllers for the translation changes."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
from core.controllers import acl_decorators
from core.controllers import base
from core.domain import fs_domain
from core.domain import fs_services
from core.domain import user_services
import feconf
import python_utils
import mutagen
from mutagen import mp3
class AudioUploadHandler(base.BaseHandler):
"""Handles audio file uploads (to Google Cloud Storage in production, and
to the local datastore in dev).
"""
# The string to prefix to the filename (before tacking the whole thing on
# to the end of 'assets/').
_FILENAME_PREFIX = 'audio'
@acl_decorators.can_voiceover_exploration
def post(self, exploration_id):
"""Saves an audio file uploaded by a content creator."""
raw_audio_file = self.request.get('raw_audio_file')
filename = self.payload.get('filename')
allowed_formats = list(feconf.ACCEPTED_AUDIO_EXTENSIONS.keys())
if not raw_audio_file:
raise self.InvalidInputException('No audio supplied')
dot_index = filename.rfind('.')
extension = filename[dot_index + 1:].lower()
if dot_index == -1 or dot_index == 0:
raise self.InvalidInputException(
'No filename extension: it should have '
'one of the following extensions: %s' % allowed_formats)
if extension not in feconf.ACCEPTED_AUDIO_EXTENSIONS:
raise self.InvalidInputException(
'Invalid filename extension: it should have '
'one of the following extensions: %s' % allowed_formats)
tempbuffer = python_utils.string_io()
tempbuffer.write(raw_audio_file)
tempbuffer.seek(0)
try:
# For every accepted extension, use the mutagen-specific
# constructor for that type. This will catch mismatched audio
# types e.g. uploading a flac file with an MP3 extension.
if extension == 'mp3':
audio = mp3.MP3(tempbuffer)
else:
audio = mutagen.File(tempbuffer)
except mutagen.MutagenError:
# The calls to mp3.MP3() versus mutagen.File() seem to behave
# differently upon not being able to interpret the audio.
# mp3.MP3() raises a MutagenError whereas mutagen.File()
# seems to return None. It's not clear if this is always
# the case. Occasionally, mutagen.File() also seems to
# raise a MutagenError.
raise self.InvalidInputException(
'Audio not recognized as a %s file' % extension)
tempbuffer.close()
if audio is None:
raise self.InvalidInputException(
'Audio not recognized as a %s file' % extension)
if audio.info.length > feconf.MAX_AUDIO_FILE_LENGTH_SEC:
raise self.InvalidInputException(
'Audio files must be under %s seconds in length. The uploaded '
'file is %.2f seconds long.' % (
feconf.MAX_AUDIO_FILE_LENGTH_SEC, audio.info.length))
if len(set(audio.mime).intersection(
set(feconf.ACCEPTED_AUDIO_EXTENSIONS[extension]))) == 0:
raise self.InvalidInputException(
'Although the filename extension indicates the file '
'is a %s file, it was not recognized as one. '
'Found mime types: %s' % (extension, audio.mime))
mimetype = audio.mime[0]
# Fetch the audio file duration from the Mutagen metadata.
duration_secs = audio.info.length
# For a strange, unknown reason, the audio variable must be
# deleted before opening cloud storage. If not, cloud storage
# throws a very mysterious error that entails a mutagen
# object being recursively passed around in app engine.
del audio
# Audio files are stored to the datastore in the dev env, and to GCS
# in production.
file_system_class = fs_services.get_entity_file_system_class()
fs = fs_domain.AbstractFileSystem(file_system_class(
feconf.ENTITY_TYPE_EXPLORATION, exploration_id))
fs.commit(
'%s/%s' % (self._FILENAME_PREFIX, filename),
raw_audio_file, mimetype=mimetype)
self.render_json({'filename': filename, 'duration_secs': duration_secs})
class StartedTranslationTutorialEventHandler(base.BaseHandler):
"""Records that this user has started the state translation tutorial."""
@acl_decorators.can_play_exploration
def post(self, unused_exploration_id):
"""Handles POST requests."""
user_services.record_user_started_state_translation_tutorial(
self.user_id)
self.render_json({})
|
babybunny/rebuildingtogethercaptain | test/test_main.py | Python | apache-2.0 | 1,940 | 0.00567 | """Functional tests f | or main views."""
import os
import unittest
from webtest import TestApp
import app_engine_test_utils
from gae import main
from test import test_models
app = TestApp(main.app)
class WelcomeTest(unittest.Tes | tCase):
def setUp(self):
app_engine_test_utils.activate_app_engine_testbed_and_clear_cache()
def testHelp(self):
response = app.get('/help')
self.assertEquals('200 OK', response.status)
def testRoot(self):
os.environ['ROOMS_DEV_SIGNIN_EMAIL'] = "rebuildingtogether.nobody@gmail.com"
response = app.get('/')
self.assertEquals('200 OK', response.status)
self.assertIn('Welcome to ROOMS', str(response))
self.assertIn('rebuildingtogether.nobody@gmail.com', str(response))
def testRootXHeader(self):
response = app.get('/', headers={'x-rooms-dev-signin-email': 'rebuildingtogether.staff@gmail.com'})
self.assertEquals('200 OK', response.status)
self.assertIn('rebuildingtogether.staff@gmail.com', str(response))
class StatefulTest(unittest.TestCase):
def setUp(self):
app_engine_test_utils.activate_app_engine_testbed_and_clear_cache()
self.keys = test_models.CreateAll()
def testRootXHeaderStaff(self):
response = app.get('/', headers={'x-rooms-dev-signin-email': 'rebuildingtogether.staff@gmail.com'})
self.assertEquals('302 Moved Temporarily', response.status)
self.assertIn('Location', response.headers)
self.assertIn('/staff_home', response.headers['Location'])
self.assertIn('rebuildingtogether.staff@gmail.com', str(response))
def testRootXHeaderCaptain(self):
response = app.get('/', headers={'x-rooms-dev-signin-email': 'rebuildingtogether.capn@gmail.com'})
self.assertEquals('302 Moved Temporarily', response.status)
self.assertIn('Location', response.headers)
self.assertIn('/captain_home', response.headers['Location'])
self.assertIn('rebuildingtogether.capn@gmail.com', str(response))
|
Dob3r/python_seleniumwebdriver | Model/Customer.py | Python | apache-2.0 | 493 | 0.004057 |
class Customer:
def __init__(self, firstname, lastname, country, address, postcode, city, email, phone, pas | sword):
self.firstname = firstname
self.lastname = lastname
self.country = country
self.address = address
self.postcode = postcode
        self.city = city
self.email = email
self.phone = phone
self.password = password
def __repr__(self):
return "%s:% | s:%s" % (self.email, self.firstname, self.lastname)
|
valsteen/ableton-live-webapi | ext_libs/Cython/Compiler/ModuleNode.py | Python | unlicense | 122,308 | 0.002502 | #
# Module parse tree node
#
import cython
cython.declare(Naming=object, Options=object, PyrexTypes=object, TypeSlots=object,
error=object, warning=object, py_object_type=object, UtilityCode=object,
EncodedString=object)
import os, time
from PyrexTypes import CPtrType
import Future
import Annotate
import Code
import Naming
import Nodes
import Options
import TypeSlots
import Version
import PyrexTypes
from Errors import error, warning
from PyrexTypes import py_object_type
from Cython.Utils import open_new_file, replace_suffix, decode_filename
from Code import UtilityCode
from StringEncoding import EncodedString
def check_c_declarations_pxd(module_node):
module_node.scope.check_c_classes_pxd()
return module_node
def check_c_declarations(module_node):
module_node.scope.check_c_classes()
module_node.scope.check_c_functions()
return module_node
class ModuleNode(Nodes.Node, Nodes.BlockNode):
# doc string or None
# body StatListNode
#
# referenced_modules [ModuleScope]
# full_module_name string
#
# scope The module scope.
# compilation_source A CompilationSource (see Main)
# directives Top-level compiler directives
child_attrs = ["body"]
directives = None
def merge_in(self, tree, scope, merge_scope=False):
# Merges in the contents of another tree, and possibly scope. With the
# current implementation below, this must be done right prior
# to code generation.
#
# Note: This way of doing it seems strange -- I believe the
# right concept is to split ModuleNode into a ModuleNode and a
# CodeGenerator, and tell that CodeGenerator to generate code
# from multiple sources.
assert isinstance(self.body, Nodes.StatListNode)
if isinstance(tree, Nodes.StatListNode):
self.body.stats.extend(tree.stats)
else:
self.body.stats.append(tree)
self.scope.utility_code_list.extend(scope.utility_code_list)
def extend_if_not_in(L1, L2):
for x in L2:
if x not in L1:
L1.append(x)
extend_if_not_in(self.scope.include_files, scope.include_files)
extend_if_not_in(self.scope.included_files, scope.included_files)
extend_if_not_in(self.scope.python_include_files,
scope.python_include_files)
if merge_scope:
# Ensure that we don't generate import code for these entries!
for entry in scope.c_class_entries:
entry.type.module_name = self.full_module_name
self.scope.merge_in(scope)
def analyse_declarations(self, env):
if not Options.docstrings:
env.doc = self.doc = None
elif Options.embed_pos_in_docstring:
env.doc = EncodedString(u'File: %s (starting at line %s)' % Nodes.relative_position(self.pos))
if not self.doc is None:
env.doc = EncodedString(env.doc + u'\n' + self.doc)
env.doc.encoding = self.doc.enco | ding
else:
env.doc = self.doc
env.directi | ves = self.directives
self.body.analyse_declarations(env)
def process_implementation(self, options, result):
env = self.scope
env.return_type = PyrexTypes.c_void_type
self.referenced_modules = []
self.find_referenced_modules(env, self.referenced_modules, {})
if options.recursive:
self.generate_dep_file(env, result)
self.generate_c_code(env, options, result)
self.generate_h_code(env, options, result)
self.generate_api_code(env, result)
def has_imported_c_functions(self):
for module in self.referenced_modules:
for entry in module.cfunc_entries:
if entry.defined_in_pxd:
return 1
return 0
def generate_dep_file(self, env, result):
modules = self.referenced_modules
if len(modules) > 1 or env.included_files:
dep_file = replace_suffix(result.c_file, ".dep")
f = open(dep_file, "w")
try:
for module in modules:
if module is not env:
f.write("cimport %s\n" % module.qualified_name)
for path in module.included_files:
f.write("include %s\n" % path)
finally:
f.close()
def generate_h_code(self, env, options, result):
def h_entries(entries, api=0, pxd=0):
return [entry for entry in entries
if ((entry.visibility == 'public') or
(api and entry.api) or
(pxd and entry.defined_in_pxd))]
h_types = h_entries(env.type_entries, api=1)
h_vars = h_entries(env.var_entries)
h_funcs = h_entries(env.cfunc_entries)
h_extension_types = h_entries(env.c_class_entries)
if (h_types or h_vars or h_funcs or h_extension_types):
result.h_file = replace_suffix(result.c_file, ".h")
h_code = Code.CCodeWriter()
Code.GlobalState(h_code, self)
if options.generate_pxi:
result.i_file = replace_suffix(result.c_file, ".pxi")
i_code = Code.PyrexCodeWriter(result.i_file)
else:
i_code = None
h_guard = Naming.h_guard_prefix + self.api_name(env)
h_code.put_h_guard(h_guard)
h_code.putln("")
self.generate_type_header_code(h_types, h_code)
if options.capi_reexport_cincludes:
self.generate_includes(env, [], h_code)
h_code.putln("")
api_guard = Naming.api_guard_prefix + self.api_name(env)
h_code.putln("#ifndef %s" % api_guard)
h_code.putln("")
self.generate_extern_c_macro_definition(h_code)
if h_extension_types:
h_code.putln("")
for entry in h_extension_types:
self.generate_cclass_header_code(entry.type, h_code)
if i_code:
self.generate_cclass_include_code(entry.type, i_code)
if h_funcs:
h_code.putln("")
for entry in h_funcs:
self.generate_public_declaration(entry, h_code, i_code)
if h_vars:
h_code.putln("")
for entry in h_vars:
self.generate_public_declaration(entry, h_code, i_code)
h_code.putln("")
h_code.putln("#endif /* !%s */" % api_guard)
h_code.putln("")
h_code.putln("#if PY_MAJOR_VERSION < 3")
h_code.putln("PyMODINIT_FUNC init%s(void);" % env.module_name)
h_code.putln("#else")
h_code.putln("PyMODINIT_FUNC PyInit_%s(void);" % env.module_name)
h_code.putln("#endif")
h_code.putln("")
h_code.putln("#endif /* !%s */" % h_guard)
f = open_new_file(result.h_file)
try:
h_code.copyto(f)
finally:
f.close()
def generate_public_declaration(self, entry, h_code, i_code):
h_code.putln("%s %s;" % (
Naming.extern_c_macro,
entry.type.declaration_code(
entry.cname, dll_linkage = "DL_IMPORT")))
if i_code:
i_code.putln("cdef extern %s" %
entry.type.declaration_code(entry.cname, pyrex = 1))
def api_name(self, env):
return env.qualified_name.replace(".", "__")
def generate_api_code(self, env, result):
def api_entries(entries, pxd=0):
return [entry for entry in entries
if entry.api or (pxd and entry.defined_in_pxd)]
api_vars = api_entries(env.var_entries)
api_funcs = api_entries(env.cfunc_entries)
api_extension_types = api_entries(env.c_class_entries)
if api_vars or api_funcs or api_extension_types:
result.api_file = replace_suffix(re |
frankrousseau/weboob | weboob/browser/filters/standard.py | Python | agpl-3.0 | 23,209 | 0.00194 | # -*- coding: utf-8 -*-
# Copyright(C) 2014 Romain Bignon
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
import datetime
import re
import unicodedata
from decimal import Decimal, InvalidOperation
from itertools import islice
from collections import Iterator
from dateutil.parser import parse as parse_date
from weboob.capabilities.base import empty
from weboob.tools.compat import basestring
from weboob.exceptions import ParseError
from weboob.browser.url import URL
from weboob.tools.log import getLogger, DEBUG_FILTERS
class NoDefault(object):
def __repr__(self):
return 'NO_DEFAULT'
_NO_DEFAULT = NoDefault()
__all__ = ['FilterError', 'ColumnNotFound', 'RegexpError', 'ItemNotFound',
'Filter', 'Base', 'Env', 'TableCell', 'RawText',
'CleanText', 'Lower', 'CleanDecimal', 'Field', 'Regexp', 'Map',
'DateTime', 'Date', 'Time', 'DateGuesser', 'Duration',
'MultiFilter', 'CombineDate', 'Format', 'Join', 'Type',
'BrowserURL', 'Async', 'AsyncLoad']
class FilterError(ParseError):
pass
class ColumnNotFound(FilterError):
pass
class RegexpError(FilterError):
pass
class ItemNotFound(FilterError):
pass
class _Filter(object):
_creation_counter = 0
def __init__(self, default=_NO_DEFAULT):
self._key = None
self._obj = None
self.default = default
self._creation_counter = _Filter._creation_counter
_Filter._creation_counter += 1
def __or__(self, o):
self.default = o
return self
def __and__(self, o):
if isinstance(o, type) and issubclass(o, _Filter):
o = o()
o.selector = self
return o
def default_or_raise(self, exception):
if self.default is not _NO_DEFAULT:
return self.default
else:
raise exception
def __str__(self):
return self.__class__.__name__
def debug(*args):
"""
A decorator function to provide some debug information
in Filters.
It prints by default the name of the Filter and the input value.
"""
    def wrapper(function):
def print_debug(self, value):
logger = getLogger('b2filters')
result = ''
outputvalue = value
if isinstance(value, list):
from lxml import etree
outputvalue = ''
first = True
for element in value:
if first:
first = False
else:
outputvalue += ', '
if isinstance(element, etree.ElementBase):
outputvalue += "%s" % etree.tostring(element, encoding=unicode)
else:
outputvalue += "%r" % element
if self._obj is not None:
result += "%s" % self._obj._random_id
if self._key is not None:
result += ".%s" % self._key
name = str(self)
result += " %s(%r" % (name, outputvalue)
for arg in self.__dict__:
if arg.startswith('_') or arg == u"selector":
continue
if arg == u'default' and getattr(self, arg) == _NO_DEFAULT:
continue
result += ", %s=%r" % (arg, getattr(self, arg))
result += u')'
logger.log(DEBUG_FILTERS, result)
res = function(self, value)
return res
return print_debug
    return wrapper
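# Hedged note: to actually see the messages emitted by @debug(), the
# 'b2filters' logger must be enabled at the very verbose DEBUG_FILTERS level
# (both names come from the imports at the top of this module), e.g.:
#   import logging
#   logging.basicConfig()
#   logging.getLogger('b2filters').setLevel(DEBUG_FILTERS)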
class Filter(_Filter):
"""
Class used to filter on a HTML element given as call parameter to return
matching elements.
Filters can be chained, so the parameter supplied to constructor can be
either a xpath selector string, or an other filter called before.
>>> from lxml.html import etree
>>> f = CleanDecimal(CleanText('//p'), replace_dots=True)
>>> f(etree.fromstring('<html><body><p>blah: <span>229,90</span></p></body></html>'))
Decimal('229.90')
"""
def __init__(self, selector=None, default=_NO_DEFAULT):
super(Filter, self).__init__(default=default)
self.selector = selector
@classmethod
def select(cls, selector, item, obj=None, key=None):
if isinstance(selector, basestring):
return item.xpath(selector)
elif isinstance(selector, _Filter):
selector._key = key
selector._obj = obj
return selector(item)
elif callable(selector):
return selector(item)
else:
return selector
def __call__(self, item):
return self.filter(self.select(self.selector, item, key=self._key, obj=self._obj))
@debug()
def filter(self, value):
"""
        This method has to be overridden by child classes.
"""
raise NotImplementedError()
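# An illustrative override of the contract stated above. Upper is a
# hypothetical filter, not part of weboob: subclasses implement filter() and
# inherit selection and chaining from Filter.
class Upper(Filter):
    @debug()
    def filter(self, value):
        return value.upper() if isinstance(value, basestring) else value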
class _Selector(Filter):
def filter(self, elements):
if elements is not None:
return elements
else:
ret | urn self.default_or_raise(ParseError('Element %r not found' % self.selector))
class AsyncLoad(Filter):
def __call__(self, item):
link = self.select(self.selector, item, key=self._key, obj=self._obj)
return item.page.browser.async_open(link)
class | Async(_Filter):
def __init__(self, name, selector=None):
super(Async, self).__init__()
self.selector = selector
self.name = name
def __and__(self, o):
if isinstance(o, type) and issubclass(o, _Filter):
o = o()
self.selector = o
return self
def __call__(self, item):
result = item.loaders[self.name].result()
assert result.page is not None, 'The loaded url %s hasn\'t been matched by an URL object' % result.url
return self.selector(result.page.doc)
class Base(Filter):
"""
Change the base element used in filters.
>>> Base(Env('header'), CleanText('./h1')) # doctest: +SKIP
"""
def __call__(self, item):
base = self.select(self.base, item, obj=self._obj, key=self._key)
return self.selector(base)
def __init__(self, base, selector=None, default=_NO_DEFAULT):
super(Base, self).__init__(selector, default)
self.base = base
class Env(_Filter):
"""
Filter to get environment value of the item.
It is used for example to get page parameters, or when there is a parse()
method on ItemElement.
"""
def __init__(self, name, default=_NO_DEFAULT):
super(Env, self).__init__(default)
self.name = name
def __call__(self, item):
try:
return item.env[self.name]
except KeyError:
return self.default_or_raise(ParseError('Environment variable %s not found' % self.name))
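# Hedged usage sketch, kept as comments because ItemElement is not imported
# in this module; obj_id and the 'id' key are illustrative. Values put into
# the element's env (for example by a parse() method) are read back via Env:
#   class item(ItemElement):
#       obj_id = Env('id')
#       def parse(self, el):
#           self.env['id'] = el.attrib.get('data-id')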
class TableCell(_Filter):
"""
Used with TableElement, it get the cell value from its name.
For example:
>>> from weboob.capabilities.bank import Transaction
>>> from weboob.browser.elements import TableElement, ItemElement
>>> class table(TableElement):
... head_xpath = '//table/thead/th'
... item_xpath = '//table/tbody/tr'
... col_date = u'Date'
... col_label = [u'Name', u'Label']
... class item(ItemElement):
... klass = Transaction
... obj_date = Date(TableCell('date'))
... obj_label = CleanText(TableCell('label'))
...
"""
def __init__(self, *names, **kwargs):
s |
hanya/MRI | pythonpath/mytools_Mri/ui/controller.py | Python | apache-2.0 | 2,539 | 0.010634 | # Copyright 2011 Tsutomu Uchino
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unohelper
from com.sun.star.frame import XController, XTitle, XDispatchProvider
from com.sun.star.lang import XServiceInfo
from com.sun.star.task import XStatusIndicatorSupplier
class MRIUIController(unohelper.Base,
XController, XTitle, XDispatchProvider,
XStatusIndicatorSupplier, XServiceInfo):
""" Provides controller which connects between frame and model. """
IMPLE_NAME | = "mytools.mri.UIController"
def __init__(self,frame, model):
self.frame = frame
self.model = model
self.ui = None
|
def set_ui(self, ui):
self.ui = ui
def get_imple_name(self):
return self.ui.pages.get_imple_name()
# XTitle
def getTitle(self):
return self.frame.getTitle()
def setTitle(self, title):
self.frame.setTitle(title)
def dispose(self):
self.frame = None
self.model = None
def addEventListener(self, xListener):
pass
def removeEventListener(self, aListener):
pass
# XController
def attachFrame(self, frame):
self.frame = frame
def attachModel(self, model):
self.model = model
def suspend(self, Suspend):
return True
def getViewData(self):
""" Returns current instance inspected. """
return self.ui.main.current.target
def restoreViewData(self, Data):
pass
def getModel(self):
return self.model
def getFrame(self):
return self.frame
def getStatusIndicator(self):
pass
# XDispatchProvider
def queryDispatch(self, url, name, flags):
pass
def queryDispatches(self, requests):
pass
# XServiceInfo
def getImplementationName(self):
return self.IMPLE_NAME
def supportsService(self, name):
return name == self.IMPLE_NAME
def getSupportedServiceNames(self):
return self.IMPLE_NAME,
|
ArcEye/machinekit-testing | src/hal/user_comps/vismach/scaragui.py | Python | lgpl-2.1 | 5,700 | 0.017719 | #!/usr/bin/python2.4
# Copyright 2007 John Kasunich and Jeff Epler
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
from vismach import *
import hal
import math
import sys
c = hal.component("scaragui")
c.newpin("joint0", hal.HAL_FLOAT, hal.HAL_IN)
c.newpin("joint1", hal.HAL_FLOAT, hal.HAL_IN)
c.newpin("joint2", hal.HAL_FLOAT, hal.HAL_IN)
c.newpin("joint3", hal.HAL_FLOAT, hal.HAL_IN)
c.newpin("joint4", hal.HAL_FLOAT, hal.HAL_IN)
c.newpin("joint5", hal.HAL_FLOAT, hal.HAL_IN)
c.ready()
# parameters that define the geometry; see scarakins.c for definitions. these
# numbers match the defaults there, and will need to be changed or specified on
# the commandline if you are not using the defaults.
d1 = 490.0
d2 = 340.0
d3 = 50.0
d4 = 250.0
d5 = 50.0
d6 = 50.0
j3min = 40.0
j3max = 270 | .0
for setting in sys.argv[1:]: exec setting
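# Hedged usage note: each extra argv token is exec'd above as a plain Python
# assignment, so the geometry can be overridden at launch, e.g. with
# illustrative values that do not describe any real machine:
#   python scaragui.py d1=500.0 d4=300.0 j3max=250.0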
# calculate a bunch of other dimensions that are used
# to scale the model of the machine
# most of these scale factors are arbitrary, to give
# a nicely proportioned machine. If you know specifics
# for the machine you are modeling, feel free to change
# these numbers
tool_len = math.sqrt(d | 5*d5+d6*d6) # don't change
tool_dia = tool_len / 6.0
# diameters of the arms
l1_dia = d2 / 5.0
l2_dia = d4 / 5.0
l3_dia = l2_dia * 0.8
# diameters of the "lumps" at the joints
j0_dia = l1_dia * 1.5
j1_dia = max(l1_dia * 1.25, l2_dia * 1.5)
j2_dia = l2_dia * 1.25
# other dims
j0_hi = l1_dia * 1.2
j1_hi1 = l1_dia * 1.1
j1_hi2 = l2_dia * 1.2
j2_hi = l2_dia * 1.3
# don't change these
tool_angle = math.degrees(math.atan2(d6,d5))
tool_radius = tool_dia / 2.0
l1_rad = l1_dia / 2.0
l2_rad = l2_dia / 2.0
l3_len = j3max + j2_hi * 0.7
l3_rad = l3_dia / 2.0
j0_hi = j0_hi / 2.0
j0_rad = j0_dia / 2.0
j1_hi1 = j1_hi1 / 2.0
j1_hi2 = j1_hi2 / 2.0
j1_rad = j1_dia / 2.0
j2_hi = j2_hi / 2.0
j2_rad = j2_dia / 2.0
size = max(d1+d3+l3_len,d2+d4+d6)
# tool - cylinder with a point, and a ball to hide the blunt back end
# the origin starts out at the tool tip, and we want to capture this
# "tooltip" coordinate system
tooltip = Capture()
tool = Collection([
tooltip,
Sphere(0.0, 0.0, tool_len, tool_dia),
CylinderZ(tool_len, tool_radius, tool_dia, tool_radius),
CylinderZ(tool_dia, tool_radius, 0.0, 0.0)])
# translate so origin is at base of tool, not the tip
tool = Translate([tool],0.0,0.0,-tool_len)
# the tool might not be pointing straight down
tool = Rotate([tool],tool_angle,0.0,-1.0,0.0)
# make joint 3 rotate
tool = HalRotate([tool],c,"joint3",1,0,0,1)
link3 = CylinderZ(0.0, l3_rad, l3_len, l3_rad)
# attach tool to end
link3 = Collection([tool,link3])
# make joint 2 go up and down
link3 = HalTranslate([link3],c,"joint2",0,0,-1)
# outer arm
# start with link3 and the cylinder it slides in
link2 = Collection([
link3,
CylinderZ(-j2_hi, j2_rad, j2_hi, j2_rad)])
# move to end of arm
link2 = Translate([link2], d4, 0.0, 0.0)
# add the arm itself
link2 = Collection([
link2,
CylinderX(d4, l2_rad, 1.5*j1_rad, l2_rad)])
# the joint gets interesting, because link2 can be above or below link1
if d3 > 0:
flip = 1
else:
flip = -1
# add the joint
link2 = Collection([
link2,
Box(1.5*j1_rad, -0.9*j1_rad, -j1_hi2, 1.15*j1_rad, 0.9*j1_rad, j1_hi2),
Box(1.15*j1_rad, -0.9*j1_rad, -0.4*d3, 0.0, 0.9*j1_rad, flip*j1_hi2),
CylinderZ(-0.4*d3, j1_rad, flip*1.2*j1_hi2, j1_rad)])
# make the joint work
link2 = HalRotate([link2],c,"joint1",1,0,0,1)
# inner arm
# the outer arm and the joint
link1 = Collection([
Translate([link2],0.0,0.0,d3),
Box(-1.5*j1_rad, -0.9*j1_rad, -j1_hi1, -1.15*j1_rad, 0.9*j1_rad, j1_hi1),
Box(-1.15*j1_rad, -0.9*j1_rad, 0.4*d3, 0.0, 0.9*j1_rad, -flip*j1_hi1),
CylinderZ(0.4*d3, j1_rad, flip*-1.2*j1_hi1, j1_rad),
CylinderZ(0.6*d3, 0.8*j1_rad, 0.4*d3, 0.8*j1_rad)])
# move to end of arm
link1 = Translate([link1], d2, 0.0, 0.0)
# add the arm itself, and the inner joint
link1 = Collection([
link1,
CylinderX(d2-1.5*j1_rad, l1_rad, 1.5*j0_rad, l1_rad),
Box(1.5*j0_rad, -0.9*j0_rad, -j0_hi, 0.0, 0.9*j0_rad, j0_hi),
CylinderZ(-1.2*j0_hi, j0_rad, 1.2*j0_hi, j0_rad)])
# make the joint work
link1 = HalRotate([link1],c,"joint0",1,0,0,1)
#stationary base
link0 = Collection([
CylinderZ(d1-j0_hi, 0.8*j0_rad, d1-1.5*j0_hi, 0.8*j0_rad),
CylinderZ(d1-1.5*j0_hi, 0.8*j0_rad, 0.07*d1, 1.3*j0_rad),
CylinderZ(0.07*d1, 2.0*j0_rad, 0.0, 2.0*j0_rad)])
# slap the arm on top
link0 = Collection([
link0,
Translate([link1],0,0,d1)])
# add a floor
floor = Box(-0.5*size,-0.5*size,-0.02*size,0.5*size,0.5*size,0.0)
# and a table for the workpiece - define in workpiece coords
reach = d2+d4-d6
table_height = d1+d3-j3max-d5
work = Capture()
table = Collection([
work,
Box(-0.35*reach,-0.5*reach, -0.1*d1, 0.35*reach, 0.5*reach, 0.0)])
# make the table moveable (tilting)
table = HalRotate([table],c,"joint4",1,0,1,0)
table = HalRotate([table],c,"joint5",1,1,0,0)
# put the table into its proper place
table = Translate([table],0.5*reach,0.0,table_height)
model = Collection([link0, floor, table])
main(model, tooltip, work, size)
|
roycem90/python-o365 | O365/inbox.py | Python | apache-2.0 | 2,984 | 0.037198 | from O365.message import Message
import logging
import json
import requests
log = logging.getLogger(__name__)
class Inbox( object ):
'''
Wrapper class for an inbox which mostly holds a list of messages.
Methods:
getMessages -- downloads messages to local memory.
Variables:
inbox_url -- url used for fetching emails.
'''
#url for fetching emails. Takes a flag for whether they are read or not.
inbox_url = 'https://outlook.office365.com/api/v1.0/me/messages'
def __init__(self, auth, getNow=True, verify=True):
'''
Creates a new inbox wrapper. Send email and password for authentication.
set getNow to false if you don't want to immedeatly download new messages.
'''
log.debug('creating inbox for the email %s',auth[0])
self.auth = auth
self.messages = []
self.filters = ''
        self.verify = verify  # set before getMessages(), which reads it
        if getNow:
            self.filters = 'IsRead eq false'
            self.getMessages()
def getMessages(self, number = 10):
'''
Downloads messages to local memory.
        You create an inbox to be the container class for messages; this method
        then pulls those messages down into local memory. It is called in the
        init method, so you only need to call it yourself when new messages
        may have arrived.
You can filter only certain emails by setting filters. See the set and
get filters methods for more information.
'''
log.debug('fetching messages.')
response = requests.get(self.inbox_url,auth=self.auth,params={'$filter':self.filters, '$top':number},verify=self.verify)
log.info('Response from O365: %s', str(response))
for message in response.json()['value']:
try:
duplicate = False
for i,m in enumerate(self.messages):
if message['Id'] == m.json['Id']:
self.messages[i] = Message(message,self.auth)
duplicate = True
break
if not duplicate:
self.messages.append(Message(message,self.auth))
log.debug('appended message: %s',message['Subject'])
except Exception as e:
                log.info('failed to append message: %s', str(e))
log.debug('all messages retrieved and put in to the list.')
return True
def getFilter(self):
        '''Return the current filter string, or an empty string if none is set.'''
return self.filters
def setFilter(self,f_string):
'''
Set the value of a filter. More information on what filters are available
can be found here:
https://msdn.microsoft.com/office/office365/APi/complex-types-for-mail-contacts-calendar#RESTAPIResourcesMessage
        I may in the future add the ability to set these yourself, but right now that is too complicated.
Arguments:
f_string -- The string that represents the filters you want to enact.
        It should be something like: (HasAttachments eq true) and (IsRead eq false)
or just: IsRead eq false |
        test your filter string here: https://outlook.office365.com/api/v1.0/me/messages?$filter=
if that accepts it then you know it works.
'''
self.filters = f_string
return True
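# Hedged usage sketch; auth stands for an assumed (email, password) tuple:
#   i = Inbox(auth, getNow=False)
#   i.setFilter('(HasAttachments eq true) and (IsRead eq false)')
#   i.getMessages(number=25)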
#To | the King!
|
StevenLOL/aicyber_semeval_2016_ivector | System_2/steps/conf/append_eval_to_ctm.py | Python | gpl-3.0 | 1,989 | 0.019608 | #!/bin/env python
# Copyright 2015 Brno University of Technology (author: Karel Vesely)
# Apache 2.0
import sys,opera | tor
# Append Levenshtein alignment of 'hypothesis' and 'reference' into 'CTM':
# (i.e. the output of 'align-text' post-processed by 'wer_per_utt_details.pl')
# The tags in the appended column are:
# 'C' = correct
# 'S' = substitution
# 'I' = insertion
# 'U' = unknown (not part of scored segment)
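# A small made-up example: for the ctm line
#   utt1 1 0.42 0.31 hello 0.97
# whose aligned op is 'C', the output line (note the %f formatting used when
# writing below) becomes
#   utt1 1 0.420000 0.310000 hello 0.970000 C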
if len(sys.argv) != 4:
print 'Usage: %s eval-in ctm-in ctm-eval-out' % __file__
sys.exit | (1)
dummy, eval_in, ctm_in, ctm_eval_out = sys.argv
if ctm_eval_out == '-': ctm_eval_out = '/dev/stdout'
# Read the evaluation,
eval_vec = dict()
with open(eval_in, 'r') as f:
while True:
# Reading 4 lines encoding one utterance,
ref = f.readline()
hyp = f.readline()
op = f.readline()
csid = f.readline()
if not ref: break
# Parse the input,
utt,tag,hyp_vec = hyp.split(' ',2)
assert(tag == 'hyp')
utt,tag,op_vec = op.split(' ',2)
assert(tag == 'op')
hyp_vec = hyp_vec.split()
op_vec = op_vec.split()
        # Fill the eval vector with symbols 'C', 'S', 'I',
assert(utt not in eval_vec)
eval_vec[utt] = []
for op,hyp in zip(op_vec, hyp_vec):
if hyp != '<eps>': eval_vec[utt].append(op)
# Load the 'ctm' into dictionary,
ctm = dict()
with open(ctm_in) as f:
for l in f:
utt, ch, beg, dur, wrd, conf = l.split()
if not utt in ctm: ctm[utt] = []
ctm[utt].append((utt, ch, float(beg), float(dur), wrd, float(conf)))
# Build the 'ctm' with 'eval' column added,
ctm_eval = []
for utt,ctm_part in ctm.iteritems():
ctm_part.sort(key = operator.itemgetter(2)) # Sort by 'beg' time,
# extending the 'tuple' by '+':
merged = [ tup + (evl,) for tup,evl in zip(ctm_part,eval_vec[utt]) ]
ctm_eval.extend(merged)
# Sort again,
ctm_eval.sort(key = operator.itemgetter(0,1,2))
# Store,
with open(ctm_eval_out,'w') as f:
for tup in ctm_eval:
f.write('%s %s %f %f %s %f %s\n' % tup)
|
dmlc/tvm | tests/python/contrib/test_ethosu/cascader/__init__.py | Python | apache-2.0 | 832 | 0 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distrib | uted under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. | See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test infrastructure for the NPU cascader"""
|
codewarrior0/pymclevel | items.py | Python | isc | 19,714 | 0.001572 | from collections import defaultdict
import logging
logger = logging.getLogger(__file__)
items_txt = """
:version 27
# Blocks
# ID NAME FILE CORDS DAMAGE
1 Stone terrain.png 1,0
2 Grass terrain.png 3,0
3 Dirt terrain.png 2,0
4 Cobblestone terrain.png 0,1
5 Wooden_Planks terrain.png 4,0
6 Sapling terrain.png 15,0 0
6 Spruce_Sapling terrain.png 15,3 1
6 Birch_Sapling terrain.png 15,4 2
7 Bedrock terrain.png 1,1
8 Water terrain.png 15,13
9 Still_Water terrain.png 15,13
10 Lava terrain.png 15,15
11 Still_Lava terrain.png 15,15
12 Sand terrain.png 2,1
13 Gravel terrain.png 3,1
14 Gold_Ore terrain.png 0,2
15 Iron_Ore terrain.png 1,2
16 Coal_Ore terrain.png 2,2
17 Wood terrain.png 4,1 0
17 Dark_Wood terrain.png 4,7 1
17 Birch_Wood terrain.png 5,7 2
18 Leaves special.png 5,0 0
18 Dark_Leaves special.png 5,1 1
18 Birch_Leaves special.png 5,2 2
19 Sponge terrain.png 0,3
20 Glass terrain.png 1,3
21 Lapis_Lazuli_Ore terrain.png 0,10
22 Lapis_Lazuli_Block terrain.png 0,9
23 Dispenser terrain.png 14,2
24 Sandstone terrain.png 0,12
25 Note_Block terrain.png 10,4
26 Bed_Block terrain.png 6,8
27 Powered_Rail terrain.png 3,10
28 Detector_Rail terrain.png 3,12
29 Sticky_Piston terrain.png 10,6
30 Cobweb terrain.png 11,0
31 Dead_Bush terrain.png 7,3 0
31 Tall_Grass special.png 5,3 1
31 Fern special.png 4,5 2
32 Dead_Bush terrain.png 7,3
33 Piston terrain.png 11,6
34 Piston_(head) terrain.png 11,6
35 Wool terrain.png 0,4 0
35 Orange_Wool terrain.png 2,13 1
35 Magenta_Wool terrain.png 2,12 2
35 Light_Blue_Wool terrain.png 2,11 3
35 Yellow_Wool terrain.png 2,10 4
35 Lime_Wool terrain.png 2,9 5
35 Pink_Wool terrain.png 2,8 6
35 Gray_Wool terrain.png 2,7 7
35 Light_Gray_Wool terrain.png 1,14 8
35 Cyan_Wool terrain.png 1,13 9
35 Purple_Wool terrain.png 1,12 10
35 Blue_Wool terrain.png 1,11 11
35 Brown_Wool terrain.png 1,10 12
35 Green_Wool terrain.png 1,9 13
35 Red_Wool terrain.png 1,8 14
35 Black_Wool terrain.png 1,7 15
37 Flower terrain.png 13,0
38 Rose terrain.png 12,0
39 Brown_Mushroom terrain.png 13,1
40 Red_Mushroom terrain.png 12,1
41 Block_of_Gold | terrain.png 7,1
42 Block_of_Iron terrain.png 6,1
43 Double_Stone_Slab terrain.png 5,0 0
43 Double_Sandstone_Slab terrain.png 0,12 1
43 Double_Wooden_Slab terrain.png 4,0 2
43 Double_Stone_Slab terrain.png 0,1 3
44 Stone_Slab | special.png 2,2 0
44 Sandstone_Slab special.png 1,2 1
44 Wooden_Slab special.png 3,0 2
44 Stone_Slab special.png 1,0 3
44 Bricks_Slab special.png 0,0 4
44 Stone_Bricks_Slab special.png 2,0 5
45 Bricks terrain.png 7,0
46 TNT terrain.png 8,0
47 Bookshelf terrain.png 3,2
48 Moss_Stone terrain.png 4,2
49 Obsidian terrain.png 5,2
50 Torch terrain.png 0,5
51 Fire special.png 4,0
52 Monster_Spawner terrain.png 1,4
53 Wooden_Stairs special.png 3,1
54 Chest terrain.png 11,1
55 Redstone_Dust terrain.png 4,5
56 Diamond_Ore terrain.png 2,3
57 Block_of_Diamond terrain.png 8,1
58 Workbench terrain.png 12,3
59 Crops terrain.png 15,5
60 Farmland terrain.png 7,5
61 Furnace terrain.png 12,2
62 Lit_Furnace terrain.png 13,3
63 Sign_Block terrain.png 0,0
64 Wooden_Door_Block terrain.png 1,6
65 Ladder terrain.png 3,5
66 Rail terrain.png 0,8
67 Stone_Stairs special.png 1,1
68 Wall_Sign terrain.png 4,0
69 Lever terrain.png 0,6
70 Stone_Pressure_Plate special.png 2,4
71 Iron_Door_Block terrain.png 2,6
72 Wooden_Pressure_Plate special.png 3,4
73 Redstone_Ore terrain.png 3,3
74 Glowing_Redstone_Ore terrain.png 3,3
75 Redstone_Torch_(off) terrain.png 3,7
76 Redstone_Torch terrain.png 3,6
77 Button special.png 2,3
78 Snow_Layer special.png 1,4
79 Ice terrain.png 3,4
80 Snow terrain.png 2,4
81 Cactus terrain.png 6,4
82 Clay terrain.png 8,4
83 Sugar_cane terrain.png 9,4
84 Jukebox terrain.png 10,4
85 Fence special.png 3,2
86 Pumpkin terrain.png 7,7
87 Netherrack terrain.png 7,6
88 Soul_Sand terrain.png 8,6
89 Glowstone terrain.png 9,6
90 Portal special.png 0,5
91 Jack-o'-lantern terrain.png 8,7
92 Cake special.png 0,2
93 Repeater_Block_(off) terrain.png 3,8
94 Repeater_Block terrain.png 3,9
95 Locked_Chest terrain.png 11,1
96 Trapdoor terrain.png 4,5
97 Silverfish_Block terrain.png 1,0
98 Stone_Bricks terrain.png 6,3 0
98 Mossy_Stone_Bricks terrain.png 4,6 1
98 Damaged_Stone_Bricks terrain.png 5,6 2
99 Brown_Mushroom_Block terrain.png 13,7
100 Red_Mushroom_Block terrain.png 14,7
101 Iron_Bars terrain.png 5,5
102 Glass_Pane special.png 1,3
103 Melon terrain.png 8,8
104 Pumpkin_Stem terrain.png 5,5
105 Melon_Stem terrain.png 5,5
106 Vines special.png 5,4
107 Fence_Gate special.png 3,3
108 Brick_Stairs special.png 0,1
109 Stone_Brick_Stairs special.png 2,1
110 Mycelium terrain.png 13,4
111 Lily_Pad special.png 4,4
112 Nether_Brick terrain.png 0,14
113 Nether_Brick_Fence special.png 4,2
114 Nether_Brick_Stairs special.png 4,1
115 Nether_Wart terrain.png 2,14
116 Enchantment_Table terrain.png 6,11
117 Brewing_Stand terrain.png 13,9
118 Cauldron terrain.png 10,9
119 Air_Portal special.png 1,5
120 Air_Portal_Frame terrain.png 15,9
# Items
# ID NAME FILE CORDS DAMAGE
256 Iron_Shovel items.png 2,5 +250
257 Iron_Pickaxe items.png 2,6 +250
258 Iron_Axe items.png 2,7 +250
259 Flint_and_Steel items.png 5,0 +64
260 Apple items.png 10,0 x1
261 Bow items.png 5,1 x1
262 Arrow items.png 5,2
263 Coal items.png 7,0 0
263 Charcoal items.png 7,0 1
264 Diamond items.png 7,3
265 Iron_Ingot items.png 7,1
266 Gold_Ingot items.png 7,2
267 Iron_Sword items.png 2,4 +250
268 Wooden_Sword items. |
edx/insights | src/edinsights/modulefs/modulefs.py | Python | agpl-3.0 | 3,765 | 0.012218 | import json
import os
import os.path
import types
from django.conf import settings
from models import FSExpirations
if settings.DJFS['type'] == 'osfs':
from fs.osfs import OSFS
elif settings.DJFS['type'] == 's3fs':
from fs.s3fs import S3FS
from boto.s3.connection import S3Connection
from boto.s3.key import Key
s3conn = S3Connection()
else:
raise AttributeError("Bad filesystem: "+str(settings.DJFS['type']))
def get_filesystem(namespace):
''' Returns a pyfilesystem for static module storage.
The file system will have two additional properties:
1) get_url: A way to get a URL for a static file download
2) expire: A way to expire files (so they are automatically destroyed)
'''
if settings.DJFS['type'] == 'osfs':
return get_osfs( namespace )
elif settings.DJFS['type'] == 's3fs':
return get_s3fs( namespace )
else:
raise AttributeError("Bad filesystem: "+str(settings.DJFS['type']))
def expire_objects():
''' Remove all obsolete objects from the file systems. Untested. '''
objects = sorted(FSExpirations.expired(), key=lambda x:x.module)
fs = None
module = None
for o in objects:
if module != o.module:
module = o.module
| fs = get_filesystem(module)
if fs.exists(o.filename):
fs.remove(o.filename)
o.delete()
def patch_fs(fs, namespace, url_method):
''' | Patch a filesystem object to add two methods:
get_url returns a URL for a resource stored on that filesystem. It takes two parameters:
filename: Which resource
timeout: How long that resource is available for
expire sets a timeout on how long the system should keep the resource. It takes four parameters:
filename: Which resource
seconds: How long we will keep it
days: (optional) More user-friendly if a while
expires: (optional) boolean; if set to False, we keep the resource forever.
Without calling this method, we provide no guarantees on how long resources will stick around.
'''
def expire(self, filename, seconds, days=0, expires = True):
''' Set the lifespan of a file on the filesystem.
filename: Name of file
        expires: False means the file will never be removed
seconds and days give time to expiration.
'''
FSExpirations.create_expiration(namespace, filename, seconds, days=days, expires = expires)
fs.expire = types.MethodType(expire, fs)
fs.get_url = types.MethodType(url_method, fs)
return fs
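# Hedged usage sketch of the patched API; 'myapp' and the filename are
# illustrative and the write call follows pyfilesystem conventions:
#   fs = get_filesystem('myapp')
#   with fs.open('report.csv', 'wb') as f:
#       f.write(data)
#   fs.expire('report.csv', 0, days=7)            # remove after a week
#   url = fs.get_url('report.csv', timeout=300)   # on S3: signed, 5 minutes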
def get_osfs(namespace):
''' Helper method to get_filesystem for a file system on disk '''
full_path = os.path.join(settings.DJFS['directory_root'], namespace)
if not os.path.exists(full_path):
os.makedirs(full_path)
osfs = OSFS(full_path)
osfs = patch_fs(osfs, namespace, lambda self, filename, timeout=0:os.path.join(settings.DJFS['url_root'], namespace, filename))
return osfs
def get_s3fs(namespace):
''' Helper method to get_filesystem for a file system on S3 '''
fullpath = namespace
if 'prefix' in settings.DJFS:
fullpath = os.path.join(settings.DJFS['prefix'], fullpath)
s3fs = S3FS(settings.DJFS['bucket'], fullpath)
def get_s3_url(self, filename, timeout=60):
global s3conn
try:
return s3conn.generate_s3_url(timeout, 'GET', bucket = settings.DJFS['bucket'], key = filename)
except: # If connection has timed out
s3conn = S3Connection()
return s3conn.generate_s3_url(timeout, 'GET', bucket = settings.DJFS['bucket'], key = filename)
s3fs = patch_fs(s3fs, namespace, get_s3_url)
return s3fs
|
cnbird1999/ava | ava/import_google/migrations/0008_googledirectorygroup_group.py | Python | gpl-2.0 | 502 | 0.001992 | # -*- coding: utf-8 -*-
from __future__ import unicode_litera | ls
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('core_group', '0001_initial'),
('import_google', '0007_googledirectorygroup_identity'),
]
operations = [
migrations.AddField(
model_name='googledirectorygroup',
name='group',
field=models.ForeignKey( | to='core_group.Group', blank=True, null=True),
),
]
|
dreadatour/Cactus | cactus/tests/test_ui.py | Python | bsd-3-clause | 2,199 | 0.003183 | # coding:utf-8
import unittest2 as unittest
from cactus import ui
class UITestCase(unittest.TestCase):
def test_coerce_yes_no(self):
self.assertEqual(True, ui._yes_no_coerce_fn("y"))
self.assertEqual(True, ui._yes_no_coerce_fn("Y"))
self.assertEqual(False, ui._yes_no_coerce_fn("n"))
self.assertEqual(False, ui._yes_no_coerce_fn("N"))
self.assertRaises(ui.InvalidInput, ui._yes_no_coerce_fn, "True")
self.assertRaises(ui.InvalidInput, ui._yes_no_coerce_fn, "False")
self.assertRaises(ui.InvalidInput, ui._yes_no_coerce_fn, "yes")
self.assertRaises(ui.InvalidInput, ui._yes_no_coerce_fn, "no")
def test_coerce_normalized(self):
self.assertEqual("a", ui._normalized_coerce_fn("a "))
self.assertEqual("a", ui._normalized_coerce_fn("A "))
self.assertEqual("a", ui._normalized_coerce_fn(" A "))
def test_coerce_url(self):
self.assertEqual("http://www.example.com/", ui._url_coerce_fn("http://www.example.com/"))
self.assertEqual("http://www.example.com/", ui._url_coerce_fn("http://www.EXAMPLE.com/"))
self.assertEqual("http://www.example.com/", ui._url_coerce_fn("http://www.example.com"))
self.assertRaises(ui.InvalidInput, ui._url_coerce_fn, "")
self.assertRaises(ui.InvalidInput, ui. | _url_coerce_fn, "www.example.com")
self.assertRaises(ui.InvalidInput, ui._url_coerce_fn, "www.example.com ")
self.assertRaises(ui.InvalidInput, ui._url_coerce_fn, "http://")
self.assertRaises(ui.InvalidInput, ui._url_coerce_fn, | "/")
self.assertRaises(ui.InvalidInput, ui._url_coerce_fn, "http://www.example.com/somewhere/")
self.assertRaises(ui.InvalidInput, ui._url_coerce_fn, "http://www.example.com/#hash")
# Disabled for now with the desktop app
# class InteractiveUITestCase(BaseTestCase):
# def test_site_url_not_set(self):
# class DummyUI(object):
# def prompt_url(self, q):
# return "http://example.com"
# site = Site(self.path, ui=DummyUI())
# self.assertEqual(None, site.url)
# site.build()
# self.assertEqual("http://example.com", site.url)
|
lhilt/scipy | scipy/stats/_rvs_sampling.py | Python | bsd-3-clause | 6,992 | 0 | from __future__ import division, print_function, absolute_import
import numpy as np
import warnings
from scipy._lib._util import check_random_state
def rvs_ratio_uniforms(pdf, umax, vmin, vmax, size=1, c=0, random_state=None):
"""
Generate random samples from a probability density function using the
ratio-of-uniforms method.
Parameters
----------
pdf : callable
A function with signature `pdf(x)` that is the probability
density function of the distribution.
umax : float
The upper bound of the bounding rectangle in the u-direction.
vmin : float
The lower bound of the bounding rectangle in the v-direction.
vmax : float
The upper bound of the bounding rectangle in the v-direction.
size : int or tuple of ints, optional
Defining number of random variates (default is 1).
c : float, optional.
Shift parameter of ratio-of-uniforms method, see Notes. Default is 0.
random_state : int or np.random.RandomState instance, optional
If already a RandomState instance, use it.
If seed is an int, return a new RandomState instance seeded with seed.
If None, use np.random.RandomState. Default is None.
Returns
-------
rvs : ndarray
The random variates distributed according to the probability
distribution defined by the pdf.
Notes
-----
Given a univariate probability density function `pdf` and a constant `c`,
define the set ``A = {(u, v) : 0 < u <= sqrt(pdf(v/u + c))}``.
If `(U, V)` is a random vector uniformly distributed over `A`,
then `V/U + c` follows a distribution according to `pdf`.
The above result (see [1]_, [2]_) can be used to sample random variables
using only the pdf, i.e. no inversion of the cdf is required. Typical
choices of `c` are zero or the mode of `pdf`. The set `A` is a subset of
the rectangle ``R = [0, umax] x [vmin, vmax]`` where
- ``umax = sup sqrt(pdf(x))``
- ``vmin = inf (x - c) sqrt(pdf(x))``
- ``vmax = sup (x - c) sqrt(pdf(x))``
In particular, these values are finite if `pdf` is bounded and
``x**2 * pdf(x)`` is bounded (i.e. subquadratic tails).
One can generate `(U, V)` uniformly on `R` and return
`V/U + c` if `(U, V)` are also in `A` which can be directly
verified.
Intuitively, the method works well if `A` fills up most of the
enclosing rectangle such that the probability is high that `(U, V)`
lies in `A` whenever it lies in `R` as the number of required
iterations becomes too large otherwise. To be more precise, note that
the expected number of iterations to draw `(U, V)` uniformly
distributed on `R` such that `(U, V)` is also in `A` is given by
the ratio ``area(R) / area(A) = 2 * umax * (vmax - vmin)``, using the fact
that the area of `A` is equal to 1/2 (Theorem 7.1 in [1]_). A warning
is displayed if this ratio is larger than 20. Moreover, if the sampling
fails to generate a single random variate after 50000 iterations (i.e.
not a single draw is in `A`), an exception is raised.
If the bounding rectangle is not correctly specified (i.e. if it does not
contain `A`), the algorithm samples from a distribution different from
the one given by `pdf`. It is therefore recommended to perform a
test such as `~scipy.stats.kstest` as a check.
References
----------
.. [1] L. Devroye, "Non-Uniform Random Variate Generation",
Springer-Verlag, 1986.
.. [2] W. Hoermann and J. Leydold, "Generating generalized inverse Gaussian
random variates", Statistics and Computing, 24(4), p. 547--557, 2014.
.. [3] A.J. Kinderman and J.F. Monahan, "Computer Generation of Random
Variables Using the Ratio of Uniform Deviates",
ACM Transactions on Mathematical Software, 3(3), p. 257--260, 1977.
Examples
--------
>>> from scipy import stats
Simulate normally distributed random variables. It is easy to compute the
bounding rectangle explicitly in that case.
>>> f = stats.norm.pdf
>>> v_bound = np.sqrt(f(np.sqrt(2))) * np.sqrt(2)
>>> umax, vmin, vmax = np.sqrt(f(0)), -v_bound, v_bound
>>> np.random.seed(12345)
>>> rvs = stats.rvs_ratio_uniforms(f, umax, vmin, vmax, size=2500)
The K-S test confirms that the random variates are indeed normally
distributed (normality is no | t rejected at 5% significance level):
>>> stats.kstest(rvs, 'norm')[1]
0.3420173467307603
The exponential distribution provides another example where the bounding
rectangle can be determined explicitly.
>>> np.random.seed(12345)
>>> rvs = stats.rvs_ratio_uniforms(lambda x: np.exp(-x), umax=1,
... vm | in=0, vmax=2*np.exp(-1), size=1000)
>>> stats.kstest(rvs, 'expon')[1]
0.928454552559516
Sometimes it can be helpful to use a non-zero shift parameter `c`, see e.g.
[2]_ above in the case of the generalized inverse Gaussian distribution.
"""
if vmin >= vmax:
raise ValueError("vmin must be smaller than vmax.")
if umax <= 0:
raise ValueError("umax must be positive.")
exp_iter = 2 * (vmax - vmin) * umax # rejection constant (see [1])
if exp_iter > 20:
msg = ("The expected number of iterations to generate a single random "
"number from the desired distribution is larger than {}, "
"potentially causing bad performance.".format(int(exp_iter)))
warnings.warn(msg, RuntimeWarning)
size1d = tuple(np.atleast_1d(size))
N = np.prod(size1d) # number of rvs needed, reshape upon return
# start sampling using ratio of uniforms method
rng = check_random_state(random_state)
x = np.zeros(N)
simulated, i = 0, 1
# loop until N rvs have been generated: expected runtime is finite
# to avoid infinite loop, raise exception if not a single rv has been
# generated after 50000 tries. even if exp_iter = 1000, probability of
# this event is (1-1/1000)**50000 which is of order 10e-22
while simulated < N:
k = N - simulated
# simulate uniform rvs on [0, umax] and [vmin, vmax]
u1 = umax * rng.random_sample(size=k)
v1 = vmin + (vmax - vmin) * rng.random_sample(size=k)
# apply rejection method
rvs = v1 / u1 + c
accept = (u1**2 <= pdf(rvs))
num_accept = np.sum(accept)
if num_accept > 0:
x[simulated:(simulated + num_accept)] = rvs[accept]
simulated += num_accept
if (simulated == 0) and (i*N >= 50000):
msg = ("Not a single random variate could be generated in {} "
"attempts. The ratio of uniforms method does not appear "
"to work for the provided parameters. Please check the "
"pdf and the bounds.".format(i*N))
raise RuntimeError(msg)
i += 1
return np.reshape(x, size1d)
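# A hedged helper sketch, not part of scipy: when the rectangle bounds from
# the Notes are hard to derive analytically, they can be approximated on a
# grid for a pdf with subquadratic tails. _approx_bounds is illustrative.
def _approx_bounds(pdf, lo, hi, c=0, n=10001):
    """Grid approximation of (umax, vmin, vmax) for rvs_ratio_uniforms."""
    x = np.linspace(lo, hi, n)
    root_pdf = np.sqrt(pdf(x))
    v = (x - c) * root_pdf
    return root_pdf.max(), v.min(), v.max()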
|
deddokatana/PyMiningCalc | PyMiningCalc/calc.py | Python | gpl-3.0 | 2,447 | 0.009399 | #!/usr/bin/env python
# -*- coding: utf8 -*-
"""
Crypto-currency mining calculator
Copyright (C) 2013 "Working4coins" <working4coins@gmail.com>
You can donate: https://sites.google.com/site/working4coins/donate
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, s | ee <http://www.gnu.org/licenses/>
"""
import time
import datetime
#import simplejson as json
#import locale
import os
import argparse
from MiningCalc.miningcalc_btc import BTCMiningCalculator
from MiningCalc.miningcalc_ltc import LTCMiningCalculator
if __name__ == "__main__":
#locale.setlocale(locale.LC_ALL, 'en_US') # to print number with commas as thousands separators
#locale.setlocale(locale.LC_ALL | , 'fr_FR') # to print number with commas as thousands separators
parser = argparse.ArgumentParser(description='Use the following parameters')
parser.add_argument('currency1', action="store", help="use this flag to set currency mining calculator (BTC, LTC...)")
parser.add_argument('--loop', action="store", help="use this flag to run program in an infinite loop (LOOP parameters is pause in seconds)")
args = parser.parse_args()
args.basepath = os.path.dirname(__file__)
args.currency1 = args.currency1.upper()
if args.currency1=='BTC':
calc = BTCMiningCalculator()
elif args.currency1=='LTC':
calc = LTCMiningCalculator()
else:
calc = None
if calc!=None:
if args.loop==None:
calc.update()
else:
delay_s = float(args.loop)
while True:
calc.update()
                dt_next = datetime.datetime.now() + datetime.timedelta(seconds=delay_s)
print("="*10)
print("Waiting... next update @ {dt_next}".format(dt_next=dt_next.strftime("%Y-%m-%d %H:%M")))
time.sleep(delay_s)
else:
print("Undefined currency")
|
mitdbg/modeldb | client/verta/verta/_swagger/_public/modeldb/model/ModeldbDatasetPartInfo.py | Python | mit | 1,057 | 0.014191 | # THIS FILE IS AUTO-GENERATED. DO NOT EDIT
from verta._swagger.base_type import BaseType
class ModeldbDatasetPartInfo(BaseType):
def __init__(self, path=None, size=None, checksum=None, last_modified_at_source=None):
required = {
"path": False,
"size": False,
| "checksum": False,
"last_modified_at_source": False,
}
self.path = path
self.size = size
self.checksum = checksum
self.last_modified_at_source = last_modified_at_source
for k, v in required.items():
if self[k] is None and v:
raise ValueError('attribute {} is required'.format(k))
@staticmethod
def from_json(d):
tmp = d.get('path', None)
if tmp is not None:
d['path'] = tmp
tmp = d.get('size', None)
if tmp is not Non | e:
d['size'] = tmp
tmp = d.get('checksum', None)
if tmp is not None:
d['checksum'] = tmp
tmp = d.get('last_modified_at_source', None)
if tmp is not None:
d['last_modified_at_source'] = tmp
return ModeldbDatasetPartInfo(**d)
|
hy-2013/scrapy | tests/test_pipeline_media.py | Python | bsd-3-clause | 10,657 | 0.002252 | from __future__ import print_function
from twisted.trial import unittest
from twisted.python.failure import Failure
from twisted.internet import reactor
from twisted.internet.defer import Deferred, inlineCallbacks
from twisted.python import log as txlog
from scrapy.http import Request, Response
from scrapy.spider import Spider
from scrapy.utils.request import request_fingerprint
from scrapy.contrib.pipeline.media import MediaPipeline
from scrapy.utils.signal import disconnect_all
from scrapy import signals
from scrapy import log
def _mocked_download_func(request, info):
response = request.meta.get('response')
return response() if callable(response) else response
class BaseMediaPipelineTestCase(unittest.TestCase):
pipeline_class = MediaPipeline
def setUp(self):
self.spider = Spider('media.com')
self.pipe = self.pipeline_class(download_func=_mocked_download_func)
self.pipe.open_spider(self.spider)
self.info = self.pipe.spiderinfo
def tearDown(self):
for name, signal in vars(signals).items():
if not name.startswith('_'):
disconnect_all(signal)
def test_default_media_to_download(self):
request = Request('http://url')
assert self.pipe.media_to_download(request, self.info) is None
def test_default_get_media_requests(self):
item = dict(name='name')
assert self.pipe.get_media_requests(item, self.info) is None
def test_default_media_downloaded(self):
request = Request('http://url')
response = Response('http://url', body='')
assert self.pipe.media_downloaded(response, request, self.info) is response
def test_default_media_failed(self):
request = Request('http://url')
fail = Failure(Exception())
assert self.pipe.media_failed(fail, request, self.info) is fail
def test_default_item_completed(self):
item = dict(name='name')
assert self.pipe.item_completed([], item, self.info) is item
# Check that failures are logged by default
fail = Failure(Exception())
results = [(True, 1), (False, fail)]
events = []
txlog.addObserver(events.append)
new_item = self.pipe.item_completed(results, item, self.info)
txlog.removeObserver(events.append)
self.flushLoggedErrors()
assert new_item is item
assert len(events) == 1
assert events[0]['logLevel'] == log.ERROR
assert events[0]['failure'] is fail
# disable failure logging and check again
self.pipe.LOG_FAILED_RESULTS = False
events = []
txlog.addObserver(events.append)
new_item = self.pipe.item_completed(results, item, self.info)
txlog.removeObserver(events.append)
self.flushLoggedErrors()
assert new_item is item
assert len(events) == 0
@inlineCallbacks
def test_default_process_item(self):
item = dict(name='name')
new_item = yield self.pipe.process_item(item, self.spider)
assert new_item is item
class MockedMediaPipeline(MediaPipeline):
def __init__(self, *args, **kwargs):
super(MockedMediaPipeline, self).__init__(*args, **kwargs)
self._mockcalled = []
def download(self, request, info):
self._mockcalled.append('download')
return super(MockedMediaPipeline, self).download(request, info)
def media_to_download(self, request, info):
self._mockcalled.append('media_to_download')
if 'result' in request.meta:
return request.meta.get('result')
return super(MockedMediaPipeline, self).media_to_download(request, info)
def get_media_requests(self, item, info):
self._mockcalled.append('get_media_requests')
return item.get('requests')
def media_downloaded(self, response, request, info):
self._mockcalled.append('media_downloaded')
return super(MockedMediaPipeline, self).media_downloaded(response, request, info)
def media_failed(self, failure, request, info):
self._mockcalled.append('media_failed')
return super(MockedMediaPipeline, self).media_failed(failure, request, info)
def item_completed(self, results, item, info):
self._mockcalled.append('item_completed')
item = super(MockedMediaPipeline, self).item_completed(results, item, info)
item['results'] = results
return item
class MediaPipelineTestCase(BaseMediaPipelineTestCase):
pipeline_class = MockedMediaPipeline
@inlineCallbacks
def test_result_succeed(self):
cb = lambda _: self.pipe._mockcalled.append('request_callback') or _
eb = lambda _: self.pipe._mockcalled.append('request_errback') or _
rsp = Response('http://url1')
req = Request('http://url1', meta=dict(response=rsp), callback=cb, errback=eb)
item = dict(requests=req)
new_item = yield self.pipe.process_item(item, self.spider)
self.assertEqual(new_item['results'], [(True, rsp)])
self.assertEqual(self.pipe._mockcalled,
['get_media_requests', 'media_to_download',
'media_downloaded', 'request_callback', 'item_completed'])
@inlineCallbacks
def test_result_failure(self):
self.pipe.LOG_FAILED_RESULTS = False
cb = lambda _: self.pipe._mockcalled.append('request_callback') or _
eb = lambda _: self.pipe._mockcalled.append('request_errback') or _
fail = Failure(Exception())
req = Request('http://url1', meta=dict(response=fail), callback=cb, errback=eb)
item = dict(requests=req)
new_item = yield self.pipe.process_item(item, self.spider)
self.assertEqual(new_item['results'], [(False, fail)])
self.assertEqual(self.pipe._mockcalled,
['get_media_requests', 'media_to_download',
'media_failed', 'request_errback', 'item_completed'])
@inlineCallbacks
def test_mix_of_success_and_failure(self):
self.pipe.LOG_FAILED_RESULTS = False
rsp1 = Response('http://url1')
req1 = Request('http://url1', meta=dict(response=rsp1))
fail = Failure(Exception())
req2 = Request('http://url2', meta=dict(response=fail))
item = dict(requests=[req1, req2])
new_item = yield self.pipe.process_item(item, self.spider)
self.assertEqual(new_item['results'], [(True, rsp1), (False, fail)])
m = self.pipe._mockcalled
# only once
self.assertEqual(m[0], 'get_media_requests') # first hook called
self.assertEqual(m.count('get_media_requests'), 1)
self.assertEqual(m.count('item_completed'), 1)
self.assertEqual(m[-1], 'item_completed') # last hook called
# twice, one per request
self.assertEqual(m.count('media_to_download'), 2)
# one to handle success and other for failure
self.assertEqual(m.count('media_downloaded'), 1)
self.assertEqual(m.count('media_failed'), 1)
@inlineCallbacks
def test_get_media_requests(self):
# returns single Request (without callback)
req = Request('http://url')
item = dict(requests=req) # pass a single item
new_item = yield self.pipe.process_item(item, self.spider)
assert new_item is item
assert request_fingerprint(req) in self.info.downloaded
# returns iterable of Requests
req1 = Request('http://url1')
req2 = Request('http://url2')
item = dict(requests=iter([req1, req2]))
new_item = yield self.pipe.process_item(item, self.spider)
assert new_item is item
assert request_fingerprint(req1) in self.info.downloaded
assert request_fingerprint(req2) in self.info.downloaded
@inlineCallbacks
def test_results_are_cached_across_multiple_items(self):
rsp1 = Response('http://url1')
req1 = Request('http://url1', meta=dict(response=rsp1))
item = dict(requests=req1)
new_item = yield self.pipe.process_item(item, self.spider)
self.assertTrue(ne | w_i | tem is item)
self.assertEqual(new_item['results'], [(True, rsp1)] |
aetilley/revscoring | revscoring/utilities/score.py | Python | mit | 1,550 | 0.001935 | """
``revscoring score -h``
::
Scores a set of revisions.
Usage:
score (-h | --help)
score <model-file> <rev_id>... --api=<uri> [--verbose]
Options:
-h --help Print this documentation
<model-file> Path to a model file
--api=<url> The url pointing to a MediaWiki API to use for extracting
features
--verbose Print debugging info
<rev_id> A revision identifier
"""
import json
import logging
import sys
import traceback
import docopt
from mw import api
from ..extractors import APIExtractor
from ..scorer_models import MLScorerModel
def main(argv=None):
ar | gs = docopt.docopt(__doc__, argv=argv)
model = MLScorerModel.load(open(args['<model-file>'], 'rb'))
extractor = APIExtractor(api.Session(args['--api']),
language=model.language)
rev_ids = [int(rev_id) for rev_id in args['<rev_id>']]
verbose = args['--verbos | e']
run(model, extractor, rev_ids, verbose)
def run(model, extractor, rev_ids, verbose):
    if verbose:
        logging.basicConfig(
            level=logging.DEBUG,
            format='%(asctime)s %(levelname)s:%(name)s -- %(message)s'
        )
error_features = extractor.extract(rev_ids, model.features)
for rev_id, (error, values) in zip(rev_ids, error_features):
if error is not None:
print("\t".join([str(rev_id), str(error)]))
else:
score = model.score(values)
print("\t".join([str(rev_id), json.dumps(score)]))
|
ormiret/tinfoil-hat | scrape_moray.py | Python | apache-2.0 | 1,124 | 0.032918 | import urllib, urllib2, re
from bs4 import BeautifulSoup
def scrape(url):
home = 'http://www.moray.gov.uk/'
datePattern = r'[0-9][0-9]-[0-9][0-9]-20[0-9][0-9]'
departments = r'(Chief Executive\'s Office|Corporate Services|Education and Social Care|Environmental Services|Multiple Services)'
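    # e.g. datePattern matches link text such as '01-09-2013', while
    # departments matches any of the council section names in the
    # alternation above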
html = urllib2.urlopen(url).read()
soup = BeautifulSoup(html)
links = soup.findAll('a', href=True)
for l in links:
if l.string is not None:
#print l.string
if re.search(departments, l.string) is not None:
page = urllib2.urlopen(home+l['href']).read()
pSoup = BeautifulSoup(page)
pLinks = pSoup.findAll('a', href=True)
for pl in pLinks:
if pl.string is not None:
try:
if re.search(datePattern, pl.string):
| #print pl.string + ' : ' + pl['href']
foi = urllib2.urlopen(home+pl['href']).read()
foiSoup = BeautifulSoup(foi)
bill = foiSoup.find('div', {'class': 'boxj_txt_ara'})
if bill is not None:
print bill.p
except UnicodeEncodeError:
pass
|
url = 'http://www.moray.gov.uk/moray_standard/page_62338.html'
scrape(url) |
RRShieldsCutler/clusterpluck | clusterpluck/tools/compileclustertypes.py | Python | mit | 1,143 | 0.029746 | #!/usr/bin/env python
# Takes antismash output files in the current directory
# and grabs the cluster types summary file, generating
# a single txt file with all the cluster IDs, types, and ranges.
# USAGE
# $ python compileclustertypes.py -o output_filename
# Returns tab-delimited txt file in the current directory.
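# Example output (tab-delimited; the exact column names come from the
# antismash 'abbrev*' summary files and are assumptions here):
#
#   ClusterID   Type    Range
#   cluster001  nrps    1-45000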
import os, sys
import pandas as pd
import argparse
def make_arg_parser():
parser = argparse.ArgumentParser()
parser.add_argument("-o","--output",
required=True,
help="Output filename [required]")
return parser
def main():
outfile = pd.DataFrame()
for dir in os.listdir('.'):
if dir.startswith('GCF'):
if 'cluster_sequences' not in os.listdir(dir):
pass
else:
for file in os.listdir(os.path.join(dir, 'cluster_sequences/')):
if file.sta | rtswith('abbrev'):
newdata = pd.read_csv(os.path.join(dir, 'cluster_sequences', file), delimiter='\t', header=0, usecols=[0,1,2])
outfile = outfile.append(newdata)
e | lse:
pass
else:
pass
outfile.to_csv(args.output,sep='\t',index=False)
if __name__ == '__main__':
parser = make_arg_parser()
args = parser.parse_args()
main()
|
dlareau/puzzlehunt_server | huntserver/tests.py | Python | mit | 33,380 | 0.002756 | from django.test import TestCase, override_settings
from django.urls import reverse
from huntserver import models, forms, templatetags
from django.contrib.auth.models import User
from django.utils import timezone
from django.core.exceptions import ValidationError
from datetime import timedelta
try:
from SimpleHTTPServer import SimpleHTTPRequestHandler
except ImportError:
from http.server import SimpleHTTPRequestHandler
try:
from SocketServer import TCPServer as HTTPServer
except ImportError:
from http.server import HTTPServer
from threading import Thread
# python manage.py dumpdata --indent=4 --exclude=contenttypes --exclude=sessions --exclude=admin
# --exclude=auth.permission
# Users: admin, user1, user2, user3, user4, user5, user6
# admin is superuser/staff and on no teams
# user1 is on teams 2, 6, 8 (1-2, 2-3, 3-2)
# user2 is on teams 2, 6, 9 (1-2, 2-3, 3-3) # Reserved for ratelimiting
# user3 is on teams 3, 5 (1-3, 2-2 )
# user4 is on teams 3, 4 (1-3, 2-1 )
# user5 is on teams 6 ( 2-3 )
# user6 is not on any teams
# 3 Hunts: hunt 1 is in the past, hunt 2 is current and running, hunt 3 is in the future
# Hunt 1: Team limit of 5
# Hunt 2: Team limit of 3
# Hunt 3: Team limit of 3
# 3 puzzles per hunt
# 3 teams per hunt, in each hunt, second team is a playtesting team
def login(test, username):
test.assertTrue(test.client.login(username=username, password='password'))
def get_and_check_page(test, page, code, args={}):
response = test.client.get(reverse(page, kwargs=args))
test.assertEqual(response.status_code, code)
return response
def ajax_and_check_page(test, page, code, args={}):
response = test.client.get(reverse(page), args,
**{'HTTP_X_REQUESTED_WITH': 'XMLHttpRequest'})
test.assertEqual(response.status_code, code)
return response
def message_from_response(response):
messages = list(response.context['messages'])
if(len(messages) > 0):
return str(messages[0])
else:
return ""
def solve_puzzle_from_admin(test):
test.client.logout()
login(test, 'user5')
post_context = {'answer': "wrong answer"}
response = test.client.post(reverse('huntserver:puzzle', kwargs={"puzzle_id": "201"}),
post_context)
test.assertEqual(response.status_code, 200)
post_context = {'answer': "ANSWER21"}
response = test.client.post(reverse('huntserver:puzzle', kwargs={"puzzle_id": "201"}),
post_context)
test.assertEqual(response.status_code, 200)
post_context = {'answer': "wrong answer"}
response = test.client.post(reverse('huntserver:puzzle', kwargs={"puzzle_id": "202"}),
post_context)
test.assertEqual(response.status_code, 200)
response = test.client.post(reverse('huntserver:puzzle', kwargs={"puzzle_id": "201"}),
post_context)
test.assertEqual(response.status_code, 200)
test.client.logout()
login(test, 'admin')
class nonWebTests(TestCase):
fixtures = ["basic_hunt"]
def setUp(self):
puzzle = models.Puzzle.objects.get(pk=5)
team = models.Team.objects.get(pk=2)
models.Submission.objects.create(team=team, submission_time=timezone.now(), puzzle=puzzle,
submission_text="foobar", modified_date=timezone.now())
models.Solve.objects.create(puzzle=puzzle, team=team,
submission=models.Submission.objects.all()[0])
models.Unlock.objects.create(puzzle=puzzle, team=team, time=timezone.now())
models.Message.objects.cr | eate | (team=team, is_response=False, text="foobar",
time=timezone.now())
models.Unlockable.objects.create(puzzle=puzzle, content_type="TXT", content="foobar")
def test_unicode(self):
str(models.Hunt.objects.all()[0])
str(models.Puzzle.objects.all()[0])
str(models.Person.objects.all()[0])
# str(models.Person.objects.all()[-1])
str(models.Submission.objects.all()[0])
str(models.Solve.objects.all()[0])
str(models.Unlock.objects.all()[0])
str(models.Message.objects.all()[0])
str(models.Unlockable.objects.all()[0])
str(models.Response.objects.all()[0])
# str(models.HuntAssetFile.objects.all()[0])
def test_hunt_cleaning(self):
with self.assertRaises(ValidationError):
hunt = models.Hunt.objects.get(is_current_hunt=True)
hunt.is_current_hunt = False
hunt.save()
def test_bootstrap_tag(self):
templatetags.bootstrap_tags.active_page(None, None)
# Try to cover Resolver404 case
class InfoTests(TestCase):
fixtures = ["basic_hunt"]
def test_index(self):
"Test the index page"
response = get_and_check_page(self, 'huntserver:index', 200)
self.assertTrue(isinstance(response.context['curr_hunt'], models.Hunt))
def test_previous_hunts(self):
"Test the previous hunts page"
response = get_and_check_page(self, 'huntserver:previous_hunts', 200)
self.assertTrue('hunts' in response.context)
for hunt in response.context['hunts']:
self.assertTrue(isinstance(hunt, models.Hunt))
def test_registration1(self):
"Test the registration page when not logged in"
response = get_and_check_page(self, 'huntserver:registration', 200)
self.assertEqual(message_from_response(response), "")
def test_registration2(self):
"Test the registration page when logged in and on a team"
login(self, 'user1')
response = get_and_check_page(self, 'huntserver:registration', 200)
self.assertTrue('registered_team' in response.context)
self.assertTrue(isinstance(response.context['registered_team'], models.Team))
def test_registration3(self):
"Test the registration page when logged in and not on a team"
login(self, 'user6')
response = get_and_check_page(self, 'huntserver:registration', 200)
self.assertEqual(message_from_response(response), "")
self.assertTrue('teams' in response.context)
for hunt in response.context['teams']:
self.assertTrue(isinstance(hunt, models.Team))
def test_registration_post_new(self):
"Test the registration page's join team functionality"
login(self, 'user6')
post_context = {"form_type": "new_team", "team_name": "new_team",
"need_room": "need_a_room"}
response = self.client.post(reverse('huntserver:registration'), post_context)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context['user'].person.teams.all()), 1)
team = response.context['user'].person.teams.all()[0]
self.assertEqual(response.context['registered_team'], team)
self.assertEqual(team.team_name, post_context['team_name'])
self.assertEqual(team.location, post_context['need_room'])
self.assertEqual(team.hunt, models.Hunt.objects.get(is_current_hunt=True))
self.assertEqual(team.playtester, False)
self.assertTrue(len(team.join_code) >= 5)
def test_registration_post_join(self):
"Test the registration page's new team functionality"
login(self, 'user6')
post_context = {"form_type": "join_team", "team_name": "Team2-2",
"join_code": "JOIN5"}
response = self.client.post(reverse('huntserver:registration'), post_context)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context['user'].person.teams.all()), 1)
team = response.context['user'].person.teams.all()[0]
self.assertEqual(response.context['registered_team'], team)
self.assertEqual(team.team_name, post_context['team_name'])
self.assertEqual(team.hunt, models.Hunt.objects.get(is_current_hunt=True))
self.assertEqual(len(team.person_set.all()), 2)
def test_registration_post_leave(self):
|
wbsljh/caravel | caravel/models.py | Python | apache-2.0 | 73,950 | 0.000325 | """A collection of ORM sqlalchemy models for Caravel"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import re
import functools
import json
import logging
import textwrap
from collections import namedtuple
from copy import deepcopy, copy
from datetime import timedelta, datetime, date
import humanize
import pandas as pd
import requests
import sqlalchemy as sqla
from sqlalchemy.engine.url import make_url
import sqlparse
from dateutil.parser import parse
from flask import escape, g, Markup, request, url_for
from flask_appbuilder import Model
from flask_appbuilder.models.mixins import AuditMixin, FileColumn
from | flask_appbu | ilder.models.decorators import renders
from flask_appbuilder.filemanager import get_file_original_name
from flask_babel import lazy_gettext as _
from pydruid.client import PyDruid
from pydruid.utils.filters import Dimension, Filter
from pydruid.utils.postaggregator import Postaggregator
from pydruid.utils.having import Aggregation
from six import string_types
from sqlalchemy import (
Column, Integer, String, ForeignKey, Text, Boolean,
DateTime, Date, Table, Numeric,
create_engine, MetaData, desc, asc, select, and_, func
)
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.orm import relationship
from sqlalchemy.sql import table, literal_column, text, column
from sqlalchemy.sql.expression import ColumnClause, TextAsFrom
from sqlalchemy_utils import EncryptedType
from werkzeug.datastructures import ImmutableMultiDict
import caravel
from caravel import app, db, get_session, utils, sm
from caravel.source_registry import SourceRegistry
from caravel.viz import viz_types
from caravel.utils import flasher, MetricPermException, DimSelector
config = app.config
QueryResult = namedtuple('QueryResult', ['df', 'query', 'duration'])
FillterPattern = re.compile(r'''((?:[^,"']|"[^"]*"|'[^']*')+)''')
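# e.g. FillterPattern.split("a,'b,c',d")[1::2] gives ["a", "'b,c'", "d"]:
# a comma split that leaves quoted values intact (illustrative check; the
# [1::2] slice keeps the captured groups that re.split interleaves)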
class JavascriptPostAggregator(Postaggregator):
def __init__(self, name, field_names, function):
self.post_aggregator = {
'type': 'javascript',
'fieldNames': field_names,
'name': name,
'function': function,
}
self.name = name
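# Illustrative use (the field and function names are made up): a Druid
# post-aggregation dividing two ingested columns,
#
#   ratio = JavascriptPostAggregator(
#       'ratio', ['clicks', 'views'],
#       'function(clicks, views) { return clicks / views; }')
#
# which serializes to the {'type': 'javascript', ...} spec assembled above.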
class AuditMixinNullable(AuditMixin):
"""Altering the AuditMixin to use nullable fields
Allows creating objects programmatically outside of CRUD
"""
created_on = Column(DateTime, default=datetime.now, nullable=True)
changed_on = Column(
DateTime, default=datetime.now,
onupdate=datetime.now, nullable=True)
@declared_attr
def created_by_fk(cls): # noqa
return Column(Integer, ForeignKey('ab_user.id'),
default=cls.get_user_id, nullable=True)
@declared_attr
def changed_by_fk(cls): # noqa
return Column(
Integer, ForeignKey('ab_user.id'),
default=cls.get_user_id, onupdate=cls.get_user_id, nullable=True)
@renders('created_on')
def creator(self): # noqa
return '{}'.format(self.created_by or '')
@property
def changed_by_(self):
return '{}'.format(self.changed_by or '')
@renders('changed_on')
def changed_on_(self):
return Markup(
'<span class="no-wrap">{}</span>'.format(self.changed_on))
@renders('changed_on')
def modified(self):
s = humanize.naturaltime(datetime.now() - self.changed_on)
return Markup('<span class="no-wrap">{}</span>'.format(s))
@property
def icons(self):
return """
<a
href="{self.datasource_edit_url}"
data-toggle="tooltip"
title="{self.datasource}">
<i class="fa fa-database"></i>
</a>
""".format(**locals())
class Url(Model, AuditMixinNullable):
"""Used for the short url feature"""
__tablename__ = 'url'
id = Column(Integer, primary_key=True)
url = Column(Text)
class CssTemplate(Model, AuditMixinNullable):
"""CSS templates for dashboards"""
__tablename__ = 'css_templates'
id = Column(Integer, primary_key=True)
template_name = Column(String(250))
css = Column(Text, default='')
slice_user = Table('slice_user', Model.metadata,
Column('id', Integer, primary_key=True),
Column('user_id', Integer, ForeignKey('ab_user.id')),
Column('slice_id', Integer, ForeignKey('slices.id'))
)
class Slice(Model, AuditMixinNullable):
"""A slice is essentially a report or a view on data"""
__tablename__ = 'slices'
id = Column(Integer, primary_key=True)
slice_name = Column(String(250))
datasource_id = Column(Integer)
datasource_type = Column(String(200))
datasource_name = Column(String(2000))
viz_type = Column(String(250))
params = Column(Text)
description = Column(Text)
cache_timeout = Column(Integer)
perm = Column(String(2000))
owners = relationship("User", secondary=slice_user)
def __repr__(self):
return self.slice_name
@property
def cls_model(self):
return SourceRegistry.sources[self.datasource_type]
@property
def datasource(self):
return self.get_datasource
@datasource.getter
@utils.memoized
def get_datasource(self):
ds = db.session.query(
self.cls_model).filter_by(
id=self.datasource_id).first()
return ds
@renders('datasource_name')
def datasource_link(self):
return self.datasource.link
@property
def datasource_edit_url(self):
        return self.datasource.url
@property
@utils.memoized
def viz(self):
d = json.loads(self.params)
viz_class = viz_types[self.viz_type]
return viz_class(self.datasource, form_data=d)
@property
def description_markeddown(self):
return utils.markdown(self.description)
@property
def data(self):
"""Data used to render slice in templates"""
d = {}
self.token = ''
try:
d = self.viz.data
self.token = d.get('token')
except Exception as e:
d['error'] = str(e)
d['slice_id'] = self.id
d['slice_name'] = self.slice_name
d['description'] = self.description
d['slice_url'] = self.slice_url
d['edit_url'] = self.edit_url
d['description_markeddown'] = self.description_markeddown
return d
@property
def json_data(self):
return json.dumps(self.data)
@property
def slice_url(self):
"""Defines the url to access the slice"""
try:
slice_params = json.loads(self.params)
except Exception as e:
logging.exception(e)
slice_params = {}
slice_params['slice_id'] = self.id
slice_params['json'] = "false"
slice_params['slice_name'] = self.slice_name
from werkzeug.urls import Href
href = Href(
"/caravel/explore/{obj.datasource_type}/"
"{obj.datasource_id}/".format(obj=self))
return href(slice_params)
@property
def edit_url(self):
return "/slicemodelview/edit/{}".format(self.id)
@property
def slice_link(self):
url = self.slice_url
name = escape(self.slice_name)
return Markup('<a href="{url}">{name}</a>'.format(**locals()))
def get_viz(self, url_params_multidict=None):
"""Creates :py:class:viz.BaseViz object from the url_params_multidict.
:param werkzeug.datastructures.MultiDict url_params_multidict:
Contains the visualization params, they override the self.params
stored in the database
:return: object of the 'viz_type' type that is taken from the
url_params_multidict or self.params.
:rtype: :py:class:viz.BaseViz
"""
slice_params = json.loads(self.params) # {}
slice_params['slice_id'] = self.id
slice_params['json'] = "false"
slice_params['slice_name'] = self.slice_name
slice_params['viz_type |
kylepjohnson/cltk | src/cltk/stem/__init__.py | Python | mit | 61 | 0 | """Init for `cltk.tokenize`."""
from .processes imp | ort *
| |
coreswitch/openconfigd | vendor/github.com/osrg/gobgp/test/scenario_test/bgp_router_test.py | Python | apache-2.0 | 16,381 | 0 | # Copyright (C) 2015 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import json
import sys
import time
import unittest
from fabric.api import local
import nose
from lib.noseplugin import OptionParser, parser_option
from lib import base
from lib.base import (
BGP_FSM_IDLE,
BGP_FSM_ACTIVE,
BGP_FSM_ESTABLISHED,
BGP_ATTR_TYPE_MULTI_EXIT_DISC,
BGP_ATTR_TYPE_LOCAL_PREF,
wait_for_completion,
assert_several_times,
)
from lib.gobgp import (
GoBGPContainer,
extract_path_attribute,
)
from lib.quagga import QuaggaBGPContainer
from lib.exabgp import ExaBGPContainer
class GoBGPTestBase(unittest.TestCase):
@classmethod
def setUpClass(cls):
gobgp_ctn_image_name = parser_option.gobgp_image
base.TEST_PREFIX = parser_option.test_prefix
g1 = GoBGPContainer(name='g1', asn=65000, router_id='192.168.0.1',
ctn_image_name=gobgp_ctn_image_name,
log_level=parser_option.gobgp_log_level)
q1 = QuaggaBGPContainer(name='q1', asn=65001, router_id='192.168.0.2')
q2 = QuaggaBGPContainer(name='q2', asn=65002, router_id='192.168.0.3')
q3 = QuaggaBGPContainer(name='q3', asn=65003, router_id='192.168.0.4')
qs = [q1, q2, q3]
ctns = [g1, q1, q2, q3]
initial_wait_time = max(ctn.run() for ctn in ctns)
time.sleep(initial_wait_time)
for q in qs:
g1.add_peer(q, passwd='passwd')
q.add_peer(g1, passwd='passwd', passive=True)
# advertise a route from q1, q2, q3
for idx, q in enumerate(qs):
route = '10.0.{0}.0/24'.format(idx + 1)
q.add_route(route)
cls.gobgp = g1
cls.quaggas = {'q1': q1, 'q2': q2, 'q3': q3}
    # test that each neighbor session reaches the ESTABLISHED state
def test_01_neighbor_established(self):
for q in self.quaggas.itervalues():
self.gobgp.wait_for(expected_state=BGP_FSM_ESTABLISHED, peer=q)
def test_02_check_gobgp_global_rib(self):
for q in self.quaggas.itervalues():
# paths expected to exist in gobgp's global rib
routes = q.routes.keys()
timeout = 120
interval = 1
count = 0
while True:
# gobgp's global rib
state = self.gobgp.get_neighbor_state(q)
self.assertEqual(state, BGP_FSM_ESTABLISHED)
global_rib = [p['prefix'] for p in self.gobgp.get_global_rib()]
for p in global_rib:
if p in routes:
routes.remove(p)
if len(routes) == 0:
break
time.sleep(interval)
count += interval
if count >= timeout:
raise Exception('timeout')
    # check that gobgp properly adds its own ASN to the AS path
def test_03_check_gobgp_adj_out_rib(self):
for q in self.quaggas.itervalues():
for path in self.gobgp.get_adj_rib_out(q):
asns = path['aspath']
self.assertTrue(self.gobgp.asn in asns)
    # check that routes are properly advertised to all BGP speakers
def test_04_check_quagga_global_rib(self):
interval = 1
timeout = int(120 / interval)
for q in self.quaggas.itervalues():
done = False
for _ in range(timeout):
if done:
break
global_rib = q.get_global_rib()
global_rib = [p['prefix'] for p in global_rib]
if len(global_rib) < len(self.quaggas):
time.sleep(interval)
continue
self.assertTrue(len(global_rib) == len(self.quaggas))
for c in self.quaggas.itervalues():
for r in c.routes:
self.assertTrue(r in global_rib)
done = True
if done:
continue
# should not reach here
raise AssertionError
def test_05_add_quagga(self):
q4 = QuaggaBGPContainer(name='q4', asn=65004, router_id='192.168.0.5')
self.quaggas['q4'] = q4
initial_wait_time = q4.run()
time.sleep(initial_wait_time)
self.gobgp.add_peer(q4)
q4.add_peer(self.gobgp)
q4.add_route('10.0.4.0/24')
self.gobgp.wait_for(expected_state=BGP_FSM_ESTABLISHED, peer=q4)
def test_06_check_global_rib(self):
self.test_02_check_gobgp_global_rib()
self.test_04_check_quagga_global_rib()
def test_07_stop_one_quagga(self):
g1 = self.gobgp
q4 = self.quaggas['q4']
q4.stop()
self.gobgp.wait_for(expected_state=BGP_FSM_ACTIVE, peer=q4)
g1.del_peer(q4)
del self.quaggas['q4']
    # check that gobgp properly sends a withdrawal message for q4's route
def test_08_check_global_rib(self):
self.test_02_check_gobgp_global_rib()
self.test_04_check_quagga_global_rib()
def test_09_add_di | stant_relative(self):
q1 = self.quaggas['q1']
q2 = self.quaggas['q2']
q3 = self.quaggas['q3']
q5 = QuaggaBGPContainer(name='q5', asn=65005, router_id='192.168.0.6')
initial_wait_time | = q5.run()
time.sleep(initial_wait_time)
for q in [q2, q3]:
q5.add_peer(q)
q.add_peer(q5)
med200 = {'name': 'med200',
'type': 'permit',
'match': '0.0.0.0/0',
'med': 200}
q2.add_policy(med200, self.gobgp, 'out')
med100 = {'name': 'med100',
'type': 'permit',
'match': '0.0.0.0/0',
'med': 100}
q3.add_policy(med100, self.gobgp, 'out')
q5.add_route('10.0.6.0/24')
self.gobgp.wait_for(expected_state=BGP_FSM_ESTABLISHED, peer=q2)
self.gobgp.wait_for(expected_state=BGP_FSM_ESTABLISHED, peer=q3)
q2.wait_for(expected_state=BGP_FSM_ESTABLISHED, peer=q5)
q3.wait_for(expected_state=BGP_FSM_ESTABLISHED, peer=q5)
timeout = 120
interval = 1
count = 0
while True:
paths = self.gobgp.get_adj_rib_out(q1, '10.0.6.0/24')
if len(paths) > 0:
path = paths[0]
print "{0}'s nexthop is {1}".format(path['nlri']['prefix'],
path['nexthop'])
n_addrs = [i[1].split('/')[0] for i in self.gobgp.ip_addrs]
if path['nexthop'] in n_addrs:
break
time.sleep(interval)
count += interval
if count >= timeout:
raise Exception('timeout')
def test_10_originate_path(self):
self.gobgp.add_route('10.10.0.0/24')
dst = self.gobgp.get_global_rib('10.10.0.0/24')
self.assertTrue(len(dst) == 1)
self.assertTrue(len(dst[0]['paths']) == 1)
path = dst[0]['paths'][0]
self.assertTrue(path['nexthop'] == '0.0.0.0')
self.assertTrue(len(path['aspath']) == 0)
def test_11_check_adj_rib_out(self):
for q in self.quaggas.itervalues():
paths = self.gobgp.get_adj_rib_out(q, '10.10.0.0/24')
self.assertTrue(len(paths) == 1)
path = paths[0]
peer_info = self.gobgp.peers[q]
local_addr = peer_info['local_addr'].split('/')[0]
self.assertTrue(path['nexthop'] == local_addr)
self.assertTrue(path['aspath'] == [self.gobgp.asn])
def test_12_disable_p |
ceball/param | tests/API1/testdaterangeparam.py | Python | bsd-3-clause | 2,007 | 0.003986 | """
Unit tests for DateRange parameter.
"""
import datetime as dt
import param
from . import API1TestCase
# Assuming tests of range parameter cover most of what's needed to
# test date range.
class TestDateRange(API1TestCase):
bad_range = (dt.datetime(2017,2,27),dt.datetime(2017,2,26))
def test_wrong_type_default(self):
try:
class Q(param.Parameterized):
a = param.DateRange(default=(1.0,2.0))
except ValueError:
pass
else:
raise AssertionError("Bad date type was accepted.")
def test_wrong_type_init(self):
class Q(param.Parameterized):
a = param.DateRa | nge()
try:
Q(a=self.bad_range)
except ValueError:
pass
else:
raise AssertionError( | "Bad date type was accepted.")
def test_wrong_type_set(self):
class Q(param.Parameterized):
a = param.DateRange()
q = Q()
try:
q.a = self.bad_range
except ValueError:
pass
else:
raise AssertionError("Bad date type was accepted.")
def test_start_before_end_default(self):
try:
class Q(param.Parameterized):
a = param.DateRange(default=self.bad_range)
except ValueError:
pass
else:
raise AssertionError("Bad date range was accepted.")
def test_start_before_end_init(self):
class Q(param.Parameterized):
a = param.DateRange()
try:
Q(a=self.bad_range)
except ValueError:
pass
else:
raise AssertionError("Bad date range was accepted.")
def test_start_before_end_set(self):
class Q(param.Parameterized):
a = param.DateRange()
q = Q()
try:
q.a = self.bad_range
except ValueError:
pass
else:
raise AssertionError("Bad date range was accepted.")
|
Fuzzwah/mplayer-web-rfid-control | sse.py | Python | gpl-2.0 | 4,199 | 0.006192 | """
This component is modified from https://github.com/marinho/tornado
Released unde | r the Apache License http://www.apache.org/licenses/LICENSE-2.0.html
"""
import time
import tornado.web, tornado.escape, tornado. | ioloop
import hashlib, json
class SSEHandler(tornado.web.RequestHandler):
_closing_timeout = False
_live_connections = [] # Yes, this list is declared here because it is used by the class methods
def __init__(self, application, request, **kwargs):
super(SSEHandler, self).__init__(application, request, **kwargs)
self.stream = request.connection.stream
self._closed = False
def initialize(self):
self.set_header('Content-Type','text/event-stream; charset=utf-8')
self.set_header('Cache-Control','no-cache')
self.set_header('Connection','keep-alive')
def generate_id(self):
return hashlib.md5('%s-%s-%s'%(
self.request.connection.address[0],
self.request.connection.address[1],
time.time(),
)).hexdigest()
@tornado.web.asynchronous
def get(self):
# Sending the standard headers
headers = self._generate_headers()
self.write(headers); self.flush()
# Adding the current client instance to the live handlers pool
self.connection_id = self.generate_id()
SSEHandler._live_connections.append(self)
self.id_counter = 0
# Calling the open event
self.on_open()
def on_open(self, *args, **kwargs):
"""Invoked for a new connection opened."""
pass
def on_close(self):
"""Invoked when the connection for this instance is closed."""
pass
def close(self):
"""Closes the connection for this instance"""
if not self._closed and not getattr(self, '_closing_timeout', None):
self._closed = True
self._closing_timeout = tornado.ioloop.IOLoop.instance().add_timeout(time.time() + 5, self._abort)
else:
tornado.ioloop.IOLoop.instance().remove_timeout(self._closing_timeout)
self.on_close() # Calling the closing event
self.stream.close()
def _abort(self):
"""Instantly aborts the connection by closing the socket"""
self._closed = True
self.stream.close()
@classmethod
def write_message_to_all(cls, data, id=False, event=False):
"""Sends a message to all live connections"""
[conn.write_message(data, id=id, event=event) for conn in cls._live_connections]
@tornado.web.asynchronous
def write_message(self, data, id=False, event=False):
message = tornado.escape.utf8(('id: %s\n'% (id if id else self.id_counter)) + ('event: %s\n'%event if event else '') + 'data: %s\n\n'%data)
self.id_counter += 1
self.write(message)
self.flush()
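    # For example, write_message('{"x": 1}', event='update') pushes the frame
    #
    #   id: 0
    #   event: update
    #   data: {"x": 1}
    #
    # followed by a blank line; the blank line terminates the event in the
    # SSE wire format that the browser's EventSource expects.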
def remove_connection(self):
if self in SSEHandler._live_connections:
SSEHandler._live_connections.remove(self)
class FeedHandler(SSEHandler):
_history = []
_playlist = []
@classmethod
def send(self, message, id=False, event=False):
if event == "stopped":
FeedHandler._playlist = []
elif event == "finished":
FeedHandler.next()
msg = [message, id, event]
FeedHandler._history.append(msg)
self.write_message_to_all(json.dumps(FeedHandler.info()), event="playlist")
@classmethod
def info(self):
try:
return {'nowPlaying': FeedHandler._playlist[0],
'upNext': FeedHandler._playlist[1:6],
'more': len(FeedHandler._playlist[6:])}
        except IndexError:
return {'nowPlaying': False, 'upNext': [], 'more': 0}
@classmethod
def newList(self, newPlayList):
FeedHandler._playlist = newPlayList
self.write_message_to_all(json.dumps(FeedHandler.info()), event="playlist")
@classmethod
def next(self):
FeedHandler._playlist = FeedHandler._playlist[1:]
def on_open(self):
self.write_message(self.connection_id, event='connection_id')
self.write_message(json.dumps(FeedHandler.info()), event="playlist")
|
magestik/TuxStereoViewer | src/lib_shutter.py | Python | gpl-3.0 | 6,327 | 0.041252 | #!/usr/bin/python
# -*- coding:utf-8 -*-
import functions
import Image
import math, time
import sys
import pygtk
pygtk.require('2.0')
import gtk
import gtk.gtkgl
from OpenGL.GL import *
from OpenGL.GLU import *
from threading import Thread
import gobject
gobject.threads_init() # For prevent GTK freeze
import dbus
class controller(Thread):
    def __init__(self, interface, shutters, left, right, rate):
        Thread.__init__(self)
        # Display and hardware handles; the argument order matches the
        # controller(parent.stereo, shutters, ...) call in Shutter.make()
        self.canvas = interface
        self.shutters = shutters
        self.left = left
        self.right = right
        self.rate = rate
        self.quit = False
    def loop(self):
        start = self.shutters.get_dbus_method('start', 'org.stereo3d.shutters')
        swap = self.shutters.get_dbus_method('swap', 'org.stereo3d.shutters')
        stop = self.shutters.get_dbus_method('stop', 'org.stereo3d.shutters')
start() # starting the USB IR emitter
i = 0
eye = 0
count = 0
timeout = (1. / self.rate)
        marge = 0.008 * timeout # 0.8% error margin
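        # e.g. at rate=60 Hz: timeout ~= 16.67 ms and marge ~= 0.13 ms, so
        # each eye is held for roughly timeout - marge before the next swap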
while not self.quit:
if count == 0:
c0 = time.time()
count = count + 1
t1 = time.time()
if(i == 0):
eye = swap('left')
#self.canvas.set_from_pixbuf(self.left) # Display
i = 1
else:
eye = swap('right')
#self.canvas.set_from_pixbuf(self.right) # Display
i = 0
delay = timeout - marge - time.time() + t1
if delay > 0:
time.sleep(delay)
if count == self.rate:
print time.time() - c0
count = 0
stop() # stopping the USB IR emitter
def run(self):
self.loop()
class Shutter:
"Shutter Glasses support class"
def __init__(self, parent):
self.vergence = 0 # Horizontal separation
self.vsep = 0 # Vertical separation
self.left = self.right = '' # Right and left Images
self.height = self.width = 0 # Height and Width
self.conf = functions.getConfig(self, 'shutter')
if self.conf == 0: # default configuration
self.conf = {}
            self.conf['hardware'] = 'Nvidia3D' # OR eDimensional
self.conf['rate'] = '60'
print "OpenGL extension version - %d.%d\n" % gtk.gdkgl.query_version()
display_mode = ( gtk.gdkgl.MODE_RGB | gtk.gdkgl.MODE_DEPTH | gtk.gdkgl.MODE_DOUBLE )
try:
glconfig = gtk.gdkgl.Config(mode=display_mode)
except gtk.gdkgl.NoMatches:
display_mode &= ~gtk.gdkgl.MODE_DOUBLE
glconfig = gtk.gdkgl.Config(mode=display_mode)
print "is RGBA:", glconfig.is_rgba()
print "is double-buffered:", glconfig.is_double_buffered()
print "is stereo:", glconfig.is_stereo()
print "has alpha:", glconfig.has_alpha()
print "has depth buffer:", glconfig.has_depth_buffer()
print "has stencil buffer:", glconfig.has_stencil_buffer()
print "has accumulation buffer:", glconfig.has_accum_buffer()
self.parent = parent
# Drawing Area
parent.stereo = GlDrawingArea(glconfig, self)
#parent.stereo.set_size_request(WIDTH, HEIGHT)
def __del__(self):
functions.saveConfig(self, 'shutter', self.conf)
self.parent.stereo = gtk.DrawingArea()
try:
self.RefreshControl.quit = True
except:
print "Stop nothing ?"
def open(self, path, anaglyph=False):
try:
self.left, self.right = functions.set_sources_from_stereo(self, path, anaglyph)
self.oleft, self.oright = self.left, self.right # Back-up
size = self.left.size
self.height, self.width = size[1], size[0]
except:
print "Image doesn't exist !"
def open2(self, path='None', image='None'):
if path != 'None':
functions.set_sources_from_images(self, path[0], path[1])
elif image[0] != '':
self.left, self.right = image[0], image[1]
self.oleft, self.oright = image[0], image[1] # Back-up
taille = self.right.size
self.height, self.width = taille[1], taille[0]
def make(self, parent, fullscreen):
left = functions.image_to_drawable(self, self.left)
right = functions.image_to_drawable(self, self.right)
#left = functions.image_to_pixbuf(self, self.left)
#right = functions.image_to_pixbuf(self, self.right)
try:
bus = dbus.SessionBus()
shutters = bus.get_object('org.st | ereo3d.shutters', '/org/stereo3d/shutters')
self.RefreshControl = controller(parent.stereo, shutters, left, right, int(self.conf['rate']))
except:
print "Can't connect to the daemon !" # GTK POP-UP ?
else:
self.RefreshControl.start()
def swap_eyes(self):
self.left, self.right = self.right, self.left
def resize(self, maxw, maxh, force=0, normal=0):
if normal == 1: # Scale 1:1
self.right, self.left = self.origh | t, self.oleft # Backup
taille = self.right.size
self.height, self.width = taille[1], taille[0]
elif self.height > 0 and self.width > 0:
if self.height > maxh or self.width > maxw or force == 1:
qrh, qrw = (self.height + 0.00000000) / maxh, (self.width + 0.00000000) / maxw
qrmax = max(qrh, qrw)
height, width = int(math.ceil(self.height / qrmax)), int(math.ceil(self.width / qrmax))
self.right, self.left = self.oright, self.oleft # Backup
self.right, self.left = self.right.resize((width, height), Image.ANTIALIAS), self.left.resize((width, height), Image.ANTIALIAS)
self.height, self.width = height, width
class GlDrawingArea(gtk.DrawingArea, gtk.gtkgl.Widget):
def __init__(self, glconfig, app):
gtk.DrawingArea.__init__(self)
self.set_colormap(glconfig.get_colormap())
self._app = app
# Set OpenGL-capability to the drawing area
self.set_gl_capability(glconfig)
def Surface(self):
glDisable(GL_CULL_FACE)
glMap2f(GL_MAP2_VERTEX_3, 0, 1, 0, 1, ctrlpoints)
glMap2f(GL_MAP2_TEXTURE_COORD_2, 0, 1, 0, 1, texpts)
glEnable(GL_MAP2_TEXTURE_COORD_2)
glEnable(GL_MAP2_VERTEX_3)
glMapGrid2f(20, 0.0, 1.0, 20, 0.0, 1.0)
glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_DECAL)
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT)
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT)
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
print type(self.image)
glTexImage2D(GL_TEXTURE_2D, 0, 3, self.imageWidth, self.imageHeight, 0, GL_RGBA, GL_UNSIGNED_BYTE, self.image)
glEnable(GL_TEXTURE_2D)
glEnable(GL_DEPTH_TEST)
glEnable(GL_NORMALIZE)
glShadeModel(GL_FLAT)
self.list = glGenLists(1)
glNewList(self.list, GL_COMPILE)
glEvalMesh2(GL_FILL, 0, 20, 0, 20)
glEndList()
|
devs1991/test_edx_docmode | venv/lib/python2.7/site-packages/django_extensions/db/fields/__init__.py | Python | agpl-3.0 | 20,102 | 0.000995 | """
Django Extensions additional model fields
"""
import re
import six
import string
import warnings
try:
import uuid
HAS_UUID = True
except ImportError:
HAS_UUID = False
try:
import shortuuid
HAS_SHORT_UUID = True
except ImportError:
HAS_SHORT_UUID = False
from django.core.exceptions import ImproperlyConfigured
from django.db.models import DateTimeField, CharField, SlugField
from django.utils.crypto import get_random_string
from django.template.defaultfilters import slugify
try:
from django.utils.timezone import now as datetime_now
assert datetime_now
except ImportError:
import datetime
datetime_now = datetime.datetime.now
try:
from django.utils.encoding import force_unicode # NOQA
except ImportError:
from django.utils.encoding import force_text as force_unicode # NOQA
MAX_UNIQUE_QUERY_ATTEMPTS = 100
class UniqueFieldMixin(object):
def check_is_bool(self, attrname):
if not isinstance(getattr(self, attrname), bool):
raise ValueError("'{}' argument must be True or False".format(attrname))
def get_queryset(self, model_cls, slug_field):
for field, model in model_cls._meta.get_fields_with_model():
if model and field == slug_field:
return model._default_manager.all()
return model_cls._default_manager.all()
def find_unique(self, model_instance, field, iterator, *args):
        # exclude the current model instance from the queryset used in finding
        # the next valid value
queryset = self.get_queryset(model_instance.__class__, field)
if model_instance.pk:
queryset = queryset.exclude(pk=model_instance.pk)
        # form a kwargs dict used to implement any unique_together constraints
kwargs = {}
for params in model_instance._meta.unique_together:
if self.attname in params:
for param in params:
kwargs[param] = getattr(model_instance, param, None)
new = six.next(iterator)
kwargs[self.attname] = new
while not new or queryset.filter(**kwargs):
new = six.next(iterator)
kwargs[self.attname] = new
setattr(model_instance, self.attname, new)
return new
class AutoSlugField(UniqueFieldMixin, SlugField):
""" AutoSlugField
By default, sets editable=False, blank=True.
Required arguments:
populate_from
Specifies which field or list of fields the slug is populated from.
Optional arguments:
separator
Defines the used separator (default: '-')
overwrite
If set to True, overwrites the slug on every save (default: False)
Inspired by SmileyChris' Unique Slugify snippet:
http://www.djangosnippets.org/snippets/690/
"""
def __init__(self, *args, **kwargs):
kwargs.setdefault('blank', True)
kwargs.setdefault('editable', False)
populate_from = kwargs.pop('populate_from', None)
if populate_from is None:
raise ValueError("missing 'populate_from' argument")
else:
self._populate_from = populate_from
self.slugify_function = kwargs.pop('slugify_function', slugify)
self.separator = kwargs.pop('separator', six.u('-'))
self.overwrite = kwargs.pop('overwrite', False)
self.check_is_bool('overwrite')
self.allow_duplicates = kwargs.pop('allow_duplicates', False)
self.check_is_bool('allow_duplicates')
super(AutoSlugField, self).__init__(*args, **kwargs)
def _slug_strip(self, value):
"""
Cleans up a slug by removing slug separator characters that occur at
the beginning or end of a slug.
If an alternate separator is used, it will also replace any instances
of the default '-' separator with the new separator.
"""
re_sep = '(?:-|%s)' % re.escape(self.separator)
value = re.sub('%s+' % re_sep, self.separator, value)
return re.sub(r'^%s+|%s+$' % (re_sep, re_sep), '', value)
def slugify_func(self, content):
if content:
return self.slugify_function(content)
return ''
def slug_generator(self, original_slug, start):
yield original_slug
for i in range(start, MAX_UNIQUE_QUERY_ATTEMPTS):
slug = original_slug
end = '%s%s' % (self.separator, i)
end_len = len(end)
if self.slug_len and len(slug) + end_len > self.slug_len:
slug = slug[:self.slug_len - end_len]
slug = self._slug_strip(slug)
slug = '%s%s' % (slug, end)
yield slug
raise RuntimeError('max slug attempts for %s exceeded (%s)' %
(original_slug, MAX_UNIQUE_QUERY_ATTEMPTS))
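    # e.g. slug_generator('foo', 2) yields 'foo', 'foo-2', 'foo-3', ...,
    # trimming each candidate to the field's max_length before appending
    # the separator and counter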
def create_slug(self, model_instance, add):
# get fields to populate from and slug field to set
if not isinstance(self._populate_from, (list, tuple)):
self._populate_from = (self._populate_from, )
slug_field = model_instance._meta.get_field(self.attname)
if add or self.overwrite:
# slugify the original field content and set next step to 2
slug_for_field = lambda f | ield: self.slugify_func(getattr(model_instance, field))
slug = self.separator.join(map(slug_for_field, self._populate_from))
start = 2
else:
# get slug from the current model instance
slug = getattr(model_instance, self.attname)
# model_instance is being modified, and overwrite is False,
# so instead of doing anything, just return the curren | t slug
return slug
# strip slug depending on max_length attribute of the slug field
# and clean-up
self.slug_len = slug_field.max_length
if self.slug_len:
slug = slug[:self.slug_len]
slug = self._slug_strip(slug)
original_slug = slug
if self.allow_duplicates:
return slug
return super(AutoSlugField, self).find_unique(
model_instance, slug_field, self.slug_generator(original_slug, start))
def pre_save(self, model_instance, add):
value = force_unicode(self.create_slug(model_instance, add))
return value
def get_internal_type(self):
return "SlugField"
def south_field_triple(self):
"Returns a suitable description of this field for South."
# We'll just introspect the _actual_ field.
from south.modelsinspector import introspector
field_class = '%s.AutoSlugField' % self.__module__
args, kwargs = introspector(self)
kwargs.update({
'populate_from': repr(self._populate_from),
'separator': repr(self.separator),
'overwrite': repr(self.overwrite),
'allow_duplicates': repr(self.allow_duplicates),
})
# That's our definition!
return (field_class, args, kwargs)
def deconstruct(self):
name, path, args, kwargs = super(AutoSlugField, self).deconstruct()
kwargs['populate_from'] = self._populate_from
if not self.separator == six.u('-'):
kwargs['separator'] = self.separator
if self.overwrite is not False:
kwargs['overwrite'] = True
if self.allow_duplicates is not False:
kwargs['allow_duplicates'] = True
return name, path, args, kwargs
class RandomCharField(UniqueFieldMixin, CharField):
""" RandomCharField
By default, sets editable=False, blank=True, unique=False.
Required arguments:
length
Specifies the length of the field
Optional arguments:
unique
If set to True, duplicate entries are not allowed (default: False)
lowercase
If set to True, lowercase the alpha characters (default: False)
uppercase
If set to True, uppercase the alpha characters (default: False)
include_alpha
If set to True, include alpha characters (default: True)
include_digits
If set to True, include digit characters (default: True)
include_punctuation
If set to True, include |
pwnbus/scoring_engine | scoring_engine/config.py | Python | mit | 80 | 0 | from scoring_engine.config | _loader import ConfigLoader
con | fig = ConfigLoader()
|
grupydf/grupybr-template | {{cookiecutter.repo_name}}/.plugins/sitemap/__init__.py | Python | gpl-3.0 | 22 | 0.045455 | from | .sitemap impor | t * |
nexdatas/tools | test/NXSCreateStdCompDBE_test.py | Python | gpl-3.0 | 1,642 | 0 | #!/usr/bin/env python
# This file is part of nexdatas - Tango Server for NeXus data writer
#
# Copyright (C) 2012-2018 DESY, Jan Kotanski <jkotan@mail.desy.de>
#
# nexdatas is free software: you can redistribute | it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# nexdatas is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNE | SS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with nexdatas. If not, see <http://www.gnu.org/licenses/>.
# \package test nexdatas
# \file XMLConfigurator_test.py
# unittests for field Tags running Tango Server
#
import unittest
import sys
try:
import NXSCreateStdCompDBR_test
except Exception:
from . import NXSCreateStdCompDBR_test
if sys.version_info > (3,):
unicode = str
long = int
# test fixture
class NXSCreateStdCompDBETest(
NXSCreateStdCompDBR_test.NXSCreateStdCompDBRTest):
# constructor
# \param methodName name of the test method
def __init__(self, methodName):
NXSCreateStdCompDBR_test.NXSCreateStdCompDBRTest.__init__(
self, methodName)
self.flags = " -b -r testp09/testmcs/testr228 " \
"-e aatestp09/testmcs2/testr228 "
self.device = 'aatestp09/testmcs2/testr228'
if __name__ == '__main__':
unittest.main()
|
badassdatascience/pyDome | pyDome.py | Python | gpl-3.0 | 5,599 | 0.020897 | # pyDome: A geodesic dome calculator
# Copyright (C) 2013 Daniel Williams
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# load useful modules
#
import numpy as np
import getopt
import sys
#
# load pyDome modules
#
from Polyhedral import *
from SymmetryTriangle import *
from GeodesicSphere import *
from Output import *
from Truncation import *
from BillOfMaterials import *
def display_help():
print
print 'pyDome: A geodesic dome calculator. Copyright 2013 by Daniel Williams'
print
print 'Required Command-Line Input:'
print
print '\t-o, --output=\tPath to output file(s). Extensions will be added. Generates DXF and WRL files by default, but only WRL file when "-F" option is active. Example: \"-o output/test\" produces | files output/test.wrl and output/test.dxf.'
print
print 'Options:'
print
print '\t-r, --radius\tRadius of generated dome. Must be floating point. Default 1.0.'
print
print '\t-f, --frequency\tFrequency of generated dome. Must be an integer. Default 4.'
print
print '\t-v, --vthreshold\tDistance required to consider two vertices equal. Defau | lt 0.0000001. Must be floating point.'
print
print '\t-t, --truncation\tDistance (ratio) from the bottom to truncate. Default 0.499999. I advise using only the default or 0.333333. Must be floating point.'
print
print '\t-b, --bom-rounding\tThe number of decimal places to round chord length output in the generated Bill of Materials. Default 5. Must be an integer.'
print
print '\t-p, --polyhedron\tEither \"octahedron\" or \"icosahedron\". Default icosahedron.'
print
print '\t-F, --face\tFlag specifying whether to generate face output in WRL file. Cancels DXF file output and cannot be used with truncation.'
print
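# Illustrative invocation (the output path is a made-up placeholder):
#
#   python pyDome.py -r 5.0 -f 3 -t 0.499999 -o output/dome
#
# which truncates near the hemisphere and writes output/dome.wrl and
# output/dome.dxf, then prints the bill of materials.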
def main():
#
# default values
#
radius = np.float64(1.)
frequency = 4
polyhedral = Icosahedron()
vertex_equal_threshold = 0.0000001
truncation_amount = 0.499999
run_truncate = False
bom_rounding_precision = 5
face_output = False
output_path = None
#
# no input arguments
#
if len(sys.argv[1:]) == 0:
display_help()
sys.exit(-1)
#
# parse command line
#
try:
opts, args = getopt.getopt(sys.argv[1:], 'r:f:v:t:b:p:Fo:', ['truncation=', 'vthreshold=', 'radius=', 'frequency=', 'help', 'bom-rounding=', 'polyhedron=', 'face', 'output='])
except getopt.error, msg:
print "for help use --help"
sys.exit(-1)
for o, a in opts:
if o in ('-o', '--output'):
output_path = a
if o in ('-p', '--polyhedron'):
if a == 'octahedron':
polyhedral = Octahedron()
if o in ('-b', '--bom-rounding'):
try:
bom_rounding_precision = int(a)
except:
print '-b or --bom-rounding argument must be an integer. Exiting.'
sys.exit(-1)
if o in ('-h', '--help'):
display_help()
sys.exit(0)
if o in ('-F', '--face'):
face_output = True
if o in ('-r', '--radius'):
try:
a = float(a)
radius = np.float64(a)
except:
print '-r or --radius argument must be a floating point number. Exiting.'
sys.exit(-1)
if o in ('-f', '--frequency'):
try:
frequency = int(a)
except:
print '-f or --frequency argument must be an integer. Exiting.'
sys.exit(-1)
if o in ('-v', '--vthreshold'):
try:
a = float(a)
vertex_equal_threshold = np.float64(a)
except:
print '-v or --vthreshold argument must be a floating point number. Exiting.'
sys.exit(-1)
if o in ('-t', '--truncation'):
try:
a = float(a)
truncation_amount = np.float64(a)
run_truncate = True
except:
print '-t or --truncation argument must be a floating point number. Exiting.'
sys.exit(-1)
#
# check for required options
#
if output_path == None:
print 'An output path and filename is required. Use the -o argument. Exiting.'
sys.exit(-1)
#
# check for mutually exclusive options
#
if face_output and run_truncate:
print 'Truncation does not work with face output at this time. Use either -t or -F but not both.'
exit(-1)
#
# generate geodesic sphere
#
symmetry_triangle = ClassOneMethodOneSymmetryTriangle(frequency, polyhedral)
sphere = GeodesicSphere(polyhedral, symmetry_triangle, vertex_equal_threshold, radius)
C_sphere = sphere.non_duplicate_chords
F_sphere = sphere.non_duplicate_face_nodes
V_sphere = sphere.sphere_vertices
#
# truncate
#
V = V_sphere
C = C_sphere
if run_truncate:
V, C = truncate(V_sphere, C_sphere, truncation_amount)
#
# write model output
#
if face_output:
OutputFaceVRML(V, F_sphere, output_path + '.wrl')
else:
OutputWireframeVRML(V, C, output_path + '.wrl')
OutputDXF(V, C, output_path + '.dxf')
#
# bill of materials
#
get_bill_of_materials(V, C, bom_rounding_precision)
#
# run the main function
#
if __name__ == "__main__":
main()
|
AlexStarov/Shop | applications/ajax/callback.py | Python | apache-2.0 | 6,232 | 0.001174 | # -*- coding: utf-8 -*-
__author__ = 'Alex Starov'
try:
from django.utils.simplejson import dumps
# import simplejson as json
except ImportError:
from json import dumps
# import json
from django.http import HttpResponse
def callback_data_send(request, ):
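    # Response contract (as implemented below): JSON {'result': 'Ok'} on
    # success, or {'result': 'Bad', 'error': <message>} on failure; anything
    # that is not an AJAX POST is answered with HTTP 400.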
if request.is_ajax():
if request.method == 'POST':
# request_cookie = request.session.get(u'cookie', None, )
# if request_cookie:
sessionid = request.POST.get(u'sessionid', None, )
print('CallBack:', )
print('sessionid: ', sessionid, )
userid = request.POST.get(u'userid', False, )
print('userid: ', userid, )
print('userid type: ', type(userid, ), )
if userid == 'None':
userid = False
name = request.POST.get(u'name', None, )
print('name: ', name.encode('utf8', ), )
email = request.POST.get(u'email', None, )
print('email: ', email, )
phone = request.POST.get(u'phone', None, )
print('phone: ', phone, )
from applications.callback.models import CallBack
try:
if userid:
""" Error: invalid literal for int() with base 10: 'None' """
""" Ошибка вылазила из за того, что я пытался подсунуть вместо int() в user_id - None """
print(userid, )
callback = CallBack.objects.create(sessionid=sessionid,
user_id=userid,
name=name,
email=email,
phone=phone, )
else:
callback = CallBack.objects.create(sessionid=sessionid,
name=name,
email=email,
phone=phone, )
except Exception as e:
print('Exception: ', e, )
print('Exception message: ', e.message, )
response = {'result': 'Bad',
'error': e.message, }
data = dumps(response, )
mimetype = 'application/javascript'
| return HttpResponse(data, mimetype, )
else:
print(callback, )
""" Отправка заказа обратного звонка """
subject = u'Заказ обратного звонка от пользователя: %s на номер: %s. Интернет магазин Кексик.' % (name, phone, )
from django.template.loader import render_to_string
html_con | tent = render_to_string('email_request_callback_content.html',
{'name': name,
'email': email,
'phone': phone, }, )
from django.utils.html import strip_tags
text_content = strip_tags(html_content, )
from_email = u'Интерент магазин Кексик <site@keksik.com.ua>'
from django.core.mail import get_connection
backend = get_connection(backend='django.core.mail.backends.smtp.EmailBackend',
fail_silently=False, )
from django.core.mail import EmailMultiAlternatives
from proj.settings import Email_MANAGER
msg = EmailMultiAlternatives(subject=subject,
body=text_content,
from_email=from_email,
to=[Email_MANAGER, ],
connection=backend, )
msg.attach_alternative(content=html_content,
mimetype="text/html", )
msg.content_subtype = "html"
msg.send(fail_silently=False, )
""" Отправка благодарности клиенту. """
subject = u'Ваш заказ обратного звонка с сайта принят. Интернет магазин Кексик.'
html_content = render_to_string('email_successful_request_callback_content.html', )
text_content = strip_tags(html_content, )
# from_email = u'site@keksik.com.ua'
to_email = email
msg = EmailMultiAlternatives(subject=subject,
body=text_content,
from_email=from_email,
to=[to_email, ],
connection=backend, )
msg.attach_alternative(content=html_content,
mimetype="text/html", )
from smtplib import SMTPSenderRefused, SMTPDataError
try:
msg.send(fail_silently=False, )
except SMTPSenderRefused as e:
response = {'result': 'Bad',
                                'error': str(e), }
else:
response = {'result': 'Ok', }
data = dumps(response, )
mimetype = 'application/javascript'
return HttpResponse(data, mimetype, )
# else:
# response = {'result': 'Bad',
# 'error': u'Вы только-что зашли на сайт!!!', }
# data = dumps(response, )
# mimetype = 'application/javascript'
# return HttpResponse(data, mimetype, )
elif request.method == 'GET':
return HttpResponse(status=400, )
else:
return HttpResponse(status=400, )
else:
return HttpResponse(status=400, )
|