code stringlengths 3 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int64 3 1.05M |
|---|---|---|---|---|---|
import sys
from collections import deque
class TreeNode(object):
    """Binary tree node; ``data`` stays None until a value is assigned."""

    def __init__(self, data=None):
        self.data = data
        self.left = self.right = None


def build_tree(tokens):
    """Build a binary tree from tokens like ``(11,LL)`` and return its root.

    Each token carries a value and an L/R path from the root; intermediate
    nodes are created on demand.  The bare terminator token ``()`` becomes
    an empty string after stripping and is skipped.
    """
    root = TreeNode()
    for token in tokens:
        token = token.strip('()')
        if token == '':
            continue
        data, path = token.split(',')
        node = root
        for step in path:
            if step == 'L':
                if node.left is None:
                    node.left = TreeNode()
                node = node.left
            else:
                if node.right is None:
                    node.right = TreeNode()
                node = node.right
        node.data = data
    return root


def level_order(root):
    """Return node values in breadth-first (level) order as a list of strings.

    Nodes that never received a value are silently skipped.
    NOTE(review): an incomplete tree (missing or duplicated values) is not
    detected; whatever values exist are emitted in order.
    """
    result = []
    queue = deque([root])
    while queue:
        node = queue.popleft()
        if node.data:
            result.append(node.data)
        if node.left:
            queue.append(node.left)
        if node.right:
            queue.append(node.right)
    return result


if __name__ == '__main__':
    # One tree description per input line.  The stray debug print of every
    # token (previously emitted before the result) has been removed: it
    # corrupted the program's output.
    for line in sys.stdin:
        tokens = line.strip().split()
        print(' '.join(level_order(build_tree(tokens))))
| Blimeo/Java | out/production/matthew/Contests/ICPC/Volume1/p122.py | Python | apache-2.0 | 1,315 |
"""
hashdd_file_absolute_path.py
@brad_anton
License:
Copyright 2015 hashdd.com
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from .feature import feature
import os
class hashdd_file_absolute_path(feature):
    """Feature that yields the absolute path of the file being hashed."""

    def process(self):
        # No filename available (e.g. buffer-only input): keep the result
        # list shape with a single None placeholder.
        if not self.filename:
            return [None]
        return [os.path.abspath(self.filename)]
| hashdd/pyhashdd | hashdd/features/hashdd_file_absolute_path.py | Python | apache-2.0 | 837 |
import datetime
import logging
import os
import unittest
from unittest.mock import patch, MagicMock
import requests_mock
from freezegun import freeze_time
from tests.helper_func import get_fixture, load_fixture_config
from yatcobot.client import TwitterClient, TwitterClientException, TwitterClientRetweetedException, \
RateLimiter, RateLimiterExpired
logging.disable(logging.ERROR)
class TestTwitterClient(unittest.TestCase):
    """Tests for TwitterClient class"""

    # Directory containing this test module (used by the fixture helpers).
    tests_path = path = os.path.dirname(os.path.abspath(__file__))

    @requests_mock.mock()
    def setUp(self, m):
        # The client fetches the rate-limit status during construction, so
        # that endpoint must be mocked before TwitterClient is created.
        load_fixture_config()
        response = get_fixture('application_rate_limit_status.json', True)
        m.get('https://api.twitter.com/1.1/application/rate_limit_status.json', text=response)
        self.client = TwitterClient('Consumer Key', "Consumer Secret", "Access Key", "Access Secret")

    @requests_mock.mock()
    def test_search_tweets(self, m):
        response = get_fixture('search_tweets.json', True)
        m.get('https://api.twitter.com/1.1/search/tweets.json?q=210462857140252672&result_type=mixed&count=50',
              text=response)
        r = self.client.search_tweets("210462857140252672", 50)
        # The fixture contains 4 statuses.
        self.assertEqual(len(r), 4)

    @requests_mock.mock()
    def test_search_tweets_with_language(self, m):
        response = get_fixture('search_tweets.json', True)
        m.get('https://api.twitter.com/1.1/search/tweets.json?&lang=en&q=210462857140252672&result_type=mixed&count=50',
              text=response)
        r = self.client.search_tweets("210462857140252672", 50, language="en")
        self.assertEqual(len(r), 4)

    @requests_mock.mock()
    def test_update(self, m):
        response = get_fixture('statuses_update_reply.json', True)
        m.post('https://api.twitter.com/1.1/statuses/update.json', text=response)
        self.client.update('test', 2)
        # Exactly one POST carrying both the status text and the reply id.
        self.assertTrue(m.called)
        self.assertEqual(m.call_count, 1)
        history = m.request_history[0]
        self.assertEqual(history.method, 'POST')
        self.assertIn('test', history.text)
        self.assertIn('2', history.text)

    @requests_mock.mock()
    def test_retweet(self, m):
        response = get_fixture('statuses_retweet.json', True)
        m.post('https://api.twitter.com/1.1/statuses/retweet/241259202004267009.json', text=response)
        r = self.client.retweet("241259202004267009")
        self.assertEqual(r['retweeted_status']['id'], 241259202004267009)

    @requests_mock.mock()
    def test_retweet_already_retweeted(self, m):
        # The "already retweeted" API error must surface as the dedicated
        # exception type rather than the generic TwitterClientException.
        response = get_fixture('error_already_retweeted.json', True)
        m.post('https://api.twitter.com/1.1/statuses/retweet/241259202004267009.json', text=response)
        with self.assertRaises(TwitterClientRetweetedException):
            self.client.retweet("241259202004267009")

    @requests_mock.mock()
    def test_get_tweet(self, m):
        response = get_fixture('statuses_show.json', True)
        m.get('https://api.twitter.com/1.1/statuses/show/210462857140252672.json', text=response)
        r = self.client.get_tweet("210462857140252672")
        self.assertEqual(r['id'], 210462857140252672)

    @requests_mock.mock()
    def test_get_friends_ids(self, m):
        response = get_fixture('friends_ids.json', True)
        m.get('https://api.twitter.com/1.1/friends/ids.json', text=response)
        r = self.client.get_friends_ids()
        self.assertEqual(len(r), 31)

    @requests_mock.mock()
    def test_follow(self, m):
        response = get_fixture('friendship_create.json', True)
        m.post('https://api.twitter.com/1.1/friendships/create.json', text=response)
        r = self.client.follow(1401881)
        self.assertEqual(r['id'], 1401881)

    @requests_mock.mock()
    def test_unfollow(self, m):
        response = get_fixture('friendship_create.json', True)
        m.post('https://api.twitter.com/1.1/friendships/destroy.json', text=response)
        r = self.client.unfollow(1401881)
        self.assertEqual(r['id'], 1401881)

    @requests_mock.mock()
    def test_favorite(self, m):
        response = get_fixture('favorites_create.json', True)
        m.post('https://api.twitter.com/1.1/favorites/create.json', text=response)
        r = self.client.favorite(243138128959913986)
        self.assertEqual(r['id'], 243138128959913986)

    @requests_mock.mock()
    def test_get_blocks(self, m):
        response = get_fixture('blocks_ids.json', True)
        m.get('https://api.twitter.com/1.1/blocks/ids.json', text=response)
        r = self.client.get_blocks()
        self.assertEqual(len(r), 1)

    @requests_mock.mock()
    def test_get_mentions_timeline(self, m):
        response = get_fixture('statuses_mentions_timeline.json', True)
        m.get('https://api.twitter.com/1.1/statuses/mentions_timeline.json', text=response)
        r = self.client.get_mentions_timeline()
        self.assertEqual(len(r), 2)

    @requests_mock.mock()
    def test_get_mentions_timeline_since_id(self, m):
        response = get_fixture('statuses_mentions_timeline_since_id.json', True)
        m.get('https://api.twitter.com/1.1/statuses/mentions_timeline.json?since_id=653965849364180992', text=response)
        r = self.client.get_mentions_timeline(since_id=653965849364180992)
        self.assertEqual(len(r), 1)

    @requests_mock.mock()
    def test_get_mentions_timeline_count_1(self, m):
        response = get_fixture('statuses_mentions_timeline_count_1.json', True)
        m.get('https://api.twitter.com/1.1/statuses/mentions_timeline.json?count=1', text=response)
        r = self.client.get_mentions_timeline(count=1)
        self.assertEqual(len(r), 1)

    @requests_mock.mock()
    def test_update_ratelimits(self, m):
        # revert original function
        response = get_fixture('application_rate_limit_status.json', True)
        m.get('https://api.twitter.com/1.1/application/rate_limit_status.json', text=response)
        self.client.update_ratelimits(False)
        self.assertEqual(len(self.client.ratelimiter), 80)
        # check if percent is computed
        for x in self.client.ratelimiter.values():
            self.assertIn('percent', x)
            self.assertEqual(x['limit'] / 100 * x['percent'], x['remaining'])

    @requests_mock.mock()
    def test_api_call_error(self, m):
        # Any error payload from the API must raise TwitterClientException.
        response = get_fixture('error.json', True)
        m.get(requests_mock.ANY, text=response)
        with self.assertRaises(TwitterClientException):
            self.client._api_call('blocks/ids')

    @requests_mock.mock()
    def test_api_call_no_check_ratelimits(self, m):
        # With check_ratelimit=False the limiter must not be consulted.
        response = get_fixture('blocks_ids.json', True)
        m.get('https://api.twitter.com/1.1/blocks/ids.json', text=response)
        self.client.ratelimiter.check_limit = MagicMock()
        self.client._api_call('blocks/ids', check_ratelimit=False)
        self.assertFalse(self.client.ratelimiter.check_limit.called)

    @requests_mock.mock()
    def test_api_call_decrease_remaining_calls(self, m):
        # Every API call should decrement the endpoint's remaining budget.
        response = get_fixture('blocks_ids.json', True)
        m.get('https://api.twitter.com/1.1/blocks/ids.json', text=response)
        self.client.ratelimiter.check_limit = MagicMock()
        before_remaining = self.client.ratelimiter['/blocks/ids']['remaining']
        self.client._api_call('blocks/ids')
        self.assertEqual(before_remaining - 1, self.client.ratelimiter['/blocks/ids']['remaining'])
class TestRatelimiter(unittest.TestCase):
    """Tests for the RateLimiter helper."""

    # Directory containing this test module (used by the fixture helpers).
    tests_path = path = os.path.dirname(os.path.abspath(__file__))

    # Snapshot of Twitter's rate-limit status with every bucket at full
    # capacity (remaining == limit, percent == 100.0), except
    # /application/rate_limit_status which reflects the one call made to
    # fetch this very snapshot.
    ratelimits_full = {
        '/collections/list': {'limit': 1000, 'reset': 1443529669, 'percent': 100.0, 'remaining': 1000},
        '/followers/list': {'limit': 15, 'reset': 1443529669, 'percent': 100.0, 'remaining': 15},
        '/contacts/addressbook': {'limit': 300, 'reset': 1443529669, 'percent': 100.0, 'remaining': 300},
        '/account/update_profile': {'limit': 15, 'reset': 1443529669, 'percent': 100.0, 'remaining': 15},
        '/search/tweets': {'limit': 180, 'reset': 1443529669, 'percent': 100.0, 'remaining': 180},
        '/users/lookup': {'limit': 180, 'reset': 1443529669, 'percent': 100.0, 'remaining': 180},
        '/saved_searches/destroy/:id': {'limit': 15, 'reset': 1443529669, 'percent': 100.0, 'remaining': 15},
        '/users/profile_banner': {'limit': 180, 'reset': 1443529669, 'percent': 100.0, 'remaining': 180},
        '/lists/subscribers': {'limit': 180, 'reset': 1443529669, 'percent': 100.0, 'remaining': 180},
        '/help/tos': {'limit': 15, 'reset': 1443529669, 'percent': 100.0, 'remaining': 15},
        '/trends/place': {'limit': 15, 'reset': 1443529669, 'percent': 100.0, 'remaining': 15},
        '/application/rate_limit_status': {'limit': 180, 'reset': 1443529669,
                                           'percent': 99.44444444444444, 'remaining': 179},
        '/friends/following/list': {'limit': 15, 'reset': 1443529669, 'percent': 100.0, 'remaining': 15},
        '/lists/ownerships': {'limit': 15, 'reset': 1443529669, 'percent': 100.0, 'remaining': 15},
        '/account/login_verification_enrollment': {'limit': 15, 'reset': 1443529669, 'percent': 100.0,
                                                   'remaining': 15},
        '/contacts/users_and_uploaded_by': {'limit': 300, 'reset': 1443529669, 'percent': 100.0,
                                            'remaining': 300},
        '/users/derived_info': {'limit': 15, 'reset': 1443529669, 'percent': 100.0, 'remaining': 15},
        '/statuses/friends': {'limit': 15, 'reset': 1443529669, 'percent': 100.0, 'remaining': 15},
        '/statuses/user_timeline': {'limit': 180, 'reset': 1443529669, 'percent': 100.0,
                                    'remaining': 180},
        '/lists/subscriptions': {'limit': 15, 'reset': 1443529669, 'percent': 100.0, 'remaining': 15},
        '/saved_searches/show/:id': {'limit': 15, 'reset': 1443529669, 'percent': 100.0,
                                     'remaining': 15},
        '/users/show/:id': {'limit': 180, 'reset': 1443529669, 'percent': 100.0, 'remaining': 180},
        '/direct_messages/sent': {'limit': 15, 'reset': 1443529669, 'percent': 100.0, 'remaining': 15},
        '/help/privacy': {'limit': 15, 'reset': 1443529669, 'percent': 100.0, 'remaining': 15},
        '/friendships/incoming': {'limit': 15, 'reset': 1443529669, 'percent': 100.0, 'remaining': 15},
        '/statuses/home_timeline': {'limit': 15, 'reset': 1443529669, 'percent': 100.0, 'remaining': 15},
        '/statuses/retweeters/ids': {'limit': 15, 'reset': 1443529669, 'percent': 100.0,
                                     'remaining': 15},
        '/geo/similar_places': {'limit': 15, 'reset': 1443529669, 'percent': 100.0, 'remaining': 15},
        '/lists/statuses': {'limit': 180, 'reset': 1443529669, 'percent': 100.0, 'remaining': 180},
        '/geo/id/:place_id': {'limit': 15, 'reset': 1443529669, 'percent': 100.0, 'remaining': 15},
        '/statuses/oembed': {'limit': 180, 'reset': 1443529669, 'percent': 100.0, 'remaining': 180},
        '/geo/search': {'limit': 15, 'reset': 1443529669, 'percent': 100.0, 'remaining': 15},
        '/device/token': {'limit': 15, 'reset': 1443529669, 'percent': 100.0, 'remaining': 15},
        '/collections/entries': {'limit': 1000, 'reset': 1443529669, 'percent': 100.0,
                                 'remaining': 1000},
        '/friendships/outgoing': {'limit': 15, 'reset': 1443529669, 'percent': 100.0, 'remaining': 15},
        '/lists/members': {'limit': 180, 'reset': 1443529669, 'percent': 100.0, 'remaining': 180},
        '/followers/ids': {'limit': 15, 'reset': 1443529669, 'percent': 100.0, 'remaining': 15},
        '/statuses/retweets/:id': {'limit': 60, 'reset': 1443529669, 'percent': 100.0, 'remaining': 60},
        '/contacts/delete/status': {'limit': 300, 'reset': 1443529669, 'percent': 100.0,
                                    'remaining': 300},
        '/users/suggestions/:slug': {'limit': 15, 'reset': 1443529669, 'percent': 100.0,
                                     'remaining': 15},
        '/friendships/show': {'limit': 180, 'reset': 1443529669, 'percent': 100.0, 'remaining': 180},
        '/lists/members/show': {'limit': 15, 'reset': 1443529669, 'percent': 100.0, 'remaining': 15},
        '/mutes/users/ids': {'limit': 15, 'reset': 1443529669, 'percent': 100.0, 'remaining': 15},
        '/account/settings': {'limit': 15, 'reset': 1443529669, 'percent': 100.0, 'remaining': 15},
        '/contacts/users': {'limit': 300, 'reset': 1443529669, 'percent': 100.0, 'remaining': 300},
        '/friendships/no_retweets/ids': {'limit': 15, 'reset': 1443529669, 'percent': 100.0,
                                         'remaining': 15},
        '/direct_messages': {'limit': 15, 'reset': 1443529669, 'percent': 100.0, 'remaining': 15},
        '/lists/subscribers/show': {'limit': 15, 'reset': 1443529669, 'percent': 100.0, 'remaining': 15},
        '/statuses/retweets_of_me': {'limit': 15, 'reset': 1443529669, 'percent': 100.0,
                                     'remaining': 15},
        '/moments/permissions': {'limit': 300, 'reset': 1443529669, 'percent': 100.0, 'remaining': 300},
        '/statuses/lookup': {'limit': 180, 'reset': 1443529669, 'percent': 100.0, 'remaining': 180},
        '/blocks/ids': {'limit': 15, 'reset': 1443529669, 'percent': 100.0, 'remaining': 15},
        '/statuses/show/:id': {'limit': 180, 'reset': 1443529669, 'percent': 100.0, 'remaining': 180},
        '/users/suggestions/:slug/members': {'limit': 15, 'reset': 1443529669, 'percent': 100.0,
                                             'remaining': 15},
        '/trends/available': {'limit': 15, 'reset': 1443529669, 'percent': 100.0, 'remaining': 15},
        '/mutes/users/list': {'limit': 15, 'reset': 1443529669, 'percent': 100.0, 'remaining': 15},
        '/blocks/list': {'limit': 15, 'reset': 1443529669, 'percent': 100.0, 'remaining': 15},
        '/users/search': {'limit': 180, 'reset': 1443529669, 'percent': 100.0, 'remaining': 180},
        '/geo/reverse_geocode': {'limit': 15, 'reset': 1443529669, 'percent': 100.0, 'remaining': 15},
        '/contacts/uploaded_by': {'limit': 300, 'reset': 1443529669, 'percent': 100.0, 'remaining': 300},
        '/statuses/mentions_timeline': {'limit': 15, 'reset': 1443529669, 'percent': 100.0,
                                        'remaining': 15},
        '/friends/following/ids': {'limit': 15, 'reset': 1443529669, 'percent': 100.0, 'remaining': 15},
        '/help/settings': {'limit': 15, 'reset': 1443529669, 'percent': 100.0, 'remaining': 15},
        '/direct_messages/show': {'limit': 15, 'reset': 1443529669, 'percent': 100.0, 'remaining': 15},
        '/lists/memberships': {'limit': 15, 'reset': 1443529669, 'percent': 100.0, 'remaining': 15},
        '/help/configuration': {'limit': 15, 'reset': 1443529669, 'percent': 100.0, 'remaining': 15},
        '/favorites/list': {'limit': 15, 'reset': 1443529669, 'percent': 100.0, 'remaining': 15},
        '/direct_messages/sent_and_received': {'limit': 15, 'reset': 1443529669, 'percent': 100.0,
                                               'remaining': 15},
        '/users/report_spam': {'limit': 15, 'reset': 1443529669, 'percent': 100.0, 'remaining': 15},
        '/friends/ids': {'limit': 15, 'reset': 1443529669, 'percent': 100.0, 'remaining': 15},
        '/friendships/lookup': {'limit': 15, 'reset': 1443529669, 'percent': 100.0, 'remaining': 15},
        '/account/verify_credentials': {'limit': 15, 'reset': 1443529669, 'percent': 100.0,
                                        'remaining': 15},
        '/friends/list': {'limit': 15, 'reset': 1443529669, 'percent': 100.0, 'remaining': 15},
        '/lists/list': {'limit': 15, 'reset': 1443529669, 'percent': 100.0, 'remaining': 15},
        '/collections/show': {'limit': 1000, 'reset': 1443529669, 'percent': 100.0, 'remaining': 1000},
        '/help/languages': {'limit': 15, 'reset': 1443529669, 'percent': 100.0, 'remaining': 15},
        '/saved_searches/list': {'limit': 15, 'reset': 1443529669, 'percent': 100.0, 'remaining': 15},
        '/lists/show': {'limit': 15, 'reset': 1443529669, 'percent': 100.0, 'remaining': 15},
        '/users/suggestions': {'limit': 15, 'reset': 1443529669, 'percent': 100.0, 'remaining': 15},
        '/trends/closest': {'limit': 15, 'reset': 1443529669, 'percent': 100.0, 'remaining': 15}}

    # Same set of endpoints with every bucket exhausted (remaining == 0,
    # percent == 0) and a reset timestamp of 100 seconds after the epoch.
    ratelimits_empty = {
        '/geo/search': {'reset': 100, 'percent': 0, 'limit': 15, 'remaining': 0},
        '/saved_searches/destroy/:id': {'reset': 100, 'percent': 0, 'limit': 15, 'remaining': 0},
        '/statuses/friends': {'reset': 100, 'percent': 0, 'limit': 15, 'remaining': 0},
        '/device/token': {'reset': 100, 'percent': 0, 'limit': 15, 'remaining': 0},
        '/friends/ids': {'reset': 100, 'percent': 0, 'limit': 15, 'remaining': 0},
        '/statuses/home_timeline': {'reset': 100, 'percent': 0, 'limit': 15, 'remaining': 0},
        '/users/suggestions': {'reset': 100, 'percent': 0, 'limit': 15, 'remaining': 0},
        '/contacts/delete/status': {'reset': 100, 'percent': 0, 'limit': 300, 'remaining': 0},
        '/friendships/outgoing': {'reset': 100, 'percent': 0, 'limit': 15, 'remaining': 0},
        '/users/suggestions/:slug/members': {'reset': 100, 'percent': 0, 'limit': 15, 'remaining': 0},
        '/friends/list': {'reset': 100, 'percent': 0, 'limit': 15, 'remaining': 0},
        '/account/login_verification_enrollment': {'reset': 100, 'percent': 0, 'limit': 15,
                                                   'remaining': 0},
        '/geo/similar_places': {'reset': 100, 'percent': 0, 'limit': 15, 'remaining': 0},
        '/lists/ownerships': {'reset': 100, 'percent': 0, 'limit': 15, 'remaining': 0},
        '/lists/subscriptions': {'reset': 100, 'percent': 0, 'limit': 15, 'remaining': 0},
        '/blocks/ids': {'reset': 100, 'percent': 0, 'limit': 15, 'remaining': 0},
        '/direct_messages/sent_and_received': {'reset': 100, 'percent': 0, 'limit': 15, 'remaining': 0},
        '/contacts/addressbook': {'reset': 100, 'percent': 0, 'limit': 300, 'remaining': 0},
        '/users/show/:id': {'reset': 100, 'percent': 0, 'limit': 180, 'remaining': 0},
        '/contacts/users': {'reset': 100, 'percent': 0, 'limit': 300, 'remaining': 0},
        '/account/settings': {'reset': 100, 'percent': 0, 'limit': 15, 'remaining': 0},
        '/trends/closest': {'reset': 100, 'percent': 0, 'limit': 15, 'remaining': 0},
        '/lists/members': {'reset': 100, 'percent': 0, 'limit': 180, 'remaining': 0},
        '/users/report_spam': {'reset': 100, 'percent': 0, 'limit': 15, 'remaining': 0},
        '/followers/ids': {'reset': 100, 'percent': 0, 'limit': 15, 'remaining': 0},
        '/account/verify_credentials': {'reset': 100, 'percent': 0, 'limit': 15, 'remaining': 0},
        '/saved_searches/list': {'reset': 100, 'percent': 0, 'limit': 15, 'remaining': 0},
        '/statuses/retweeters/ids': {'reset': 100, 'percent': 0, 'limit': 15, 'remaining': 0},
        '/direct_messages/sent': {'reset': 100, 'percent': 0, 'limit': 15, 'remaining': 0},
        '/users/profile_banner': {'reset': 100, 'percent': 0, 'limit': 180, 'remaining': 0},
        '/favorites/list': {'reset': 100, 'percent': 0, 'limit': 15, 'remaining': 0},
        '/mutes/users/list': {'reset': 100, 'percent': 0, 'limit': 15, 'remaining': 0},
        '/statuses/oembed': {'reset': 100, 'percent': 0, 'limit': 180, 'remaining': 0},
        '/users/derived_info': {'reset': 100, 'percent': 0, 'limit': 15, 'remaining': 0},
        '/lists/list': {'reset': 100, 'percent': 0, 'limit': 15, 'remaining': 0},
        '/help/languages': {'reset': 100, 'percent': 0, 'limit': 15, 'remaining': 0},
        '/mutes/users/ids': {'reset': 100, 'percent': 0, 'limit': 15, 'remaining': 0},
        '/statuses/lookup': {'reset': 100, 'percent': 0, 'limit': 180, 'remaining': 0},
        '/help/settings': {'reset': 100, 'percent': 0, 'limit': 15, 'remaining': 0},
        '/lists/show': {'reset': 100, 'percent': 0, 'limit': 15, 'remaining': 0},
        '/trends/available': {'reset': 100, 'percent': 0, 'limit': 15, 'remaining': 0},
        '/lists/subscribers': {'reset': 100, 'percent': 0, 'limit': 180, 'remaining': 0},
        '/friendships/no_retweets/ids': {'reset': 100, 'percent': 0, 'limit': 15, 'remaining': 0},
        '/friends/following/list': {'reset': 100, 'percent': 0, 'limit': 15, 'remaining': 0},
        '/trends/place': {'reset': 100, 'percent': 0, 'limit': 15, 'remaining': 0},
        '/geo/id/:place_id': {'reset': 100, 'percent': 0, 'limit': 15, 'remaining': 0},
        '/contacts/uploaded_by': {'reset': 100, 'percent': 0, 'limit': 300, 'remaining': 0},
        '/lists/members/show': {'reset': 100, 'percent': 0, 'limit': 15, 'remaining': 0},
        '/friendships/lookup': {'reset': 100, 'percent': 0, 'limit': 15, 'remaining': 0},
        '/search/tweets': {'reset': 100, 'percent': 0, 'limit': 180, 'remaining': 0},
        '/collections/entries': {'reset': 100, 'percent': 0, 'limit': 1000, 'remaining': 0},
        '/friendships/show': {'reset': 100, 'percent': 0, 'limit': 180, 'remaining': 0},
        '/application/rate_limit_status': {'reset': 100, 'percent': 0, 'limit': 180, 'remaining': 0},
        '/statuses/show/:id': {'reset': 100, 'percent': 0, 'limit': 180, 'remaining': 0},
        '/friends/following/ids': {'reset': 100, 'percent': 0, 'limit': 15, 'remaining': 0},
        '/users/search': {'reset': 100, 'percent': 0, 'limit': 180, 'remaining': 0},
        '/contacts/users_and_uploaded_by': {'reset': 100, 'percent': 0, 'limit': 300, 'remaining': 0},
        '/statuses/retweets_of_me': {'reset': 100, 'percent': 0, 'limit': 15, 'remaining': 0},
        '/geo/reverse_geocode': {'reset': 100, 'percent': 0, 'limit': 15, 'remaining': 0},
        '/statuses/mentions_timeline': {'reset': 100, 'percent': 0, 'limit': 15, 'remaining': 0},
        '/lists/memberships': {'reset': 100, 'percent': 0, 'limit': 15, 'remaining': 0},
        '/help/privacy': {'reset': 100, 'percent': 0, 'limit': 15, 'remaining': 0},
        '/statuses/user_timeline': {'reset': 100, 'percent': 0, 'limit': 180, 'remaining': 0},
        '/help/configuration': {'reset': 100, 'percent': 0, 'limit': 15, 'remaining': 0},
        '/lists/subscribers/show': {'reset': 100, 'percent': 0, 'limit': 15, 'remaining': 0},
        '/collections/show': {'reset': 100, 'percent': 0, 'limit': 1000, 'remaining': 0},
        '/moments/permissions': {'reset': 100, 'percent': 0, 'limit': 300, 'remaining': 0},
        '/account/update_profile': {'reset': 100, 'percent': 0, 'limit': 15, 'remaining': 0},
        '/blocks/list': {'reset': 100, 'percent': 0, 'limit': 15, 'remaining': 0},
        '/direct_messages': {'reset': 100, 'percent': 0, 'limit': 15, 'remaining': 0},
        '/users/lookup': {'reset': 100, 'percent': 0, 'limit': 180, 'remaining': 0},
        '/users/suggestions/:slug': {'reset': 100, 'percent': 0, 'limit': 15, 'remaining': 0},
        '/lists/statuses': {'reset': 100, 'percent': 0, 'limit': 180, 'remaining': 0},
        '/help/tos': {'reset': 100, 'percent': 0, 'limit': 15, 'remaining': 0},
        '/direct_messages/show': {'reset': 100, 'percent': 0, 'limit': 15, 'remaining': 0},
        '/statuses/retweets/:id': {'reset': 100, 'percent': 0, 'limit': 60, 'remaining': 0},
        '/saved_searches/show/:id': {'reset': 100, 'percent': 0, 'limit': 15, 'remaining': 0},
        '/collections/list': {'reset': 100, 'percent': 0, 'limit': 1000, 'remaining': 0},
        '/friendships/incoming': {'reset': 100, 'percent': 0, 'limit': 15, 'remaining': 0},
        '/followers/list': {'reset': 100, 'percent': 0, 'limit': 15, 'remaining': 0}}

    def setUp(self):
        # Start each test from a limiter loaded with the "full" snapshot.
        load_fixture_config()
        self.limiter = RateLimiter()
        self.limiter.update(self.ratelimits_full)

    def test_calculate_percent(self):
        # Full bucket -> 100 percent.
        self.limiter['/geo/search'] = {'reset': 100, 'percent': 0, 'limit': 15, 'remaining': 15}
        self.limiter._calculate_percent_remaining('/geo/search')
        self.assertEqual(self.limiter['/geo/search']['percent'], 100)
        # Empty bucket -> 0 percent.
        self.limiter['/geo/search'] = {'reset': 100, 'percent': 0, 'limit': 15, 'remaining': 0}
        self.limiter._calculate_percent_remaining('/geo/search')
        self.assertEqual(self.limiter['/geo/search']['percent'], 0)

    def test_update_ratelimits(self):
        response = get_fixture('application_rate_limit_status.json')
        ratelimiter = RateLimiter()
        ratelimiter.refresh_limits(response['resources'])
        self.assertEqual(len(ratelimiter), 80)

    def test_check_limit_with_no_more_remaining(self):
        response = get_fixture('application_rate_limit_status.json')
        ratelimiter = RateLimiter()
        ratelimiter.refresh_limits(response['resources'])
        # Exhaust every bucket and set all resets 100s in the future.
        for limit in ratelimiter.values():
            limit['percent'] = 0
            limit['remaining'] = 0
            limit['reset'] = 100
        # Freeze the clock at t=0 so the wait until reset is exactly 100s.
        with freeze_time(datetime.datetime.fromtimestamp(0)):
            ratelimiter['/geo/search']['remaining'] = 0
            ratelimiter['/geo/search']['percent'] = 0
            with patch('time.sleep') as p:
                with self.assertRaises(RateLimiterExpired):
                    ratelimiter.check_limit('geo/search')
                p.assert_called_with(100)

    def test_decrease_remaining(self):
        self.limiter['/geo/search'] = {'reset': 100, 'percent': 0, 'limit': 1, 'remaining': 1}
        self.limiter.decrease_remaining('geo/search')
        self.assertEqual(self.limiter['/geo/search']['remaining'], 0)
        self.assertEqual(self.limiter['/geo/search']['percent'], 0)
| buluba89/Yatcobot | tests/test_twitterclient.py | Python | gpl-2.0 | 28,404 |
#: Operation ids
#
# The wire id of each operation is its position in this tuple.  Hand-numbered
# assignments (the previous form) are error-prone when inserting entries; with
# enumerate the ids are guaranteed gap-free and consistent.  Only ever APPEND
# new names -- reordering or inserting would silently renumber existing ids.
_operation_names = (
    "vote",
    "comment",
    "transfer",
    "transfer_to_vesting",
    "withdraw_vesting",
    "limit_order_create",
    "limit_order_cancel",
    "feed_publish",
    "convert",
    "account_create",
    "account_update",
    "witness_update",
    "account_witness_vote",
    "account_witness_proxy",
    "pow",
    "custom",
    "report_over_production",
    "delete_comment",
    "custom_json",
    "comment_options",
    "set_withdraw_vesting_route",
    "limit_order_create2",
    "challenge_authority",
    "prove_authority",
    "request_account_recovery",
    "recover_account",
    "change_recovery_account",
    "escrow_transfer",
    "escrow_dispute",
    "escrow_release",
    "pow2",
    "escrow_approve",
    "transfer_to_savings",
    "transfer_from_savings",
    "cancel_transfer_from_savings",
    "custom_binary",
    "decline_voting_rights",
    "reset_account",
    "set_reset_account",
    "claim_reward_balance",
    "delegate_vesting_shares",
    "account_create_with_delegation",
    "fill_convert_request",
    "author_reward",
    "curation_reward",
    "comment_reward",
    "liquidity_reward",
    "interest",
    "fill_vesting_withdraw",
    "fill_order",
    "shutdown_witness",
    "fill_transfer_from_savings",
    "hardfork",
    "comment_payout_update",
    "return_vesting_delegation",
    "comment_benefactor_reward",
)

#: Mapping of operation name -> numeric operation id.
operations = {name: op_id for op_id, name in enumerate(_operation_names)}
| xeroc/piston-lib | pistonbase/operationids.py | Python | mit | 2,051 |
import itertools
import os
import random
from collections import defaultdict
from datetime import datetime
from typing import Any, Dict, List, Mapping, Sequence, Tuple
import bmemcached
import orjson
from django.conf import settings
from django.contrib.sessions.models import Session
from django.core.files.base import File
from django.core.management import call_command
from django.core.management.base import BaseCommand, CommandParser
from django.db import connection
from django.db.models import F
from django.utils.timezone import now as timezone_now
from django.utils.timezone import timedelta as timezone_timedelta
from scripts.lib.zulip_tools import get_or_create_dev_uuid_var_path
from zerver.lib.actions import (
STREAM_ASSIGNMENT_COLORS,
build_message_send_dict,
check_add_realm_emoji,
do_change_user_role,
do_create_realm,
do_send_messages,
do_update_user_custom_profile_data_if_changed,
try_add_realm_custom_profile_field,
try_add_realm_default_custom_profile_field,
)
from zerver.lib.bulk_create import bulk_create_streams
from zerver.lib.cache import cache_set
from zerver.lib.generate_test_data import create_test_data, generate_topics
from zerver.lib.onboarding import create_if_missing_realm_internal_bots
from zerver.lib.push_notifications import logger as push_notifications_logger
from zerver.lib.server_initialization import create_internal_realm, create_users
from zerver.lib.storage import static_path
from zerver.lib.types import ProfileFieldData
from zerver.lib.url_preview.preview import CACHE_NAME as PREVIEW_CACHE_NAME
from zerver.lib.user_groups import create_user_group
from zerver.lib.users import add_service
from zerver.lib.utils import generate_api_key
from zerver.models import (
AlertWord,
Client,
CustomProfileField,
DefaultStream,
Draft,
Huddle,
Message,
Reaction,
Realm,
RealmAuditLog,
RealmDomain,
Recipient,
Service,
Stream,
Subscription,
UserMessage,
UserPresence,
UserProfile,
get_client,
get_huddle,
get_realm,
get_stream,
get_user,
get_user_by_delivery_email,
get_user_profile_by_id,
)
# populate_db runs outside Tornado; disable its code paths.
settings.USING_TORNADO = False
# Disable using memcached caches to avoid 'unsupported pickle
# protocol' errors if `populate_db` is run with a different Python
# from `run-dev.py`.
default_cache = settings.CACHES["default"]
settings.CACHES["default"] = {
    "BACKEND": "django.core.cache.backends.locmem.LocMemCache",
}

# (emoji name, Unicode codepoint) pairs used when seeding test data.
# NOTE(review): presumably consumed further down this module when creating
# reactions -- confirm against the rest of the file.
DEFAULT_EMOJIS = [
    ("+1", "1f44d"),
    ("smiley", "1f603"),
    ("eyes", "1f440"),
    ("crying_cat_face", "1f63f"),
    ("arrow_up", "2b06"),
    ("confetti_ball", "1f38a"),
    ("hundred_points", "1f4af"),
]
def clear_database() -> None:
    # Hacky function only for use inside populate_db. Designed to
    # allow running populate_db repeatedly in series to work without
    # flushing memcached or clearing the database manually.
    # With `zproject.test_settings`, we aren't using real memcached
    # and; we only need to flush memcached if we're populating a
    # database that would be used with it (i.e. zproject.dev_settings).
    if default_cache["BACKEND"] == "django_bmemcached.memcached.BMemcached":
        bmemcached.Client(
            (default_cache["LOCATION"],),
            **default_cache["OPTIONS"],
        ).flush_all()
    # Hack because mypy doesn't know these are model classes.
    model: Any = None
    # Delete all rows of the core models.
    # NOTE(review): the listing order looks deliberate (e.g. Message before
    # Stream/Realm); keep it as-is when editing -- confirm whether any of
    # these deletions rely on it rather than on Django's cascade handling.
    for model in [
        Message,
        Stream,
        UserProfile,
        Recipient,
        Realm,
        Subscription,
        Huddle,
        UserMessage,
        Client,
        DefaultStream,
    ]:
        model.objects.all().delete()
    # Drop all login sessions, since the users they reference are gone.
    Session.objects.all().delete()
def subscribe_users_to_streams(realm: Realm, stream_dict: Dict[str, Dict[str, Any]]) -> None:
    """Subscribe every user profile in `realm` to every stream named in
    `stream_dict`, bulk-creating one Subscription and one RealmAuditLog
    row per (user, stream) pair.
    """
    new_subscriptions = []
    audit_rows = []
    when = timezone_now()
    realm_profiles = UserProfile.objects.select_related().filter(realm=realm)
    for index, name in enumerate(stream_dict):
        stream = Stream.objects.get(name=name, realm=realm)
        recipient = Recipient.objects.get(type=Recipient.STREAM, type_id=stream.id)
        # Cycle through the palette so consecutive streams get distinct colors.
        color = STREAM_ASSIGNMENT_COLORS[index % len(STREAM_ASSIGNMENT_COLORS)]
        for profile in realm_profiles:
            new_subscriptions.append(
                Subscription(
                    recipient=recipient,
                    user_profile=profile,
                    is_user_active=profile.is_active,
                    color=color,
                )
            )
            audit_rows.append(
                RealmAuditLog(
                    realm=profile.realm,
                    modified_user=profile,
                    modified_stream=stream,
                    event_last_message_id=0,
                    event_type=RealmAuditLog.SUBSCRIPTION_CREATED,
                    event_time=when,
                )
            )
    Subscription.objects.bulk_create(new_subscriptions)
    RealmAuditLog.objects.bulk_create(audit_rows)
def create_alert_words(realm_id: int) -> None:
    """Attach 4 randomly chosen alert words to every active human user in
    the given realm, bulk-creating the AlertWord rows.
    """
    user_ids = UserProfile.objects.filter(
        realm_id=realm_id,
        is_bot=False,
        is_active=True,
    ).values_list("id", flat=True)
    # Pool of sample words; each user gets a random subset of 4.
    alert_words = [
        "algorithms",
        "complexity",
        "founded",
        "galaxy",
        "grammar",
        "illustrious",
        "natural",
        "objective",
        "people",
        "robotics",
        "study",
    ]
    recs: List[AlertWord] = []
    for user_id in user_ids:
        # random.sample picks 4 distinct words directly, instead of the
        # previous shuffle-whole-list-then-slice idiom.
        for word in random.sample(alert_words, 4):
            recs.append(
                AlertWord(
                    realm_id=realm_id,
                    user_profile_id=user_id,
                    word=word,
                )
            )
    AlertWord.objects.bulk_create(recs)
class Command(BaseCommand):
    """Management command that fills the database with development/test data.

    Creates realms, users, bots, streams, subscriptions, custom profile
    fields, and finally a configurable number of messages. With
    --test-suite it produces a deterministic data set (seeded RNG) used to
    build the backend test fixtures.
    """

    help = "Populate a test database"

    def add_arguments(self, parser: CommandParser) -> None:
        """Register the command-line knobs controlling how much data is generated."""
        parser.add_argument(
            "-n", "--num-messages", type=int, default=500, help="The number of messages to create."
        )

        parser.add_argument(
            "-b",
            "--batch-size",
            type=int,
            default=1000,
            help="How many messages to process in a single batch",
        )

        parser.add_argument(
            "--extra-users", type=int, default=0, help="The number of extra users to create"
        )

        parser.add_argument(
            "--extra-bots", type=int, default=0, help="The number of extra bots to create"
        )

        parser.add_argument(
            "--extra-streams", type=int, default=0, help="The number of extra streams to create"
        )

        parser.add_argument("--max-topics", type=int, help="The number of maximum topics to create")

        parser.add_argument(
            "--huddles",
            dest="num_huddles",
            type=int,
            default=3,
            help="The number of huddles to create.",
        )

        parser.add_argument(
            "--personals",
            dest="num_personals",
            type=int,
            default=6,
            help="The number of personal pairs to create.",
        )

        parser.add_argument("--threads", type=int, default=1, help="The number of threads to use.")

        parser.add_argument(
            "--percent-huddles",
            type=float,
            default=15,
            help="The percent of messages to be huddles.",
        )

        parser.add_argument(
            "--percent-personals",
            type=float,
            default=15,
            help="The percent of messages to be personals.",
        )

        parser.add_argument(
            "--stickyness",
            type=float,
            default=20,
            help="The percent of messages to repeat recent folks.",
        )

        parser.add_argument(
            "--nodelete",
            action="store_false",
            dest="delete",
            help="Whether to delete all the existing messages.",
        )

        parser.add_argument(
            "--test-suite",
            action="store_true",
            help="Configures populate_db to create a deterministic "
            "data set for the backend tests.",
        )

    def handle(self, *args: Any, **options: Any) -> None:
        """Create the realms/users/streams/etc., then generate messages.

        Roughly: (1) optionally wipe the DB and rebuild all fixed data
        (realms, users, bots, streams, subscriptions, profile fields);
        (2) create huddles/personals pairs and emoji/alert-word data;
        (3) generate and send the requested number of messages; (4) do
        post-message cleanup (extra bots, mark-as-read).
        """
        # Suppress spammy output from the push notifications logger
        push_notifications_logger.disabled = True

        if options["percent_huddles"] + options["percent_personals"] > 100:
            self.stderr.write("Error! More than 100% of messages allocated.\n")
            return

        # Get consistent data for backend tests.
        if options["test_suite"]:
            random.seed(0)

            with connection.cursor() as cursor:
                # Sometimes bugs relating to confusing recipient.id for recipient.type_id
                # or <object>.id for <object>.recipient_id remain undiscovered by the test suite
                # due to these numbers happening to coincide in such a way that it makes tests
                # accidentally pass. By bumping the Recipient.id sequence by a large enough number,
                # we can have those ids in a completely different range of values than object ids,
                # eliminating the possibility of such coincidences.
                cursor.execute("SELECT setval('zerver_recipient_id_seq', 100)")

        # If max_topics is not set, we set it proportional to the
        # number of messages.
        if options["max_topics"] is None:
            options["max_topics"] = 1 + options["num_messages"] // 100

        if options["delete"]:
            # Start by clearing all the data in our database
            clear_database()

            # Create our three default realms
            # Could in theory be done via zerver.lib.actions.do_create_realm, but
            # welcome-bot (needed for do_create_realm) hasn't been created yet
            create_internal_realm()
            zulip_realm = do_create_realm(
                string_id="zulip",
                name="Zulip Dev",
                emails_restricted_to_domains=False,
                email_address_visibility=Realm.EMAIL_ADDRESS_VISIBILITY_ADMINS,
                description="The Zulip development environment default organization."
                " It's great for testing!",
                invite_required=False,
                plan_type=Realm.SELF_HOSTED,
                org_type=Realm.ORG_TYPES["business"]["id"],
            )
            RealmDomain.objects.create(realm=zulip_realm, domain="zulip.com")
            assert zulip_realm.notifications_stream is not None
            zulip_realm.notifications_stream.name = "Verona"
            zulip_realm.notifications_stream.description = "A city in Italy"
            zulip_realm.notifications_stream.save(update_fields=["name", "description"])

            if options["test_suite"]:
                mit_realm = do_create_realm(
                    string_id="zephyr",
                    name="MIT",
                    emails_restricted_to_domains=True,
                    invite_required=False,
                    plan_type=Realm.SELF_HOSTED,
                    org_type=Realm.ORG_TYPES["business"]["id"],
                )
                RealmDomain.objects.create(realm=mit_realm, domain="mit.edu")

                lear_realm = do_create_realm(
                    string_id="lear",
                    name="Lear & Co.",
                    emails_restricted_to_domains=False,
                    invite_required=False,
                    plan_type=Realm.SELF_HOSTED,
                    org_type=Realm.ORG_TYPES["business"]["id"],
                )

                # Default to allowing all members to send mentions in
                # large streams for the test suite to keep
                # mention-related tests simple.
                zulip_realm.wildcard_mention_policy = Realm.WILDCARD_MENTION_POLICY_MEMBERS
                zulip_realm.save(update_fields=["wildcard_mention_policy"])

            # Create test Users (UserProfiles are automatically created,
            # as are subscriptions to the ability to receive personals).
            names = [
                ("Zoe", "ZOE@zulip.com"),
                ("Othello, the Moor of Venice", "othello@zulip.com"),
                ("Iago", "iago@zulip.com"),
                ("Prospero from The Tempest", "prospero@zulip.com"),
                ("Cordelia, Lear's daughter", "cordelia@zulip.com"),
                ("King Hamlet", "hamlet@zulip.com"),
                ("aaron", "AARON@zulip.com"),
                ("Polonius", "polonius@zulip.com"),
                ("Desdemona", "desdemona@zulip.com"),
                ("शिव", "shiva@zulip.com"),
            ]

            # For testing really large batches:
            # Create extra users with semi realistic names to make search
            # functions somewhat realistic. We'll still create 1000 users
            # like Extra222 User for some predictability.
            num_names = options["extra_users"]
            num_boring_names = 300

            for i in range(min(num_names, num_boring_names)):
                full_name = f"Extra{i:03} User"
                names.append((full_name, f"extrauser{i}@zulip.com"))

            if num_names > num_boring_names:
                fnames = [
                    "Amber",
                    "Arpita",
                    "Bob",
                    "Cindy",
                    "Daniela",
                    "Dan",
                    "Dinesh",
                    "Faye",
                    "François",
                    "George",
                    "Hank",
                    "Irene",
                    "James",
                    "Janice",
                    "Jenny",
                    "Jill",
                    "John",
                    "Kate",
                    "Katelyn",
                    "Kobe",
                    "Lexi",
                    "Manish",
                    "Mark",
                    "Matt",
                    "Mayna",
                    "Michael",
                    "Pete",
                    "Peter",
                    "Phil",
                    "Phillipa",
                    "Preston",
                    "Sally",
                    "Scott",
                    "Sandra",
                    "Steve",
                    "Stephanie",
                    "Vera",
                ]
                mnames = ["de", "van", "von", "Shaw", "T."]
                lnames = [
                    "Adams",
                    "Agarwal",
                    "Beal",
                    "Benson",
                    "Bonita",
                    "Davis",
                    "George",
                    "Harden",
                    "James",
                    "Jones",
                    "Johnson",
                    "Jordan",
                    "Lee",
                    "Leonard",
                    "Singh",
                    "Smith",
                    "Patel",
                    "Towns",
                    "Wall",
                ]
                non_ascii_names = [
                    "Günter",
                    "أحمد",
                    "Magnús",
                    "आशी",
                    "イツキ",
                    "语嫣",
                    "அருண்",
                    "Александр",
                    "José",
                ]
                # to imitate emoji insertions in usernames
                raw_emojis = ["😎", "😂", "🐱👤"]

                # Build "first [middle] (emoji|last)" names: ~70% get a middle
                # name (30% of those non-ASCII), ~10% get a trailing emoji
                # instead of a last name.
                for i in range(num_boring_names, num_names):
                    fname = random.choice(fnames) + str(i)
                    full_name = fname
                    if random.random() < 0.7:
                        if random.random() < 0.3:
                            full_name += " " + random.choice(non_ascii_names)
                        else:
                            full_name += " " + random.choice(mnames)

                    if random.random() < 0.1:
                        full_name += f" {random.choice(raw_emojis)} "
                    else:
                        full_name += " " + random.choice(lnames)
                    email = fname.lower() + "@zulip.com"
                    names.append((full_name, email))

            create_users(zulip_realm, names, tos_version=settings.TOS_VERSION)

            # Iago is the realm administrator (and a server staff member).
            iago = get_user_by_delivery_email("iago@zulip.com", zulip_realm)
            do_change_user_role(iago, UserProfile.ROLE_REALM_ADMINISTRATOR, acting_user=None)
            iago.is_staff = True
            iago.save(update_fields=["is_staff"])

            # We need to create at least two test drafts for Iago for the sake
            # of the cURL tests. Two since one will be deleted.
            # NOTE(review): naive datetime.now() while the rest of this file
            # uses timezone_now() -- confirm Draft.last_edit_time accepts a
            # naive datetime intentionally.
            Draft.objects.create(
                user_profile=iago,
                recipient=None,
                topic="Release Notes",
                content="Release 4.0 will contain ...",
                last_edit_time=datetime.now(),
            )
            Draft.objects.create(
                user_profile=iago,
                recipient=None,
                topic="Release Notes",
                content="Release 4.0 will contain many new features such as ... ",
                last_edit_time=datetime.now(),
            )

            desdemona = get_user_by_delivery_email("desdemona@zulip.com", zulip_realm)
            do_change_user_role(desdemona, UserProfile.ROLE_REALM_OWNER, acting_user=None)

            shiva = get_user_by_delivery_email("shiva@zulip.com", zulip_realm)
            do_change_user_role(shiva, UserProfile.ROLE_MODERATOR, acting_user=None)

            guest_user = get_user_by_delivery_email("polonius@zulip.com", zulip_realm)
            guest_user.role = UserProfile.ROLE_GUEST
            guest_user.save(update_fields=["role"])

            # These bots are directly referenced from code and thus
            # are needed for the test suite.
            zulip_realm_bots = [
                ("Zulip Error Bot", "error-bot@zulip.com"),
                ("Zulip Default Bot", "default-bot@zulip.com"),
            ]
            for i in range(options["extra_bots"]):
                zulip_realm_bots.append((f"Extra Bot {i}", f"extrabot{i}@zulip.com"))

            create_users(zulip_realm, zulip_realm_bots, bot_type=UserProfile.DEFAULT_BOT)

            zoe = get_user_by_delivery_email("zoe@zulip.com", zulip_realm)
            zulip_webhook_bots = [
                ("Zulip Webhook Bot", "webhook-bot@zulip.com"),
            ]
            # If a stream is not supplied in the webhook URL, the webhook
            # will (in some cases) send the notification as a PM to the
            # owner of the webhook bot, so bot_owner can't be None
            create_users(
                zulip_realm,
                zulip_webhook_bots,
                bot_type=UserProfile.INCOMING_WEBHOOK_BOT,
                bot_owner=zoe,
            )

            aaron = get_user_by_delivery_email("AARON@zulip.com", zulip_realm)

            zulip_outgoing_bots = [
                ("Outgoing Webhook", "outgoing-webhook@zulip.com"),
            ]
            create_users(
                zulip_realm,
                zulip_outgoing_bots,
                bot_type=UserProfile.OUTGOING_WEBHOOK_BOT,
                bot_owner=aaron,
            )
            outgoing_webhook = get_user("outgoing-webhook@zulip.com", zulip_realm)
            add_service(
                "outgoing-webhook",
                user_profile=outgoing_webhook,
                interface=Service.GENERIC,
                base_url="http://127.0.0.1:5002",
                token=generate_api_key(),
            )

            # Add the realm internal bots to each realm.
            create_if_missing_realm_internal_bots()

            # Create public streams.
            signups_stream = Realm.INITIAL_PRIVATE_STREAM_NAME

            stream_list = [
                "Verona",
                "Denmark",
                "Scotland",
                "Venice",
                "Rome",
                signups_stream,
            ]
            stream_dict: Dict[str, Dict[str, Any]] = {
                "Denmark": {"description": "A Scandinavian country"},
                "Scotland": {"description": "Located in the United Kingdom"},
                "Venice": {"description": "A northeastern Italian city"},
                "Rome": {"description": "Yet another Italian city", "is_web_public": True},
            }

            bulk_create_streams(zulip_realm, stream_dict)
            recipient_streams: List[int] = [
                Stream.objects.get(name=name, realm=zulip_realm).id for name in stream_list
            ]

            # Create subscriptions to streams. The following
            # algorithm will give each of the users a different but
            # deterministic subset of the streams (given a fixed list
            # of users). For the test suite, we have a fixed list of
            # subscriptions to make sure test data is consistent
            # across platforms.

            subscriptions_list: List[Tuple[UserProfile, Recipient]] = []
            profiles: Sequence[UserProfile] = list(
                UserProfile.objects.select_related().filter(is_bot=False).order_by("email")
            )

            if options["test_suite"]:
                subscriptions_map = {
                    "AARON@zulip.com": ["Verona"],
                    "cordelia@zulip.com": ["Verona"],
                    "hamlet@zulip.com": ["Verona", "Denmark", signups_stream],
                    "iago@zulip.com": [
                        "Verona",
                        "Denmark",
                        "Scotland",
                        signups_stream,
                    ],
                    "othello@zulip.com": ["Verona", "Denmark", "Scotland"],
                    "prospero@zulip.com": ["Verona", "Denmark", "Scotland", "Venice"],
                    "ZOE@zulip.com": ["Verona", "Denmark", "Scotland", "Venice", "Rome"],
                    "polonius@zulip.com": ["Verona"],
                    "desdemona@zulip.com": [
                        "Verona",
                        "Denmark",
                        "Venice",
                        signups_stream,
                    ],
                    "shiva@zulip.com": ["Verona", "Denmark", "Scotland"],
                }

                for profile in profiles:
                    email = profile.delivery_email
                    if email not in subscriptions_map:
                        raise Exception(f"Subscriptions not listed for user {email}")

                    for stream_name in subscriptions_map[email]:
                        stream = Stream.objects.get(name=stream_name, realm=zulip_realm)
                        r = Recipient.objects.get(type=Recipient.STREAM, type_id=stream.id)
                        subscriptions_list.append((profile, r))
            else:
                num_streams = len(recipient_streams)
                num_users = len(profiles)
                for i, profile in enumerate(profiles):
                    # Subscribe to some streams.
                    # Users later in the (email-sorted) list get proportionally
                    # more of the stream list.
                    fraction = float(i) / num_users
                    num_recips = int(num_streams * fraction) + 1

                    for type_id in recipient_streams[:num_recips]:
                        r = Recipient.objects.get(type=Recipient.STREAM, type_id=type_id)
                        subscriptions_list.append((profile, r))

            subscriptions_to_add: List[Subscription] = []
            event_time = timezone_now()
            all_subscription_logs: List[RealmAuditLog] = []

            i = 0
            for profile, recipient in subscriptions_list:
                i += 1
                color = STREAM_ASSIGNMENT_COLORS[i % len(STREAM_ASSIGNMENT_COLORS)]

                s = Subscription(
                    recipient=recipient,
                    user_profile=profile,
                    is_user_active=profile.is_active,
                    color=color,
                )
                subscriptions_to_add.append(s)

                log = RealmAuditLog(
                    realm=profile.realm,
                    modified_user=profile,
                    modified_stream_id=recipient.type_id,
                    event_last_message_id=0,
                    event_type=RealmAuditLog.SUBSCRIPTION_CREATED,
                    event_time=event_time,
                )
                all_subscription_logs.append(log)

            Subscription.objects.bulk_create(subscriptions_to_add)
            RealmAuditLog.objects.bulk_create(all_subscription_logs)

            # Create custom profile field data
            phone_number = try_add_realm_custom_profile_field(
                zulip_realm, "Phone number", CustomProfileField.SHORT_TEXT, hint=""
            )
            biography = try_add_realm_custom_profile_field(
                zulip_realm,
                "Biography",
                CustomProfileField.LONG_TEXT,
                hint="What are you known for?",
            )
            favorite_food = try_add_realm_custom_profile_field(
                zulip_realm,
                "Favorite food",
                CustomProfileField.SHORT_TEXT,
                hint="Or drink, if you'd prefer",
            )
            field_data: ProfileFieldData = {
                "vim": {"text": "Vim", "order": "1"},
                "emacs": {"text": "Emacs", "order": "2"},
            }
            favorite_editor = try_add_realm_custom_profile_field(
                zulip_realm, "Favorite editor", CustomProfileField.SELECT, field_data=field_data
            )
            birthday = try_add_realm_custom_profile_field(
                zulip_realm, "Birthday", CustomProfileField.DATE
            )
            favorite_website = try_add_realm_custom_profile_field(
                zulip_realm,
                "Favorite website",
                CustomProfileField.URL,
                hint="Or your personal blog's URL",
            )
            mentor = try_add_realm_custom_profile_field(
                zulip_realm, "Mentor", CustomProfileField.USER
            )
            github_profile = try_add_realm_default_custom_profile_field(zulip_realm, "github")

            # Fill in values for Iago and Hamlet
            hamlet = get_user_by_delivery_email("hamlet@zulip.com", zulip_realm)
            do_update_user_custom_profile_data_if_changed(
                iago,
                [
                    {"id": phone_number.id, "value": "+1-234-567-8901"},
                    {"id": biography.id, "value": "Betrayer of Othello."},
                    {"id": favorite_food.id, "value": "Apples"},
                    {"id": favorite_editor.id, "value": "emacs"},
                    {"id": birthday.id, "value": "2000-01-01"},
                    {"id": favorite_website.id, "value": "https://zulip.readthedocs.io/en/latest/"},
                    {"id": mentor.id, "value": [hamlet.id]},
                    {"id": github_profile.id, "value": "zulip"},
                ],
            )
            do_update_user_custom_profile_data_if_changed(
                hamlet,
                [
                    {"id": phone_number.id, "value": "+0-11-23-456-7890"},
                    {
                        "id": biography.id,
                        "value": "I am:\n* The prince of Denmark\n* Nephew to the usurping Claudius",
                    },
                    {"id": favorite_food.id, "value": "Dark chocolate"},
                    {"id": favorite_editor.id, "value": "vim"},
                    {"id": birthday.id, "value": "1900-01-01"},
                    {"id": favorite_website.id, "value": "https://blog.zulig.org"},
                    {"id": mentor.id, "value": [iago.id]},
                    {"id": github_profile.id, "value": "zulipbot"},
                ],
            )
        else:
            # Without --delete (i.e. --nodelete), reuse the existing realm
            # and its streams instead of rebuilding everything.
            zulip_realm = get_realm("zulip")
            recipient_streams = [
                klass.type_id for klass in Recipient.objects.filter(type=Recipient.STREAM)
            ]

        # Extract a list of all users
        user_profiles: List[UserProfile] = list(UserProfile.objects.filter(is_bot=False))

        # Create a test realm emoji.
        # NOTE(review): `iago` is only bound in the --delete branch above, so
        # this line would raise NameError when run with --nodelete -- confirm
        # whether that combination is supported.
        IMAGE_FILE_PATH = static_path("images/test-images/checkbox.png")
        with open(IMAGE_FILE_PATH, "rb") as fp:
            check_add_realm_emoji(zulip_realm, "green_tick", iago, File(fp))

        if not options["test_suite"]:
            # Populate users with some bar data
            for user in user_profiles:
                status: int = UserPresence.ACTIVE
                date = timezone_now()
                client = get_client("website")
                if user.full_name[0] <= "H":
                    client = get_client("ZulipAndroid")
                UserPresence.objects.get_or_create(
                    user_profile=user,
                    realm_id=user.realm_id,
                    client=client,
                    timestamp=date,
                    status=status,
                )

        user_profiles_ids = [user_profile.id for user_profile in user_profiles]

        # Create several initial huddles
        for i in range(options["num_huddles"]):
            get_huddle(random.sample(user_profiles_ids, random.randint(3, 4)))

        # Create several initial pairs for personals
        personals_pairs = [
            random.sample(user_profiles_ids, 2) for i in range(options["num_personals"])
        ]

        create_alert_words(zulip_realm.id)

        # Generate a new set of test data.
        create_test_data()

        # prepopulate the URL preview/embed data for the links present
        # in the config.generate_data.json data set. This makes it
        # possible for populate_db to run happily without Internet
        # access.
        with open("zerver/tests/fixtures/docs_url_preview_data.json", "rb") as f:
            urls_with_preview_data = orjson.loads(f.read())
            for url in urls_with_preview_data:
                cache_set(url, urls_with_preview_data[url], PREVIEW_CACHE_NAME)

        if options["delete"]:
            if options["test_suite"]:
                # Create test users; the MIT ones are needed to test
                # the Zephyr mirroring codepaths.
                testsuite_mit_users = [
                    ("Fred Sipb (MIT)", "sipbtest@mit.edu"),
                    ("Athena Consulting Exchange User (MIT)", "starnine@mit.edu"),
                    ("Esp Classroom (MIT)", "espuser@mit.edu"),
                ]
                create_users(mit_realm, testsuite_mit_users, tos_version=settings.TOS_VERSION)

                testsuite_lear_users = [
                    ("King Lear", "king@lear.org"),
                    ("Cordelia, Lear's daughter", "cordelia@zulip.com"),
                ]
                create_users(lear_realm, testsuite_lear_users, tos_version=settings.TOS_VERSION)

            if not options["test_suite"]:
                # To keep the messages.json fixtures file for the test
                # suite fast, don't add these users and subscriptions
                # when running populate_db for the test suite

                # to imitate emoji insertions in stream names
                raw_emojis = ["😎", "😂", "🐱👤"]

                zulip_stream_dict: Dict[str, Dict[str, Any]] = {
                    "devel": {"description": "For developing"},
                    # ビデオゲーム - VideoGames (japanese)
                    "ビデオゲーム": {"description": f"Share your favorite video games! {raw_emojis[2]}"},
                    "announce": {
                        "description": "For announcements",
                        "stream_post_policy": Stream.STREAM_POST_POLICY_ADMINS,
                    },
                    "design": {"description": "For design"},
                    "support": {"description": "For support"},
                    "social": {"description": "For socializing"},
                    "test": {"description": "For testing `code`"},
                    "errors": {"description": "For errors"},
                    # 조리법 - Recipes (Korean) , Пельмени - Dumplings (Russian)
                    "조리법 "
                    + raw_emojis[0]: {"description": "Everything cooking, from pasta to Пельмени"},
                }

                extra_stream_names = [
                    "802.11a",
                    "Ad Hoc Network",
                    "Augmented Reality",
                    "Cycling",
                    "DPI",
                    "FAQ",
                    "FiFo",
                    "commits",
                    "Control panel",
                    "desktop",
                    "компьютеры",
                    "Data security",
                    "desktop",
                    "काम",
                    "discussions",
                    "Cloud storage",
                    "GCI",
                    "Vaporware",
                    "Recent Trends",
                    "issues",
                    "live",
                    "Health",
                    "mobile",
                    "空間",
                    "provision",
                    "hidrógeno",
                    "HR",
                    "アニメ",
                ]

                # Add stream names and stream descriptions
                for i in range(options["extra_streams"]):
                    extra_stream_name = random.choice(extra_stream_names) + " " + str(i)

                    # to imitate emoji insertions in stream names
                    if random.random() <= 0.15:
                        extra_stream_name += random.choice(raw_emojis)

                    zulip_stream_dict[extra_stream_name] = {
                        "description": "Auto-generated extra stream.",
                    }

                bulk_create_streams(zulip_realm, zulip_stream_dict)
                # Now that we've created the notifications stream, configure it properly.
                zulip_realm.notifications_stream = get_stream("announce", zulip_realm)
                zulip_realm.save(update_fields=["notifications_stream"])

                # Add a few default streams
                for default_stream_name in ["design", "devel", "social", "support"]:
                    DefaultStream.objects.create(
                        realm=zulip_realm, stream=get_stream(default_stream_name, zulip_realm)
                    )

                # Now subscribe everyone to these streams
                subscribe_users_to_streams(zulip_realm, zulip_stream_dict)

            create_user_groups()

            if not options["test_suite"]:
                # We populate the analytics database here for
                # development purpose only
                call_command("populate_analytics_db")

        # Split the requested message count across the configured number of
        # "threads" (jobs are actually run sequentially below), giving each
        # job its own random seed.
        threads = options["threads"]
        jobs: List[Tuple[int, List[List[int]], Dict[str, Any], int]] = []
        for i in range(threads):
            count = options["num_messages"] // threads
            if i < options["num_messages"] % threads:
                count += 1
            jobs.append((count, personals_pairs, options, random.randint(0, 10 ** 10)))

        for job in jobs:
            generate_and_send_messages(job)

        if options["delete"]:
            if not options["test_suite"]:
                # These bots are not needed by the test suite
                # Also, we don't want them interacting with each other
                # in dev setup.
                internal_zulip_users_nosubs = [
                    ("Zulip Commit Bot", "commit-bot@zulip.com"),
                    ("Zulip Trac Bot", "trac-bot@zulip.com"),
                    ("Zulip Nagios Bot", "nagios-bot@zulip.com"),
                ]
                create_users(
                    zulip_realm, internal_zulip_users_nosubs, bot_type=UserProfile.DEFAULT_BOT
                )

        mark_all_messages_as_read()
        self.stdout.write("Successfully populated test database.\n")

        push_notifications_logger.disabled = False
def mark_all_messages_as_read() -> None:
    """
    We want to keep these two flags intact after we
    create messages:

        has_alert_word
        is_private

    But we will mark all messages as read to save a step for users.
    """
    # Mark all messages as read.
    # One bulk UPDATE that ORs the `read` bit into every UserMessage row's
    # flags; bitor only sets the bit, so all other flag bits survive.
    UserMessage.objects.all().update(
        flags=F("flags").bitor(UserMessage.flags.read),
    )
# Cache of Recipient rows keyed by id, so message generation does not hit
# the database once per generated message.
recipient_hash: Dict[int, Recipient] = {}


def get_recipient_by_id(rid: int) -> Recipient:
    """Return the Recipient with id *rid*, memoized in ``recipient_hash``.

    Bug fix: the cache was consulted but never populated, so every call went
    to the database and the dict stayed empty forever. We now store each
    fetched row. Recipient rows are immutable for the duration of this
    script, so caching them is safe.
    """
    if rid in recipient_hash:
        return recipient_hash[rid]
    recipient = Recipient.objects.get(id=rid)
    recipient_hash[rid] = recipient
    return recipient
# Create some test messages, including:
# - multiple streams
# - multiple subjects per stream
# - multiple huddles
# - multiple personals conversations
# - multiple messages per subject
# - both single and multi-line content
def generate_and_send_messages(
    data: Tuple[int, Sequence[Sequence[int]], Mapping[str, Any], int]
) -> int:
    """Generate *tot_messages* random messages and send them in batches.

    *data* is ``(tot_messages, personals_pairs, options, random_seed)``.
    Message content is cycled from the test_messages.json corpus; each
    message is randomly a huddle, personal, or stream message per the
    --percent-huddles/--percent-personals options, and with probability
    --stickyness repeats the previous message's recipient (continuing the
    same topic for streams). Returns the number of messages requested.
    """
    (tot_messages, personals_pairs, options, random_seed) = data
    random.seed(random_seed)

    with open(
        os.path.join(get_or_create_dev_uuid_var_path("test-backend"), "test_messages.json"), "rb"
    ) as infile:
        dialog = orjson.loads(infile.read())
    random.shuffle(dialog)
    texts = itertools.cycle(dialog)

    # We need to filter out streams from the analytics realm as we don't want to generate
    # messages to its streams - and they might also have no subscribers, which would break
    # our message generation mechanism below.
    stream_ids = Stream.objects.filter(realm=get_realm("zulip")).values_list("id", flat=True)
    recipient_streams: List[int] = [
        recipient.id
        for recipient in Recipient.objects.filter(type=Recipient.STREAM, type_id__in=stream_ids)
    ]
    recipient_huddles: List[int] = [h.id for h in Recipient.objects.filter(type=Recipient.HUDDLE)]

    # Map huddle recipient id -> subscribed user-profile ids (possible senders).
    huddle_members: Dict[int, List[int]] = {}
    for h in recipient_huddles:
        huddle_members[h] = [s.user_profile.id for s in Subscription.objects.filter(recipient_id=h)]

    # Generate different topics for each stream
    possible_topics = {}
    for stream_id in recipient_streams:
        possible_topics[stream_id] = generate_topics(options["max_topics"])

    message_batch_size = options["batch_size"]
    num_messages = 0
    random_max = 1000000
    # Per-message bookkeeping so the "stickyness" branch can reuse the
    # previous message's (type, recipient, topic/pair).
    recipients: Dict[int, Tuple[int, int, Dict[str, Any]]] = {}
    messages: List[Message] = []

    while num_messages < tot_messages:
        saved_data: Dict[str, Any] = {}
        message = Message()
        message.sending_client = get_client("populate_db")

        message.content = next(texts)

        randkey = random.randint(1, random_max)
        if (
            num_messages > 0
            and random.randint(1, random_max) * 100.0 / random_max < options["stickyness"]
        ):
            # Use an old recipient
            message_type, recipient_id, saved_data = recipients[num_messages - 1]
            if message_type == Recipient.PERSONAL:
                personals_pair = saved_data["personals_pair"]
                random.shuffle(personals_pair)
            elif message_type == Recipient.STREAM:
                message.subject = saved_data["subject"]
                message.recipient = get_recipient_by_id(recipient_id)
            elif message_type == Recipient.HUDDLE:
                message.recipient = get_recipient_by_id(recipient_id)
        elif randkey <= random_max * options["percent_huddles"] / 100.0:
            message_type = Recipient.HUDDLE
            message.recipient = get_recipient_by_id(random.choice(recipient_huddles))
        elif (
            randkey
            <= random_max * (options["percent_huddles"] + options["percent_personals"]) / 100.0
        ):
            message_type = Recipient.PERSONAL
            personals_pair = random.choice(personals_pairs)
            random.shuffle(personals_pair)
        elif randkey <= random_max * 1.0:
            message_type = Recipient.STREAM
            message.recipient = get_recipient_by_id(random.choice(recipient_streams))

        # Second stage: pick a sender (and, for personals/streams, finalize
        # the recipient/topic) appropriate to the chosen message type.
        if message_type == Recipient.HUDDLE:
            sender_id = random.choice(huddle_members[message.recipient.id])
            message.sender = get_user_profile_by_id(sender_id)
        elif message_type == Recipient.PERSONAL:
            message.recipient = Recipient.objects.get(
                type=Recipient.PERSONAL, type_id=personals_pair[0]
            )
            message.sender = get_user_profile_by_id(personals_pair[1])
            saved_data["personals_pair"] = personals_pair
        elif message_type == Recipient.STREAM:
            # Pick a random subscriber to the stream
            message.sender = random.choice(
                list(Subscription.objects.filter(recipient=message.recipient))
            ).user_profile
            message.subject = random.choice(possible_topics[message.recipient.id])
            saved_data["subject"] = message.subject

        message.date_sent = choose_date_sent(num_messages, tot_messages, options["threads"])
        messages.append(message)

        recipients[num_messages] = (message_type, message.recipient.id, saved_data)
        num_messages += 1

        if (num_messages % message_batch_size) == 0:
            # Send the batch and empty the list:
            send_messages(messages)
            messages = []

    if len(messages) > 0:
        # If there are unsent messages after exiting the loop, send them:
        send_messages(messages)

    return tot_messages
def send_messages(messages: List[Message]) -> None:
    """Deliver a batch of generated messages (plus random reactions).

    Runs with RabbitMQ disabled so all deferred work happens inline.
    """
    # We disable USING_RABBITMQ here, so that deferred work is
    # executed in do_send_message_messages, rather than being
    # queued. This is important, because otherwise, if run-dev.py
    # wasn't running when populate_db was run, a developer can end
    # up with queued events that reference objects from a previous
    # life of the database, which naturally throws exceptions.
    settings.USING_RABBITMQ = False
    do_send_messages([build_message_send_dict(message=message) for message in messages])
    bulk_create_reactions(messages)
    settings.USING_RABBITMQ = True
def get_message_to_users(message_ids: List[int]) -> Dict[int, List[int]]:
    """Map each message id in *message_ids* to the user ids that received it."""
    recipients_by_message: Dict[int, List[int]] = defaultdict(list)
    rows = UserMessage.objects.filter(
        message_id__in=message_ids,
    ).values("message_id", "user_profile_id")
    for row in rows:
        recipients_by_message[row["message_id"]].append(row["user_profile_id"])
    return recipients_by_message
def bulk_create_reactions(all_messages: List[Message]) -> None:
    """Attach random Unicode-emoji reactions to ~20% of *all_messages*.

    For each sampled message that has recipients, 1-7 of those users react
    with 1-3 emojis each; everything is inserted with one bulk_create.
    """
    new_reactions: List[Reaction] = []

    sample_size = int(0.2 * len(all_messages))
    sampled_ids = [message.id for message in random.sample(all_messages, sample_size)]
    message_to_users = get_message_to_users(sampled_ids)

    for message_id in sampled_ids:
        recipient_user_ids = message_to_users[message_id]
        if not recipient_user_ids:
            continue
        # Now let between 1 and 7 users react.
        #
        # Ideally, we'd make exactly 1 reaction more common than
        # this algorithm generates.
        max_num_users = min(7, len(recipient_user_ids))
        reacting_user_ids = random.sample(
            recipient_user_ids, random.randrange(1, max_num_users + 1)
        )
        for user_id in reacting_user_ids:
            # each user does between 1 and 3 emojis
            emoji_count = random.choice([1, 2, 3])
            for emoji_name, emoji_code in random.sample(DEFAULT_EMOJIS, emoji_count):
                new_reactions.append(
                    Reaction(
                        user_profile_id=user_id,
                        message_id=message_id,
                        emoji_name=emoji_name,
                        emoji_code=emoji_code,
                        reaction_type=Reaction.UNICODE_EMOJI,
                    )
                )

    Reaction.objects.bulk_create(new_reactions)
def choose_date_sent(num_messages: int, tot_messages: int, threads: int) -> datetime:
    """Pick a spoofed send time for message number *num_messages* of *tot_messages*."""
    # Spoofing time not supported with threading
    if threads != 1:
        return timezone_now()

    # Distributes 80% of messages starting from 5 days ago, over a period
    # of 3 days. Then, distributes remaining messages over past 24 hours.
    first_chunk_count = int(tot_messages * 0.8)
    second_chunk_count = tot_messages - first_chunk_count

    if num_messages < first_chunk_count:
        base_date = timezone_now() - timezone_timedelta(days=5)
        seconds_per_message = 3 * 24 * 60 * 60 / first_chunk_count
        index_in_chunk = num_messages
    else:
        base_date = timezone_now() - timezone_timedelta(days=1)
        seconds_per_message = 24 * 60 * 60 / second_chunk_count
        index_in_chunk = num_messages - first_chunk_count

    # Jitter the message uniformly within its slot of the chunk.
    lower_bound = seconds_per_message * index_in_chunk
    upper_bound = seconds_per_message * (index_in_chunk + 1)
    offset_seconds = random.uniform(lower_bound, upper_bound)
    return base_date + timezone_timedelta(seconds=offset_seconds)
def create_user_groups() -> None:
    """Create the 'hamletcharacters' user group in the zulip dev realm."""
    zulip = get_realm("zulip")
    hamlet_characters = [
        get_user_by_delivery_email("cordelia@zulip.com", zulip),
        get_user_by_delivery_email("hamlet@zulip.com", zulip),
    ]
    create_user_group(
        "hamletcharacters", hamlet_characters, zulip, description="Characters of Hamlet"
    )
| hackerkid/zulip | zilencer/management/commands/populate_db.py | Python | apache-2.0 | 45,898 |
# -*- coding: utf-8 -*-
from abc import ABC
from copy import deepcopy
from dataclasses import asdict, dataclass
from datetime import datetime, timezone
from enum import Enum, unique
from typing import Optional
__author__ = 'ft'
class SessionNSBase(ABC):
    """Common (de)serialization behavior for session namespace dataclasses."""

    def to_dict(self):
        """Serialize this dataclass instance into a plain dict."""
        return asdict(self)

    @classmethod
    def from_dict(cls, data):
        """Build an instance from *data*, leaving the caller's dict untouched."""
        kwargs = deepcopy(data)  # do not modify callers data
        return cls(**kwargs)
@unique
class LoginApplication(Enum):
    """Which eduID application performed the login (stored in the session)."""

    idp = 'idp'
    authn = 'authn'
    signup = 'signup'
@dataclass()
class Common(SessionNSBase):
    """Session data shared by all eduID applications.

    Serialization converts the ``login_source`` enum to/from its string value.
    """

    eppn: Optional[str] = None
    is_logged_in: bool = False
    login_source: Optional[LoginApplication] = None
    preferred_language: Optional[str] = None

    def to_dict(self):
        """Serialize to a dict, with login_source as its plain string value."""
        data = asdict(self)
        source = data.get('login_source')
        if source is not None:
            data['login_source'] = source.value
        return data

    @classmethod
    def from_dict(cls, data):
        """Deserialize, turning a login_source string back into the enum."""
        kwargs = deepcopy(data)  # do not modify callers data
        source = kwargs.get('login_source')
        if source is not None:
            kwargs['login_source'] = LoginApplication(source)
        return cls(**kwargs)
@dataclass()
class MfaAction(SessionNSBase):
    """Outcome of a multi-factor authentication action, shared via the session."""

    # Whether the MFA step completed successfully.
    success: bool = False
    # NOTE(review): the fields below look like SAML authn-response details
    # (issuer entityId, AuthnInstant, AuthnContextClassRef) -- confirm
    # against the code that populates this namespace.
    issuer: Optional[str] = None
    authn_instant: Optional[str] = None
    authn_context: Optional[str] = None
@dataclass()
class TimestampedNS(SessionNSBase):
    """Session namespace base carrying a timestamp, serialized as a string."""

    ts: Optional[datetime] = None

    def to_dict(self):
        """Serialize, rendering ``ts`` as a UNIX-time string."""
        data = super().to_dict()
        timestamp = data.get('ts')
        if timestamp is not None:
            data['ts'] = str(int(timestamp.timestamp()))
        return data

    @classmethod
    def from_dict(cls, data):
        """Deserialize, parsing ``ts`` from either UNIX time or ISO format."""
        kwargs = deepcopy(data)  # do not modify callers data
        raw_ts = kwargs.get('ts')
        if raw_ts is not None:
            # Load timestamp from ISO format string, or fallback to old UNIX time.
            # When this code is deployed everywhere, we can change to ISO format in to_dict above.
            if isinstance(raw_ts, str) and raw_ts.isdigit():
                kwargs['ts'] = datetime.fromtimestamp(int(raw_ts), tz=timezone.utc)
            else:
                kwargs['ts'] = datetime.fromisoformat(raw_ts)
        return cls(**kwargs)
@dataclass
class ResetPasswordNS(SessionNSBase):
    """Session state for the reset-password flow."""

    # Hash of the password generated for the user during the reset flow.
    generated_password_hash: Optional[str] = None
    # XXX the keys below are not in use yet. They are set in eduid-common,
    # in a way that the security app understands. Once the (reset|change)
    # password views are removed from the security app, we will be able to
    # start using them. The session key reauthn-for-chpass is in the same
    # situation.
    extrasec_u2f_challenge: Optional[str] = None
    extrasec_webauthn_state: Optional[str] = None
@dataclass()
class Signup(TimestampedNS):
    """Signup app session state (timestamped via ``TimestampedNS.ts``)."""

    # E-mail verification code for the pending signup.
    email_verification_code: Optional[str] = None
@dataclass()
class Actions(TimestampedNS):
    """Actions app session state (timestamped via ``TimestampedNS.ts``)."""

    # NOTE(review): presumably an identifier tying the actions run to a
    # session -- confirm against the actions app before relying on this.
    session: Optional[str] = None
@dataclass()
class IdP_Namespace(TimestampedNS):
    """IdP app session state (timestamped via ``TimestampedNS.ts``)."""

    # The SSO cookie value last set by the IdP. Used to debug issues with browsers not
    # honoring Set-Cookie in redirects, or something.
    sso_cookie_val: Optional[str] = None
| SUNET/eduid-common | src/eduid_common/session/namespaces.py | Python | bsd-3-clause | 3,156 |
# PyAlgoTrade
#
# Copyright 2011-2015 Gabriel Martin Becedillas Ruiz
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. moduleauthor:: Gabriel Martin Becedillas Ruiz <gabriel.becedillas@gmail.com>
"""
import engine.logger
from engine.optimizer import base
from engine.optimizer import xmlrpcserver
logger = engine.logger.getLogger(__name__)
class Results(object):
    """Container pairing the best parameter combination with its result."""

    def __init__(self, parameters, result):
        self.__params = parameters
        self.__outcome = result

    def getParameters(self):
        """Returns a sequence of parameter values."""
        return self.__params

    def getResult(self):
        """Returns the result for a given set of parameters."""
        return self.__outcome
def serve(barFeed, strategyParameters, address, port):
    """Executes a server that will provide bars and strategy parameters for workers to use.

    :param barFeed: The bar feed that each worker will use to backtest the strategy.
    :type barFeed: :class:`engine.barfeed.BarFeed`.
    :param strategyParameters: The set of parameters to use for backtesting. An iterable object where **each element is a tuple that holds parameter values**.
    :param address: The address to listen for incoming worker connections.
    :type address: string.
    :param port: The port to listen for incoming worker connections.
    :type port: int.
    :rtype: A :class:`Results` instance with the best results found or None if no results were obtained.
    """
    parameterSource = base.ParameterSource(strategyParameters)
    resultSink = base.ResultSinc()

    # Run the XML-RPC server until all parameter combinations are processed.
    server = xmlrpcserver.Server(parameterSource, resultSink, barFeed, address, port)
    logger.info("Starting server")
    server.serve()
    logger.info("Server finished")

    bestResult, bestParameters = resultSink.getBest()
    if bestResult is None:
        logger.error("No results. All jobs failed or no jobs were processed.")
        return None
    logger.info("Best final result %s with parameters %s" % (bestResult, bestParameters.args))
    return Results(bestParameters.args, bestResult)
| Yam-cn/potato | engine/optimizer/server.py | Python | apache-2.0 | 2,635 |
# User creation text spoke
#
# Copyright (C) 2013-2014 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from pyanaconda.core.configuration.anaconda import conf
from pyanaconda.core.constants import FIRSTBOOT_ENVIRON, PASSWORD_SET, PASSWORD_POLICY_USER
from pyanaconda.flags import flags
from pyanaconda.core.i18n import N_, _
from pyanaconda.core.regexes import GECOS_VALID
from pyanaconda.modules.common.constants.services import USERS
from pyanaconda.modules.common.util import is_module_available
from pyanaconda.ui.categories.user_settings import UserSettingsCategory
from pyanaconda.ui.common import FirstbootSpokeMixIn
from pyanaconda.ui.tui.spokes import NormalTUISpoke
from pyanaconda.ui.tui.tuiobject import Dialog, PasswordDialog, report_if_failed, report_check_func
from pyanaconda.ui.lib.users import get_user_list, set_user_list
from pyanaconda.core.users import guess_username, check_username, check_grouplist
from simpleline.render.screen import InputState
from simpleline.render.containers import ListColumnContainer
from simpleline.render.widgets import CheckboxWidget, EntryWidget
__all__ = ["UserSpoke"]

# Error shown when the full name contains ':' (the GECOS field separator).
FULLNAME_ERROR_MSG = N_("Full name can't contain the ':' character")
class UserSpoke(FirstbootSpokeMixIn, NormalTUISpoke):
    """
    .. inheritance-diagram:: UserSpoke
       :parts: 3
    """
    category = UserSettingsCategory

    @staticmethod
    def get_screen_id():
        """Return a unique id of this UI screen."""
        return "user-configuration"

    @classmethod
    def should_run(cls, environment, data):
        """Should the spoke run?"""
        if not is_module_available(USERS):
            return False
        if FirstbootSpokeMixIn.should_run(environment, data):
            return True
        # the user spoke should run always in the anaconda and in firstboot only
        # when doing reconfig or if no user has been created in the installation
        users_module = USERS.get_proxy()
        user_list = get_user_list(users_module)
        if environment == FIRSTBOOT_ENVIRON and data and not user_list:
            return True
        return False

    def __init__(self, data, storage, payload):
        FirstbootSpokeMixIn.__init__(self)
        NormalTUISpoke.__init__(self, data, storage, payload)
        self.initialize_start()
        # connect to the Users DBus module
        # (fixed: the proxy used to be created twice in this method)
        self._users_module = USERS.get_proxy()
        self.title = N_("User creation")
        self._container = None
        # was user creation requested by the Users DBus module
        # - at the moment this basically means user creation was
        #   requested via kickstart
        # - note that this does not currently update when user
        #   list is changed via DBus
        self._user_requested = False
        self._user_cleared = False
        # should a user be created ?
        self._create_user = False
        self._user_list = get_user_list(self._users_module, add_default=True)
        # if user has a name, it's an actual user that has been requested,
        # rather than a default user added by us
        if self.user.name:
            self._user_requested = True
            self._create_user = True
        self._use_password = self.user.is_crypted or self.user.password
        self._groups = ""
        self._is_admin = False
        self.errors = []
        self.initialize_done()

    @property
    def user(self):
        """The user that is manipulated by the User spoke.

        This user is always the first one in the user list.

        :return: a UserData instance
        """
        return self._user_list[0]

    def refresh(self, args=None):
        """Rebuild the spoke UI from the current user configuration."""
        NormalTUISpoke.refresh(self, args)
        # refresh the user list
        self._user_list = get_user_list(self._users_module, add_default=True,
                                        add_if_not_empty=self._user_cleared)
        self._is_admin = self.user.has_admin_priviledges()
        self._groups = ", ".join(self.user.groups)
        self._container = ListColumnContainer(1)
        w = CheckboxWidget(title=_("Create user"), completed=self._create_user)
        self._container.add(w, self._set_create_user)
        # the remaining widgets are only shown when user creation is enabled
        if self._create_user:
            dialog = Dialog(title=_("Full name"), conditions=[self._check_fullname])
            self._container.add(EntryWidget(dialog.title, self.user.gecos), self._set_fullname, dialog)
            dialog = Dialog(title=_("User name"), conditions=[self._check_username])
            self._container.add(EntryWidget(dialog.title, self.user.name), self._set_username, dialog)
            w = CheckboxWidget(title=_("Use password"), completed=self._use_password)
            self._container.add(w, self._set_use_password)
            if self._use_password:
                password_dialog = PasswordDialog(
                    title=_("Password"),
                    policy_name=PASSWORD_POLICY_USER
                )
                if self.user.password:
                    entry = EntryWidget(password_dialog.title, _(PASSWORD_SET))
                else:
                    entry = EntryWidget(password_dialog.title)
                self._container.add(entry, self._set_password, password_dialog)
            msg = _("Administrator")
            w = CheckboxWidget(title=msg, completed=self._is_admin)
            self._container.add(w, self._set_administrator)
            dialog = Dialog(title=_("Groups"), conditions=[self._check_groups])
            self._container.add(EntryWidget(dialog.title, self._groups), self._set_groups, dialog)
        self.window.add_with_separator(self._container)

    @report_if_failed(message=FULLNAME_ERROR_MSG)
    def _check_fullname(self, user_input, report_func):
        """Validate the full name (GECOS) entered by the user."""
        return GECOS_VALID.match(user_input) is not None

    @report_check_func()
    def _check_username(self, user_input, report_func):
        """Validate the user name entered by the user."""
        return check_username(user_input)

    @report_check_func()
    def _check_groups(self, user_input, report_func):
        """Validate the comma-separated group list entered by the user."""
        return check_grouplist(user_input)

    def _set_create_user(self, args):
        self._create_user = not self._create_user

    def _set_fullname(self, dialog):
        self.user.gecos = dialog.run()

    def _set_username(self, dialog):
        self.user.name = dialog.run()

    def _set_use_password(self, args):
        self._use_password = not self._use_password

    def _set_password(self, password_dialog):
        # keep asking until a password passing the policy is entered
        password = password_dialog.run()
        while password is None:
            password = password_dialog.run()
        self.user.password = password

    def _set_administrator(self, args):
        self._is_admin = not self._is_admin

    def _set_groups(self, dialog):
        self._groups = dialog.run()

    def show_all(self):
        NormalTUISpoke.show_all(self)
        # if we have any errors, display them
        while self.errors:
            print(self.errors.pop())

    @property
    def completed(self):
        """ Verify a user is created; verify pw is set if option checked. """
        user_list = get_user_list(self._users_module)
        if user_list:
            if self._use_password and not bool(self.user.password or self.user.is_crypted):
                return False
            else:
                return True
        else:
            return False

    @property
    def showable(self):
        return not (self.completed and flags.automatedInstall
                    and self._user_requested and not conf.ui.can_change_users)

    @property
    def mandatory(self):
        """The spoke is mandatory only if some input is missing.

        Possible reasons to be mandatory:

        - No admin user has been created
        - Password has been requested but not entered
        """
        return (not self._users_module.CheckAdminUserExists() or
                (self._use_password and not bool(self.user.password or
                                                 self.user.is_crypted)))

    @property
    def status(self):
        user_list = get_user_list(self._users_module)
        if not user_list:
            return _("No user will be created")
        elif self._use_password and not bool(self.user.password or self.user.is_crypted):
            return _("You must set a password")
        elif user_list[0].has_admin_priviledges():
            return _("Administrator %s will be created") % user_list[0].name
        else:
            return _("User %s will be created") % user_list[0].name

    def input(self, args, key):
        if self._container.process_user_input(key):
            self.apply()
            return InputState.PROCESSED_AND_REDRAW
        return super().input(args, key)

    def apply(self):
        # derive a user name from the full name if none was entered
        if self.user.gecos and not self.user.name:
            username = guess_username(self.user.gecos)
            valid, msg = check_username(username)
            if not valid:
                self.errors.append(_("Invalid user name: %(name)s.\n%(error_message)s")
                                   % {"name": username, "error_message": msg})
            else:
                self.user.name = guess_username(self.user.gecos)
        self.user.groups = [g.strip() for g in self._groups.split(",") if g]
        # Add or remove user admin status
        self.user.set_admin_priviledges(self._is_admin)
        # encrypt and store password only if user entered anything; this should
        # preserve passwords set via kickstart
        if self._use_password and self.user.password and len(self.user.password) > 0:
            self.user.password = self.user.password
            self.user.is_crypted = True
        # clear pw when user unselects to use pw
        else:
            self.user.password = ""
            self.user.is_crypted = False
        # Turning user creation off clears any already configured user,
        # regardless of origin (kickstart, user, DBus).
        if not self._create_user and self.user.name:
            self.user.name = ""
            self._user_cleared = True
        # On the other hand, if we have a user with name set,
        # it is valid and should be used if the spoke is re-visited.
        if self.user.name:
            self._user_cleared = False
        # Set the user list while removing any unset users, where unset
        # means the user has name == "".
        set_user_list(self._users_module, self._user_list, remove_unset=True)
| jkonecny12/anaconda | pyanaconda/ui/tui/spokes/user.py | Python | gpl-2.0 | 11,266 |
# -*- coding: utf-8 -*-
from module.plugins.internal.Account import Account
class BitshareCom(Account):
    """Account plugin for bitshare.com: scrapes account status and logs in."""
    __name__ = "BitshareCom"
    __type__ = "account"
    __version__ = "0.19"
    __status__ = "testing"

    __description__ = """Bitshare account plugin"""
    __license__ = "GPLv3"
    __authors__ = [("Paul King", None)]

    def grab_info(self, user, password, data):
        """Return account info (validuntil/trafficleft/premium) scraped from
        the settings page; -1 means unknown/unlimited."""
        html = self.load("http://bitshare.com/mysettings.html")

        # The upgrade link labelled "Free" is only present for free accounts.
        if "\"http://bitshare.com/myupgrade.html\">Free" in html:
            return {'validuntil': -1, 'trafficleft': -1, 'premium': False}

        # Premium account: warn if direct download is not enabled, since
        # downloads rely on it.
        if '<input type="checkbox" name="directdownload" checked="checked" />' not in html:
            self.log_warning(_("Activate direct Download in your Bitshare Account"))

        return {'validuntil': -1, 'trafficleft': -1, 'premium': True}

    def signin(self, user, password, data):
        """Log in; ending up back on the login page means bad credentials."""
        self.load("https://bitshare.com/login.html",
                  post={'user': user,
                        'password': password,
                        'submit': "Login"})

        if "login" in self.req.lastEffectiveURL:
            self.fail_login()
| Guidobelix/pyload | module/plugins/accounts/BitshareCom.py | Python | gpl-3.0 | 1,192 |
import os
from pathlib import Path
from types import MethodType
from typing import Type
from unittest.mock import Mock
import pytest
from baby_steps import given, then, when
from vedro import Scenario
from vedro.core import ScenarioResult, StepResult, VirtualScenario, VirtualStep
def make_scenario_path(path: str = "", name: str = "scenario.py") -> Path:
    """Build an absolute path to a scenario file under ./scenarios/ in the cwd."""
    return Path(os.getcwd(), "scenarios", path, name)
@pytest.fixture()
def scenario_():
    # Mocked Scenario class whose __file__ points inside the scenarios
    # directory, as VirtualScenario requires.
    scenario = Mock(Scenario)
    scenario.__file__ = str(make_scenario_path())
    return scenario
@pytest.fixture()
def virtual_scenario(scenario_: Type[scenario_]):
    # NOTE(review): the annotation references the `scenario_` fixture function
    # rather than a class — presumably Type[Scenario] was intended; verify.
    virtual_scenario = VirtualScenario(scenario_, [])
    return virtual_scenario
def test_scenario_result():
    # A freshly created ScenarioResult: no marks, no steps, no timestamps,
    # empty scope, zero reruns.
    with when:
        subject = "<subject>"
        namespace = "<namespace>"
        scenario_ = Mock(Scenario)
        scenario_.subject = subject
        scenario_.__file__ = str(make_scenario_path(namespace))
        virtual_scenario = VirtualScenario(scenario_, [])
        scenario_result = ScenarioResult(virtual_scenario)
    with then:
        assert scenario_result.scenario == virtual_scenario
        assert scenario_result.step_results == []
        assert scenario_result.is_passed() is False
        assert scenario_result.is_failed() is False
        assert scenario_result.is_skipped() is False
        assert scenario_result.started_at is None
        assert scenario_result.ended_at is None
        assert scenario_result.scope == {}
        assert scenario_result.rerun == 0
def test_scenario_result_mark_passed(*, virtual_scenario: VirtualScenario):
    # mark_passed() flags the result as passed and returns self (fluent API).
    with given:
        scenario_result = ScenarioResult(virtual_scenario)
    with when:
        res = scenario_result.mark_passed()
    with then:
        assert res == scenario_result
        assert scenario_result.is_passed() is True
def test_scenario_result_mark_failed(*, virtual_scenario: VirtualScenario):
    # mark_failed() flags the result as failed and returns self (fluent API).
    with given:
        scenario_result = ScenarioResult(virtual_scenario)
    with when:
        res = scenario_result.mark_failed()
    with then:
        assert res == scenario_result
        assert scenario_result.is_failed() is True
def test_scenario_result_mark_skipped(*, virtual_scenario: VirtualScenario):
    # mark_skipped() flags the result as skipped and returns self (fluent API).
    with given:
        scenario_result = ScenarioResult(virtual_scenario)
    with when:
        res = scenario_result.mark_skipped()
    with then:
        assert res == scenario_result
        assert scenario_result.is_skipped() is True
def test_scenario_result_set_started_at(*, virtual_scenario: VirtualScenario):
    # set_started_at() stores the timestamp; elapsed stays 0.0 while ended_at
    # is unset.
    with given:
        scenario_result = ScenarioResult(virtual_scenario)
        started_at = 1.0
    with when:
        res = scenario_result.set_started_at(started_at)
    with then:
        assert res == scenario_result
        assert scenario_result.started_at == started_at
        assert scenario_result.elapsed == 0.0
def test_scenario_result_set_ended_at(*, virtual_scenario: VirtualScenario):
    # set_ended_at() stores the timestamp; elapsed stays 0.0 while started_at
    # is unset.
    with given:
        scenario_result = ScenarioResult(virtual_scenario)
        ended_at = 1.0
    with when:
        res = scenario_result.set_ended_at(ended_at)
    with then:
        assert res == scenario_result
        assert scenario_result.ended_at == ended_at
        assert scenario_result.elapsed == 0.0
def test_scenario_result_elapsed(*, virtual_scenario: VirtualScenario):
    # elapsed is simply ended_at - started_at (negative here, since the end
    # timestamp precedes the start timestamp).
    with given:
        scenario_result = ScenarioResult(virtual_scenario)
        started_at = 3.0
        scenario_result.set_started_at(started_at)
        ended_at = 1.0
        scenario_result.set_ended_at(ended_at)
    with when:
        res = scenario_result.elapsed
    with then:
        assert res == ended_at - started_at
def test_scenario_result_add_step_result(*, virtual_scenario: VirtualScenario):
    # add_step_result() appends to step_results and returns None.
    with given:
        scenario_result = ScenarioResult(virtual_scenario)
        virtual_step = VirtualStep(Mock(MethodType))
        step_result = StepResult(virtual_step)
    with when:
        res = scenario_result.add_step_result(step_result)
    with then:
        assert res is None
        assert scenario_result.step_results == [step_result]
def test_scenario_result_get_step_results(*, virtual_scenario: VirtualScenario):
    # step_results preserves insertion order.
    with given:
        virtual_step1 = VirtualStep(Mock(MethodType))
        step_result1 = StepResult(virtual_step1)
        virtual_step2 = VirtualStep(Mock(MethodType))
        step_result2 = StepResult(virtual_step2)
        scenario_result = ScenarioResult(virtual_scenario)
        scenario_result.add_step_result(step_result1)
        scenario_result.add_step_result(step_result2)
    with when:
        step_results = scenario_result.step_results
    with then:
        assert step_results == [step_result1, step_result2]
def test_scenario_result_set_scope(*, virtual_scenario: VirtualScenario):
    # set_scope() stores the mapping and returns None.
    with given:
        scenario_result = ScenarioResult(virtual_scenario)
        scope = {}
    with when:
        res = scenario_result.set_scope(scope)
    with then:
        assert res is None
        assert scenario_result.scope == scope
def test_scenario_result_repr(*, virtual_scenario: VirtualScenario):
    # repr embeds the repr of the wrapped VirtualScenario.
    with when:
        scenario_result = ScenarioResult(virtual_scenario)
    with then:
        assert repr(scenario_result) == f"ScenarioResult<{virtual_scenario!r}>"
def test_scenario_result_eq(*, virtual_scenario: VirtualScenario):
    # Two results wrapping the same VirtualScenario compare equal.
    with given:
        scenario_result1 = ScenarioResult(virtual_scenario)
        scenario_result2 = ScenarioResult(virtual_scenario)
    with when:
        res = scenario_result1 == scenario_result2
    with then:
        assert res is True
def test_scenario_result_not_eq():
    # Results wrapping two distinct VirtualScenario instances are not equal,
    # even when built from identical data.
    with given:
        scenario1_ = Mock(Scenario)
        scenario1_.__file__ = str(make_scenario_path())
        virtual_scenario1 = VirtualScenario(scenario1_, [])
        scenario_result1 = ScenarioResult(virtual_scenario1)
        scenario2_ = Mock(Scenario)
        scenario2_.__file__ = str(make_scenario_path())
        virtual_scenario2 = VirtualScenario(scenario2_, [])
        scenario_result2 = ScenarioResult(virtual_scenario2)
    with when:
        res = scenario_result1 == scenario_result2
    with then:
        assert res is False
| nikitanovosibirsk/vedro | tests/core/test_scenario_result.py | Python | apache-2.0 | 6,232 |
from django.shortcuts import get_object_or_404
from django.urls import reverse
from rest_framework import generics, status
from rest_framework.views import APIView
from rest_framework.response import Response
from zentral.core.events.base import EventRequest
from zentral.utils.drf import DefaultDjangoModelPermissions, DjangoPermissionRequired
from .models import Instance
from .serializers import InstanceSerializer
from .tasks import sync_inventory
class InstanceList(generics.ListAPIView):
    """
    List all Instances
    """
    queryset = Instance.objects.all()
    # Standard Django model permissions are enforced for access.
    permission_classes = [DefaultDjangoModelPermissions]
    serializer_class = InstanceSerializer
class InstanceDetail(generics.RetrieveAPIView):
    """
    Retrieve an Instance
    """
    queryset = Instance.objects.all()
    # Standard Django model permissions are enforced for access.
    permission_classes = [DefaultDjangoModelPermissions]
    serializer_class = InstanceSerializer
class StartInstanceSync(APIView):
    """
    Start instance inventory synchronization
    """
    permission_required = ("wsone.view_instance", "inventory.change_machinesnapshot")
    permission_classes = [DjangoPermissionRequired]

    def post(self, request, *args, **kwargs):
        # 404 if the instance does not exist.
        instance = get_object_or_404(Instance, pk=self.kwargs["pk"])
        # Capture request metadata so events emitted by the task carry provenance.
        event_request = EventRequest.build_from_request(request)
        # Kick off the synchronization asynchronously and return the task handle.
        result = sync_inventory.apply_async((instance.pk, event_request.serialize()))
        return Response({"task_id": result.id,
                         "task_result_url": reverse("base_api:task_result", args=(result.id,))},
                        status=status.HTTP_201_CREATED)
| zentralopensource/zentral | zentral/contrib/wsone/api_views.py | Python | apache-2.0 | 1,602 |
from flask import Blueprint, render_template, abort
from jinja2 import TemplateNotFound
# Blueprint serving flat pages from its own templates/ folder.
simple_page = Blueprint('simple_page', __name__,
                        template_folder='templates')
@simple_page.route('/', defaults={'page': 'index'})
@simple_page.route('/<page>')
def show(page):
    """Render templates/pages/<page>.html, defaulting to the index page."""
    try:
        return render_template('pages/%s.html' % page)
    except TemplateNotFound:
        # Unknown page name -> 404 instead of a 500 from the template loader.
        abort(404)
| jackTheRipper/iotrussia | web_server/lib/flask-master/examples/blueprintexample/simple_page/simple_page.py | Python | gpl-2.0 | 402 |
"""Support for interface with a Bose Soundtouch."""
from __future__ import annotations
import logging
import re
from libsoundtouch import soundtouch_device
from libsoundtouch.utils import Source
import voluptuous as vol
from homeassistant.components.media_player import PLATFORM_SCHEMA, MediaPlayerEntity
from homeassistant.components.media_player.const import (
SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE,
SUPPORT_PLAY,
SUPPORT_PLAY_MEDIA,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_SELECT_SOURCE,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_SET,
SUPPORT_VOLUME_STEP,
)
from homeassistant.const import (
CONF_HOST,
CONF_NAME,
CONF_PORT,
EVENT_HOMEASSISTANT_START,
STATE_OFF,
STATE_PAUSED,
STATE_PLAYING,
STATE_UNAVAILABLE,
)
from homeassistant.core import HomeAssistant, ServiceCall, callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
from .const import (
DOMAIN,
SERVICE_ADD_ZONE_SLAVE,
SERVICE_CREATE_ZONE,
SERVICE_PLAY_EVERYWHERE,
SERVICE_REMOVE_ZONE_SLAVE,
)
_LOGGER = logging.getLogger(__name__)

# Map SoundTouch play statuses onto Home Assistant media player states.
MAP_STATUS = {
    "PLAY_STATE": STATE_PLAYING,
    "BUFFERING_STATE": STATE_PLAYING,
    "PAUSE_STATE": STATE_PAUSED,
    "STOP_STATE": STATE_OFF,
}

# Key under which this platform stores its entity list in hass.data.
DATA_SOUNDTOUCH = "soundtouch"
ATTR_SOUNDTOUCH_GROUP = "soundtouch_group"
ATTR_SOUNDTOUCH_ZONE = "soundtouch_zone"

# Service schemas: a master entity id plus, where applicable, slave entity ids.
SOUNDTOUCH_PLAY_EVERYWHERE = vol.Schema({vol.Required("master"): cv.entity_id})
SOUNDTOUCH_CREATE_ZONE_SCHEMA = vol.Schema(
    {vol.Required("master"): cv.entity_id, vol.Required("slaves"): cv.entity_ids}
)
SOUNDTOUCH_ADD_ZONE_SCHEMA = vol.Schema(
    {vol.Required("master"): cv.entity_id, vol.Required("slaves"): cv.entity_ids}
)
SOUNDTOUCH_REMOVE_ZONE_SCHEMA = vol.Schema(
    {vol.Required("master"): cv.entity_id, vol.Required("slaves"): cv.entity_ids}
)

DEFAULT_NAME = "Bose Soundtouch"
DEFAULT_PORT = 8090

# Feature flags advertised for every SoundTouch entity.
SUPPORT_SOUNDTOUCH = (
    SUPPORT_PAUSE
    | SUPPORT_VOLUME_STEP
    | SUPPORT_VOLUME_MUTE
    | SUPPORT_PREVIOUS_TRACK
    | SUPPORT_NEXT_TRACK
    | SUPPORT_TURN_OFF
    | SUPPORT_VOLUME_SET
    | SUPPORT_TURN_ON
    | SUPPORT_PLAY
    | SUPPORT_PLAY_MEDIA
    | SUPPORT_SELECT_SOURCE
)

# YAML configuration accepted by this platform (host required).
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_HOST): cv.string,
        vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
        vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
    }
)
def setup_platform(
    hass: HomeAssistant,
    config: ConfigType,
    add_entities: AddEntitiesCallback,
    discovery_info: DiscoveryInfoType | None = None,
) -> None:
    """Set up the Bose Soundtouch platform.

    Creates the entity (from discovery info or YAML config) and registers the
    multi-room zone services.
    """
    if DATA_SOUNDTOUCH not in hass.data:
        hass.data[DATA_SOUNDTOUCH] = []

    if discovery_info:
        host = discovery_info["host"]
        port = int(discovery_info["port"])

        # if device already exists by config
        if host in [device.config["host"] for device in hass.data[DATA_SOUNDTOUCH]]:
            return

        remote_config = {"id": "ha.component.soundtouch", "host": host, "port": port}
        bose_soundtouch_entity = SoundTouchDevice(None, remote_config)
        hass.data[DATA_SOUNDTOUCH].append(bose_soundtouch_entity)
        add_entities([bose_soundtouch_entity], True)
    else:
        name = config.get(CONF_NAME)
        remote_config = {
            "id": "ha.component.soundtouch",
            "port": config.get(CONF_PORT),
            "host": config.get(CONF_HOST),
        }
        bose_soundtouch_entity = SoundTouchDevice(name, remote_config)
        hass.data[DATA_SOUNDTOUCH].append(bose_soundtouch_entity)
        add_entities([bose_soundtouch_entity], True)

    def service_handle(service: ServiceCall) -> None:
        """Handle the applying of a service."""
        master_device_id = service.data.get("master")
        slaves_ids = service.data.get("slaves")
        slaves = []
        if slaves_ids:
            slaves = [
                device
                for device in hass.data[DATA_SOUNDTOUCH]
                if device.entity_id in slaves_ids
            ]

        # Find the master entity (generator form instead of [...].__iter__()).
        master = next(
            (
                device
                for device in hass.data[DATA_SOUNDTOUCH]
                if device.entity_id == master_device_id
            ),
            None,
        )

        if master is None:
            _LOGGER.warning(
                "Unable to find master with entity_id: %s", str(master_device_id)
            )
            return

        if service.service == SERVICE_PLAY_EVERYWHERE:
            # Every other known SoundTouch entity becomes a slave.
            slaves = [
                d for d in hass.data[DATA_SOUNDTOUCH] if d.entity_id != master_device_id
            ]
            master.create_zone(slaves)
        elif service.service == SERVICE_CREATE_ZONE:
            master.create_zone(slaves)
        elif service.service == SERVICE_REMOVE_ZONE_SLAVE:
            master.remove_zone_slave(slaves)
        elif service.service == SERVICE_ADD_ZONE_SLAVE:
            master.add_zone_slave(slaves)

    hass.services.register(
        DOMAIN,
        SERVICE_PLAY_EVERYWHERE,
        service_handle,
        schema=SOUNDTOUCH_PLAY_EVERYWHERE,
    )
    hass.services.register(
        DOMAIN,
        SERVICE_CREATE_ZONE,
        service_handle,
        schema=SOUNDTOUCH_CREATE_ZONE_SCHEMA,
    )
    hass.services.register(
        DOMAIN,
        SERVICE_REMOVE_ZONE_SLAVE,
        service_handle,
        schema=SOUNDTOUCH_REMOVE_ZONE_SCHEMA,
    )
    hass.services.register(
        DOMAIN,
        SERVICE_ADD_ZONE_SLAVE,
        service_handle,
        schema=SOUNDTOUCH_ADD_ZONE_SCHEMA,
    )
class SoundTouchDevice(MediaPlayerEntity):
    """Representation of a SoundTouch Bose device."""

    def __init__(self, name, config):
        """Create Soundtouch Entity."""
        self._device = soundtouch_device(config["host"], config["port"])
        if name is None:
            # Fall back to the name reported by the device itself.
            self._name = self._device.config.name
        else:
            self._name = name
        self._status = None
        self._volume = None
        self._config = config
        self._zone = None

    @property
    def config(self):
        """Return specific soundtouch configuration."""
        return self._config

    @property
    def device(self):
        """Return Soundtouch device."""
        return self._device

    def update(self):
        """Retrieve the latest data."""
        self._status = self._device.status()
        self._volume = self._device.volume()
        self._zone = self.get_zone_info()

    @property
    def volume_level(self):
        """Volume level of the media player (0..1)."""
        return self._volume.actual / 100

    @property
    def name(self):
        """Return the name of the device."""
        return self._name

    @property
    def state(self):
        """Return the state of the device."""
        if self._status.source == "STANDBY":
            return STATE_OFF
        return MAP_STATUS.get(self._status.play_status, STATE_UNAVAILABLE)

    @property
    def source(self):
        """Name of the current input source."""
        return self._status.source

    @property
    def source_list(self):
        """List of available input sources."""
        return [
            Source.AUX.value,
            Source.BLUETOOTH.value,
        ]

    @property
    def is_volume_muted(self):
        """Boolean if volume is currently muted."""
        return self._volume.muted

    @property
    def supported_features(self):
        """Flag media player features that are supported."""
        return SUPPORT_SOUNDTOUCH

    def turn_off(self):
        """Turn off media player."""
        self._device.power_off()

    def turn_on(self):
        """Turn on media player."""
        self._device.power_on()

    def volume_up(self):
        """Volume up the media player."""
        self._device.volume_up()

    def volume_down(self):
        """Volume down media player."""
        self._device.volume_down()

    def set_volume_level(self, volume):
        """Set volume level, range 0..1."""
        self._device.set_volume(int(volume * 100))

    def mute_volume(self, mute):
        """Send mute command.

        NOTE(review): the ``mute`` argument is ignored; ``mute()`` on the
        libsoundtouch device presumably toggles the mute state — verify.
        """
        self._device.mute()

    def media_play_pause(self):
        """Simulate play pause media player."""
        self._device.play_pause()

    def media_play(self):
        """Send play command."""
        self._device.play()

    def media_pause(self):
        """Send media pause command to media player."""
        self._device.pause()

    def media_next_track(self):
        """Send next track command."""
        self._device.next_track()

    def media_previous_track(self):
        """Send the previous track command."""
        self._device.previous_track()

    @property
    def media_image_url(self):
        """Image url of current playing media."""
        return self._status.image

    @property
    def media_title(self):
        """Title of current playing media."""
        if self._status.station_name is not None:
            return self._status.station_name
        if self._status.artist is not None:
            return f"{self._status.artist} - {self._status.track}"
        return None

    @property
    def media_duration(self):
        """Duration of current playing media in seconds."""
        return self._status.duration

    @property
    def media_artist(self):
        """Artist of current playing media."""
        return self._status.artist

    @property
    def media_track(self):
        """Track of current playing media."""
        return self._status.track

    @property
    def media_album_name(self):
        """Album name of current playing media."""
        return self._status.album

    async def async_added_to_hass(self):
        """Populate zone info which requires entity_id."""

        @callback
        def async_update_on_start(event):
            """Schedule an update when all platform entities have been added."""
            self.async_schedule_update_ha_state(True)

        self.hass.bus.async_listen_once(
            EVENT_HOMEASSISTANT_START, async_update_on_start
        )

    def play_media(self, media_type, media_id, **kwargs):
        """Play a piece of media."""
        _LOGGER.debug("Starting media with media_id: %s", media_id)
        # Fixed regex: the old r"http?://" did not match https:// URLs, which
        # were then mistakenly looked up (and rejected) as presets.
        if re.match(r"https?://", str(media_id)):
            # URL
            _LOGGER.debug("Playing URL %s", str(media_id))
            self._device.play_url(str(media_id))
        else:
            # Preset
            presets = self._device.presets()
            preset = next(
                (preset for preset in presets if preset.preset_id == str(media_id)),
                None,
            )
            if preset is not None:
                _LOGGER.debug("Playing preset: %s", preset.name)
                self._device.select_preset(preset)
            else:
                _LOGGER.warning("Unable to find preset with id %s", media_id)

    def select_source(self, source):
        """Select input source."""
        if source == Source.AUX.value:
            _LOGGER.debug("Selecting source AUX")
            self._device.select_source_aux()
        elif source == Source.BLUETOOTH.value:
            _LOGGER.debug("Selecting source Bluetooth")
            self._device.select_source_bluetooth()
        else:
            _LOGGER.warning("Source %s is not supported", source)

    def create_zone(self, slaves):
        """
        Create a zone (multi-room) and play on selected devices.

        :param slaves: slaves on which to play
        """
        if not slaves:
            _LOGGER.warning("Unable to create zone without slaves")
        else:
            _LOGGER.info("Creating zone with master %s", self._device.config.name)
            self._device.create_zone([slave.device for slave in slaves])

    def remove_zone_slave(self, slaves):
        """
        Remove slave(s) from and existing zone (multi-room).

        Zone must already exist and slaves array can not be empty.
        Note: If removing last slave, the zone will be deleted and you'll have
        to create a new one. You will not be able to add a new slave anymore

        :param slaves: slaves to remove from the zone
        """
        if not slaves:
            _LOGGER.warning("Unable to find slaves to remove")
        else:
            _LOGGER.info(
                "Removing slaves from zone with master %s", self._device.config.name
            )
            # SoundTouch API seems to have a bug and won't remove slaves if there are
            # more than one in the payload. Therefore we have to loop over all slaves
            # and remove them individually
            for slave in slaves:
                # make sure to not try to remove the master (aka current device)
                if slave.entity_id != self.entity_id:
                    self._device.remove_zone_slave([slave.device])

    def add_zone_slave(self, slaves):
        """
        Add slave(s) to and existing zone (multi-room).

        Zone must already exist and slaves array can not be empty.

        :param slaves:slaves to add
        """
        if not slaves:
            _LOGGER.warning("Unable to find slaves to add")
        else:
            _LOGGER.info(
                "Adding slaves to zone with master %s", self._device.config.name
            )
            self._device.add_zone_slave([slave.device for slave in slaves])

    @property
    def extra_state_attributes(self):
        """Return entity specific state attributes."""
        attributes = {}

        if self._zone and "master" in self._zone:
            attributes[ATTR_SOUNDTOUCH_ZONE] = self._zone
            # Compatibility with how other components expose their groups (like SONOS).
            # First entry is the master, others are slaves
            group_members = [self._zone["master"]] + self._zone["slaves"]
            attributes[ATTR_SOUNDTOUCH_GROUP] = group_members

        return attributes

    def get_zone_info(self):
        """Return the current zone info."""
        zone_status = self._device.zone_status()
        if not zone_status:
            return None

        # Due to a bug in the SoundTouch API itself client devices do NOT return their
        # siblings as part of the "slaves" list. Only the master has the full list of
        # slaves for some reason. To compensate for this shortcoming we have to fetch
        # the zone info from the master when the current device is a slave until this is
        # fixed in the SoundTouch API or libsoundtouch, or of course until somebody has a
        # better idea on how to fix this.
        # In addition to this shortcoming, libsoundtouch seems to report the "is_master"
        # property wrong on some slaves, so the only reliable way to detect if the current
        # devices is the master, is by comparing the master_id of the zone with the device_id
        if zone_status.master_id == self._device.config.device_id:
            return self._build_zone_info(self.entity_id, zone_status.slaves)

        # The master device has to be searched by it's ID and not IP since libsoundtouch / BOSE API
        # do not return the IP of the master for some slave objects/responses
        master_instance = self._get_instance_by_id(zone_status.master_id)
        if master_instance is not None:
            master_zone_status = master_instance.device.zone_status()
            return self._build_zone_info(
                master_instance.entity_id, master_zone_status.slaves
            )

        # We should never end up here since this means we haven't found a master device to get the
        # correct zone info from. In this case, assume current device is master
        return self._build_zone_info(self.entity_id, zone_status.slaves)

    def _get_instance_by_ip(self, ip_address):
        """Search and return a SoundTouchDevice instance by it's IP address."""
        for instance in self.hass.data[DATA_SOUNDTOUCH]:
            if instance and instance.config["host"] == ip_address:
                return instance
        return None

    def _get_instance_by_id(self, instance_id):
        """Search and return a SoundTouchDevice instance by it's ID (aka MAC address)."""
        for instance in self.hass.data[DATA_SOUNDTOUCH]:
            if instance and instance.device.config.device_id == instance_id:
                return instance
        return None

    def _build_zone_info(self, master, zone_slaves):
        """Build the exposed zone attributes."""
        slaves = []

        for slave in zone_slaves:
            slave_instance = self._get_instance_by_ip(slave.device_ip)
            if slave_instance:
                slaves.append(slave_instance.entity_id)

        attributes = {
            "master": master,
            "is_master": master == self.entity_id,
            "slaves": slaves,
        }

        return attributes
| rohitranjan1991/home-assistant | homeassistant/components/soundtouch/media_player.py | Python | mit | 17,069 |
import sys
import smtplib
from os.path import basename
from email.mime.application import MIMEApplication
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.utils import COMMASPACE, formatdate
def send_mail(files=None):
    """Email a Disco outage alert, attaching the given files.

    :param files: optional list of file paths to attach to the alert.

    The recipient/sender addresses and the SMTP relay are hard-coded for
    the Disco deployment.
    """
    send_to = ["discoalerts@gmail.com"]
    send_from = "disco@netsec.colostate.edu"
    text = "Disco detected an outage. See attached files."
    subject = 'Disco Alert'
    server = "mail.netsec.colostate.edu"
    assert isinstance(send_to, list)
    msg = MIMEMultipart()
    msg['From'] = send_from
    msg['To'] = COMMASPACE.join(send_to)
    msg['Date'] = formatdate(localtime=True)
    msg['Subject'] = subject
    msg.attach(MIMEText(text))
    for f in files or []:
        with open(f, "rb") as fil:
            part = MIMEApplication(
                fil.read(),
                Name=basename(f)
            )
        part['Content-Disposition'] = 'attachment; filename="%s"' % basename(f)
        msg.attach(part)
    # Close the connection even when sendmail() raises (e.g. recipient
    # refused); the original leaked the socket in that case.
    smtp = smtplib.SMTP(server)
    try:
        smtp.sendmail(send_from, send_to, msg.as_string())
    finally:
        smtp.close()
# Entry point: attach the file named by the first CLI argument to the alert.
if __name__ == "__main__":
    files=[sys.argv[1]]
    send_mail(files=files)
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
import pkg1.p1a, pkg1.p1b, pkg1.sub
import pkg2.p2a, pkg2.p2b
import othermods.othera, othermods.otherb
import othermods.sub.osa, othermods.sub.osb
| blueyed/coveragepy | tests/modules/usepkgs.py | Python | apache-2.0 | 304 |
# -*- coding: utf-8 -*-
import unittest
from three_sum_closest import three_sum_closest
class Test3SumClosest(unittest.TestCase):
def test_three_sum_closest(self):
tests = [
([-1,0,1,0], 0, 0),
([-1,-1,1,0], -1, -1),
([1, 2, 3, 4], 5, 6),
([1, 1, 1, 0], 100, 3),
([1,2,4,8,16,32,64,128], 82, 82),
([4,0,5,-5,3,3,0,-4,-5], -2, -2),
]
for test in tests:
actual = three_sum_closest(test[0], test[1])
expected = test[2]
self.assertEqual(actual, expected, \
'failed test={}, actual={}'.format(test, actual))
| topliceanu/learn | interview/leetcode/test_three_sum_closest.py | Python | mit | 658 |
#!/usr/bin/python2
# -*- coding: utf-8 -*-
import logging
import MySQLdb
from PySide2.QtCore import QThread,Signal,Slot
from database.manager import DatabaseManager
from util.util import Util
log = logging.getLogger(__name__)
class PreloadThread(QThread):
    """Startup worker thread: loads bundled fonts and opens the database
    connection, reporting progress to the startup window via Qt signals.
    Implemented as a singleton through __new__.
    """
    _instance = None  # singleton instance cache
    updateLabelSignal = Signal(str)       # progress text for the startup label
    finishedSignal = Signal()             # emitted when preloading completed
    questionSignal = Signal(str)          # ask the user a yes/no question
    receiveQuestionSignal = Signal(bool)  # the user's answer comes back here
    def __init__(self,startup,app, parent=None):
        super(PreloadThread,self).__init__(parent)
        self.startup = startup
        self.updateLabelSignal.connect(startup.updateLabel)
        self.finishedSignal.connect(startup.complete)
        self.questionSignal.connect(startup.sendQuestion)
        self.receiveQuestionSignal.connect(self.receiveQuestion)
        self.app = app
    def loadFont(self):
        # Register every bundled font file with the application.
        Util.loadFont("amemuchigothic-01.ttf")
        Util.loadFont("rounded-mgenplus-1mn-bold.ttf")
        Util.loadFont("rounded-mgenplus-1mn-light.ttf")
        Util.loadFont("rounded-mgenplus-1mn-medium.ttf")
        Util.loadFont("rounded-mgenplus-1mn-regular.ttf")
        Util.loadFont("rounded-mgenplus-1mn-thin.ttf")
        Util.loadFont("rounded-mgenplus-2c-black.ttf")
        Util.loadFont("rounded-mgenplus-2c-bold.ttf")
        Util.loadFont("rounded-mgenplus-2c-light.ttf")
        Util.loadFont("rounded-mgenplus-2c-heavy.ttf")
        Util.loadFont("rounded-mgenplus-2c-medium.ttf")
        Util.loadFont("rounded-mgenplus-2c-regular.ttf")
        Util.loadFont("rounded-mgenplus-2c-thin.ttf")
    def run(self):
        log.debug("preloading...")
        # Progress strings shown to the user are Japanese:
        # "Loading fonts..." / "Connecting to the database...".
        self.updateLabelSignal.emit("フォントを読み込み中...")
        self.loadFont()
        self.updateLabelSignal.emit("データベースに接続中...")
        self.connectDatabase()
    def connectDatabase(self):
        """Connect to the database; on failure ask the user whether to retry."""
        databaseManager = DatabaseManager()
        log.debug("データベースに接続中...")
        try:
            databaseManager.connect()
            self.finishedSignal.emit()
        except MySQLdb.OperationalError as e:
            # Japanese message: "Could not connect to the database.
            # Error detail: <detail>. Reconnect?"
            message = "データベースに接続できませんでした。\nエラー詳細:%s\n\n再接続をしますか?" % (e.args[1])
            self.questionSignal.emit(message)
    @Slot(bool)
    def receiveQuestion(self,answer):
        # True -> retry the connection; False -> quit the application.
        if answer:
            self.connectDatabase()
        else:
            self.app.exit(0)
    def __new__(cls,startup,app):
        # NOTE(review): passing extra args through to QThread.__new__ is
        # tolerated on Python 2 (this file targets python2) but would break
        # on Python 3 — confirm before porting.
        if cls._instance is None:
            cls._instance = super(PreloadThread,cls).__new__(cls,startup,app)
        return cls._instance
import pprint
def period_ns(freq):
    """Return the period, in nanoseconds, of a clock running at ``freq`` Hz."""
    return 1e9 / freq
def csr_map_update(csr_map, csr_peripherals):
    """Append ``csr_peripherals`` to ``csr_map`` in place, numbering them
    after the current maximum index (starting from 0 for an empty map)."""
    next_index = (max(csr_map.values()) + 1) if csr_map else 0
    csr_map.update(
        {name: index
         for index, name in enumerate(csr_peripherals, start=next_index)})
def csr_map_update_print(csr_map, csr_peripherals):
    """Like csr_map_update(), but also print the before/after maximum CSR
    index and the resulting mapping for debugging."""
    print()
    print("-"*75)
    # Handle an empty starting map (no previous maximum), consistently with
    # csr_map_update(); the original crashed with max() on an empty sequence.
    if csr_map:
        print("Previous Max: {}".format(max(csr_map.values())))
        start = max(csr_map.values()) + 1
    else:
        print("Previous Max: (empty)")
        start = 0
    csr_map.update(dict((n, v)
        for v, n in enumerate(csr_peripherals, start=start)))
    print("     New Max: {}".format(max(csr_map.values())))
    # Print the mapping sorted by index for readability.
    csr_values = list((b, a) for a, b in csr_map.items())
    csr_values.sort()
    pprint.pprint(csr_values)
    print("-"*75)
    print()
def assert_pll_clock(requested_freq, input, feedback, divide, msg):
    """Assert that a PLL configuration yields ``requested_freq``.

    :param requested_freq: desired output frequency in Hz.
    :param input: PLL input (reference) frequency in Hz.
    :param feedback: PLL feedback multiplier.
    :param divide: PLL output divider.
    :param msg: description used in the failure message.
    :raises AssertionError: when input * feedback / divide (in whole MHz)
        does not match the requested frequency.
    """
    # Compare in whole MHz, matching how the frequencies are reported.
    output_freq = int(input * feedback / divide / 1e6)
    wanted_freq = int(requested_freq / 1e6)
    assert output_freq == wanted_freq, (
        "%s wants %s but got %i MHz (input=%i MHz feedback=%i divide=%i)" % (
            msg, requested_freq, output_freq, int(input/1e6), feedback, divide))
class MHzType(int):
    """An integer frequency in Hz, constructed from a value in MHz.

    The original doctests claimed ``int(1e9)``/``int(5e9)`` and showed
    __str__ output at the prompt; both were wrong (the stored value is
    ``x * 1e6`` and the prompt shows __repr__).  Fixed examples:

    >>> a = MHzType(1)
    >>> a == int(1e6)
    True
    >>> str(a)
    '1 MHz'
    >>> b = 5 * MHzType(1)
    >>> b == int(5e6)
    True
    >>> str(b)
    '5 MHz'
    >>> c = 200 * MHzType(1)
    >>> c.to_ns()
    5.0
    """
    def __new__(cls, x):
        # The stored integer is the frequency in Hz (input is in MHz).
        return int.__new__(cls, int(x * 1e6))

    def __str__(self):
        return "%i MHz" % int(self / 1e6)

    def __repr__(self):
        return "%f * MHz()" % float(self / 1e6)

    def __mul__(self, o):
        # Use the class directly rather than the module-level ``MHz``
        # constant (which is only defined after this class body runs).
        return MHzType(float(self) * o / 1e6)

    def __rmul__(self, o):
        return MHzType(float(self) * o / 1e6)

    def to_ns(self):
        """Return the clock period in nanoseconds."""
        return 1e9/self

MHz = MHzType(1)
| mithro/HDMI2USB-litex-firmware | targets/utils.py | Python | bsd-2-clause | 1,695 |
"""
Get
---
.. moduleauthor: Jachym Cepicky
"""
# Author: Jachym Cepicky
# http://les-ejk.cz
# Lince:
#
# Web Processing Service implementation
# Copyright (C) 2006 Jachym Cepicky
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
# references used in the comments of this source code:
# OWS_1-1-0:
# OGC Web Services Common Specification
# version 1.1.0 with Corrigendum 1
# ref.num.: OGC 06-121r3
# WPS_1-0-0:
# OpenGIS(R) Web Processing Service
# version 1.0.0
# ref.num.: OGC 05-007r7
import types
from string import split
import pywps
from pywps.Exceptions import *
import pywps.config
from pywps.Parser import Parser
from pywps.Process.Lang import Lang
class Get(Parser):
    """ Main Class for parsing HTTP GET request types """
    unparsedInputs = None # temporary store for later validation
    requestParser = None  # request-specific parser chosen by checkRequestType()
    # Lower-cased names of the three mandatory WPS operations
    # (WPS_1-0-0 p.4 sect.6.1).
    GET_CAPABILITIES = "getcapabilities"
    DESCRIBE_PROCESS = "describeprocess"
    EXECUTE = "execute"
    def __init__(self,wps):
        Parser.__init__(self,wps)
        self.unparsedInputs = {}
    def parse(self,queryString):
        """Parse given string with parameters given in KVP encoding
        :param queryString: string of parameters taken from URL in KVP encoding
        :returns: parsed inputs object
        """
        key = None
        value = None
        keys = []
        # Upper bound on the length of any single input value (server config).
        maxInputLength = int(pywps.config.getConfigValue("server","maxinputparamlength"))
        # parse query string
        # arguments are separated by "&" character
        # everything is stored into unparsedInputs structure, for latter
        # validation
        for feature in queryString.split("&"):
            feature = feature.strip()
            # omit empty KVPs, e.g. due to optional ampersand after the last
            # KVP in request string (OWS_1-1-0, p.75, sect. 11.2):
            if not feature == '':
                if feature.lower() == "wsdl":
                    self.inputs["wsdl"] = True
                    break
                else:
                    # NOTE(review): bare except — any failure of the split
                    # (not only a missing "=") is reported as an invalid KVP.
                    try:
                        key,value = split(feature,"=",maxsplit=1)
                    except:
                        raise NoApplicableCode(\
                            'Invalid Key-Value-Pair: "' + \
                                    str(feature) + '"')
                    if value.find("[") == 0: # if value in brackets:
                        value = value[1:-1] # delete brackets
                    if len(value)>maxInputLength:
                        raise FileSizeExceeded(key)
                    keys.append(key)
                    # Keys are case-insensitive; values are truncated to the
                    # configured maximum length.
                    self.unparsedInputs[key.lower()] = value[:maxInputLength]
        if not self.inputs.has_key("wsdl"):
            # check service name
            service = self.checkService()
            # check request type
            self.checkRequestType()
            # parse the request
            self.inputs = self.requestParser.parse(self.unparsedInputs, self.inputs)
        if not self.inputs:
            raise MissingParameterValue("service")
        return self.inputs
    def checkRequestType(self):
        """Find requested request type and import given request parser."""
        if not "request" in self.unparsedInputs:
            raise MissingParameterValue("request")
        # test, if one of the mandatory WPS operation is called (via request)
        # (mandatory operations see WPS_1-0-0 p.4 sect.6.1)
        if self.unparsedInputs["request"].lower() ==\
           self.GET_CAPABILITIES:
            import GetCapabilities
            self.requestParser = GetCapabilities.Get(self.wps)
            self.inputs["request"] = self.GET_CAPABILITIES
        elif self.unparsedInputs["request"].lower() ==\
                self.DESCRIBE_PROCESS:
            import DescribeProcess
            self.requestParser = DescribeProcess.Get(self.wps)
            self.inputs["request"] = self.DESCRIBE_PROCESS
        elif self.unparsedInputs["request"].lower() ==\
                self.EXECUTE:
            import Execute
            self.requestParser = Execute.Get(self.wps)
            self.inputs["request"] = self.EXECUTE
        else:
            raise InvalidParameterValue("request")
    def checkService(self):
        """ Check mandatory service name parameter. """
        # service name is mandatory for all requests (OWS_1-1-0 p.14 tab.3 +
        # p.46 tab.26); service must be "WPS" (WPS_1-0-0 p.17 tab.13 + p.32 tab.39)
        if "service" in self.unparsedInputs:
            # NOTE(review): the .upper() followed by .lower() comparisons is
            # redundant but harmless — the net effect is a case-insensitive
            # match against "wsdl"/"wps".
            value=self.unparsedInputs["service"].upper()
            if value.lower() == "wsdl":
                self.inputs["service"] = "wsdl"
            elif value.lower() != "wps":
                raise InvalidParameterValue("service")
            else:
                self.inputs["service"] = "wps"
        else:
            raise MissingParameterValue("service")
        return self.inputs["service"]
    def checkLanguage(self):
        """ Check optional language parameter. """
        if "language" in self.unparsedInputs:
            value=Lang.getCode(self.unparsedInputs["language"].lower())
            if value not in self.wps.languages:
                raise InvalidParameterValue("language")
            else:
                self.inputs["language"] = value
        else:
            # Fall back to the server default language.
            self.inputs["language"] = pywps.DEFAULT_LANG
    def checkVersion(self):
        """ Check mandatory version parameter. """
        if "version" in self.unparsedInputs:
            value=self.unparsedInputs["version"]
            if value not in self.wps.versions:
                raise VersionNegotiationFailed(
                        'The requested version "' + value + \
                        '" is not supported by this server.')
            else:
                self.inputs["version"] = value
        else:
            raise MissingParameterValue("version")
| jachym/PyWPS-SVN | pywps/Parser/Get.py | Python | gpl-2.0 | 6,535 |
# -*- coding: utf-8 -*-
# Copyright 2017 GIG Technology NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.3@@
import mc_unittest
from rogerthat.bizz.profile import create_user_profile
from rogerthat.bizz.system import update_app_asset_response
from rogerthat.capi.system import updateAppAsset
from rogerthat.dal.mobile import get_mobile_settings_cached
from rogerthat.models.properties.profiles import MobileDetails
from rogerthat.rpc import users
from rogerthat.rpc.models import Mobile
from rogerthat.rpc.rpc import logError
from rogerthat.to.app import UpdateAppAssetRequestTO
class Test(mc_unittest.TestCase):
    def testSendNews(self):
        """Exercise updateAppAsset for a user with one Android mobile,
        at two different client minor versions (2447 and 2449)."""
        # Run against the high-replication datastore emulation.
        self.set_datastore_hr_probability(1)
        scale_x = 1
        request = UpdateAppAssetRequestTO(u"kind", u"url", scale_x)
        # Create a user profile with a single Android HTTP mobile attached.
        app_user = users.User('geert@example.com')
        user_profile = create_user_profile(app_user, 'geert', language='en')
        mobile = users.get_current_mobile()
        user_profile.mobiles = MobileDetails()
        user_profile.mobiles.addNew(mobile.account, Mobile.TYPE_ANDROID_HTTP, None, u"rogerthat")
        user_profile.put()
        # First call below the feature-version threshold...
        ms = get_mobile_settings_cached(mobile)
        ms.majorVersion = 0
        ms.minorVersion = 2447
        ms.put()
        updateAppAsset(update_app_asset_response, logError, app_user, request=request)
        # ...then above it.
        ms.minorVersion = 2449
        ms.put()
        updateAppAsset(update_app_asset_response, logError, app_user, request=request)
| rogerthat-platform/rogerthat-backend | src-test/rogerthat_tests/mobicage/capi/test_feature_version.py | Python | apache-2.0 | 1,999 |
#!/usr/bin/python2.4
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
# Copyright 2009 Sun Microsystems, Inc. All rights reserved.
# Use is subject to license terms.
#
"""module describing a legacy packaging object
This module contains the LegacyAction class, which represents a legacy SVr4
package. On installation, this action will lay down file with sufficient data
to fool the SVr4 packaging tools into thinking that package is installed, albeit
empty."""
import os
import errno
from stat import *
import generic
class LegacyAction(generic.Action):
    """Class representing a legacy SVr4 packaging object."""
    name = "legacy"
    key_attr = "pkg"
    def __init__(self, data=None, **attrs):
        generic.Action.__init__(self, data, **attrs)
    def directory_references(self):
        # The only directory this action implies is its metadata directory
        # under var/sadm/pkg.
        return [os.path.normpath(os.path.join("var/sadm/pkg", self.attrs["pkg"]))]
    def install(self, pkgplan, orig):
        """Client-side method that installs the dummy package files.
        Use per-pkg hardlinks to create reference count for pkginfo file"""
        pkgdir = os.path.join(pkgplan.image.get_root(), "var/sadm/pkg",
            self.attrs["pkg"])
        if not os.path.isdir(pkgdir):
            os.makedirs(pkgdir, 0755)
        pkginfo = os.path.join(pkgdir, "pkginfo")
        if not os.path.isfile(pkginfo):
            legacy_info = pkgplan.get_legacy_info()
            # Default SVr4 pkginfo attributes; any may be overridden by a
            # same-named (lower-case) attribute on the action itself.
            svr4attrs = {
                "pkg": self.attrs["pkg"],
                "name": legacy_info["description"],
                "arch": pkgplan.image.get_arch(),
                "version": legacy_info["version"],
                "category": "system",
                "vendor": None,
                "desc": None,
                "hotline": None
            }
            # Upper-case the keys and drop entries with no value.
            attrs = (
                (a.upper(), b)
                for a in svr4attrs
                for b in ( self.attrs.get(a, svr4attrs[a]), )
                if b
            )
            pfile = file(pkginfo, "w")
            for k, v in attrs:
                pfile.write("%s=%s\n" % (k, v))
            pfile.close()
        # create another hardlink to pkginfo file if
        # this is not just an upgrade; we use this to make
        # uninstall easier
        if not orig:
            linkfile = os.path.join(pkgdir,
                "pkginfo.%d" % (os.stat(pkginfo)[ST_NLINK] + 1))
            os.link(pkginfo, linkfile)
        # the svr4 pkg commands need contents file to work, but the
        # needed directories are in the SUNWpkgcmds package....
        # Since this file is always of zero length, we can let this
        # fail until those directories (and the commands that
        # need them) appear.
        try:
            file(os.path.join(pkgplan.image.get_root(),
                "var/sadm/install/contents"), "a").close()
        except IOError, e:
            if e.errno != errno.ENOENT:
                raise
        os.chmod(pkginfo, 0644)
    def verify(self, img, **args):
        """Return a list of error strings; empty when the dummy package
        metadata is present."""
        pkgdir = os.path.join(img.get_root(), "var/sadm/pkg",
            self.attrs["pkg"])
        # XXX this could be a better check & exactly validate pkginfo contents
        if not os.path.isdir(pkgdir):
            return ["Missing directory var/sadm/pkg/%s" %
                self.attrs["pkg"]]
        pkginfo = os.path.join(pkgdir, "pkginfo")
        if not os.path.isfile(os.path.join(pkgdir, "pkginfo")):
            return ["Missing file var/sadm/pkg/%s/pkginfo" %
                self.attrs["pkg"]]
        return []
    def remove(self, pkgplan):
        # pkg directory is removed via implicit directory removal
        pkgdir = os.path.join(pkgplan.image.get_root(), "var/sadm/pkg",
            self.attrs["pkg"])
        pkginfo = os.path.join(pkgdir, "pkginfo")
        if os.path.isfile(pkginfo):
            # Each install added one hardlink; drop one per removal and
            # delete pkginfo itself when this is the last reference.
            link_count = os.stat(pkginfo)[ST_NLINK]
            linkfile = os.path.join(pkgdir,
                "pkginfo.%d" % (link_count))
            if os.path.isfile(linkfile):
                os.unlink(linkfile)
            # do this conditionally to be kinder
            # to installations done w/ older versions
            if link_count <= 2: # last one
                os.unlink(pkginfo)
    def generate_indices(self):
        """Generates the indices needed by the search dictionary.  See
        generic.py for a more detailed explanation."""
        return [("legacy", "legacy_pkg", self.attrs["pkg"], None)]
| marcellodesales/svnedge-console | ext/windows/pkg-toolkit/pkg/vendor-packages/pkg/actions/legacy.py | Python | agpl-3.0 | 6,079 |
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2012-8 Met Office.
#
# This file is part of Rose, a framework for meteorological suites.
#
# Rose is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Rose is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Rose. If not, see <http://www.gnu.org/licenses/>.
# -----------------------------------------------------------------------------
import pygtk
pygtk.require('2.0')
import gtk
import rose.config_editor.util
import rose.gtk.util
import rose.variable
class EntryArrayValueWidget(gtk.HBox):
"""This is a class to represent multiple array entries."""
TIP_ADD = "Add array element"
TIP_DEL = "Remove array element"
TIP_ELEMENT = "Element {0}"
TIP_ELEMENT_CHAR = "Element {0}: '{1}'"
TIP_LEFT = "Move array element left"
TIP_RIGHT = "Move array element right"
def __init__(self, value, metadata, set_value, hook, arg_str=None):
super(EntryArrayValueWidget, self).__init__(homogeneous=False,
spacing=0)
self.value = value
self.metadata = metadata
self.set_value = set_value
self.hook = hook
self.max_length = self.metadata[rose.META_PROP_LENGTH]
value_array = rose.variable.array_split(self.value)
self.chars_width = max([len(v) for v in value_array] + [1]) + 1
self.last_selected_src = None
arr_type = self.metadata.get(rose.META_PROP_TYPE)
self.is_char_array = (arr_type == "character")
self.is_quoted_array = (arr_type == "quoted")
# Do not treat character or quoted arrays specially when incorrect.
if self.is_char_array:
checker = rose.macros.value.ValueChecker()
for val in value_array:
if not checker.check_character(val):
self.is_char_array = False
if self.is_quoted_array:
checker = rose.macros.value.ValueChecker()
for val in value_array:
if not checker.check_quoted(val):
self.is_quoted_array = False
if self.is_char_array:
for i, val in enumerate(value_array):
value_array[i] = (
rose.config_editor.util.text_for_character_widget(val))
if self.is_quoted_array:
for i, val in enumerate(value_array):
value_array[i] = (
rose.config_editor.util.text_for_quoted_widget(val))
# Designate the number of allowed columns - 10 for 4 chars width
self.num_allowed_columns = 3
self.entry_table = gtk.Table(rows=1,
columns=self.num_allowed_columns,
homogeneous=True)
self.entry_table.connect('focus-in-event', self.hook.trigger_scroll)
self.entry_table.show()
self.entries = []
self.has_titles = False
if "element-titles" in metadata:
self.has_titles = True
self.generate_entries(value_array)
self.generate_buttons()
self.populate_table()
self.pack_start(self.add_del_button_box, expand=False, fill=False)
self.pack_start(self.entry_table, expand=True, fill=True)
self.entry_table.connect_after('size-allocate',
lambda w, e: self.reshape_table())
self.connect('focus-in-event',
lambda w, e: self.hook.get_focus(self.get_focus_entry()))
def get_focus_entry(self):
"""Get either the last selected entry or the last one."""
if self.last_selected_src is not None:
return self.last_selected_src
if len(self.entries) > 0:
return self.entries[-1]
return None
def get_focus_index(self):
"""Get the focus and position within the table of entries."""
text = ''
for entry in self.entries:
val = entry.get_text()
if self.is_char_array:
val = rose.config_editor.util.text_from_character_widget(val)
elif self.is_quoted_array:
val = rose.config_editor.util.text_from_quoted_widget(val)
prefix = get_next_delimiter(self.value[len(text):], val)
if prefix is None:
return None
if entry == self.entry_table.focus_child:
return len(text + prefix) + entry.get_position()
text += prefix + val
return None
def set_focus_index(self, focus_index=None):
"""Set the focus and position within the table of entries."""
if focus_index is None:
return
value_array = rose.variable.array_split(self.value)
text = ''
for i, val in enumerate(value_array):
prefix = get_next_delimiter(self.value[len(text):],
val)
if prefix is None:
return
if (len(text + prefix + val) >= focus_index or
i == len(value_array) - 1):
if len(self.entries) > i:
self.entries[i].grab_focus()
val_offset = focus_index - len(text + prefix)
if self.is_char_array or self.is_quoted_array:
val_offset = max([0, val_offset - 1])
self.entries[i].set_position(val_offset)
return
text += prefix + val
def generate_entries(self, value_array=None):
"""Create the gtk.Entry objects for elements in the array."""
if value_array is None:
value_array = rose.variable.array_split(self.value)
entries = []
for value_item in value_array:
for entry in self.entries:
if entry.get_text() == value_item and entry not in entries:
entries.append(entry)
break
else:
entries.append(self.get_entry(value_item))
self.entries = entries
def generate_buttons(self):
"""Create the left-right movement arrows and add button."""
left_arrow = gtk.Arrow(gtk.ARROW_LEFT, gtk.SHADOW_IN)
left_arrow.show()
left_event_box = gtk.EventBox()
left_event_box.add(left_arrow)
left_event_box.show()
left_event_box.connect('button-press-event',
lambda b, e: self.move_element(-1))
left_event_box.connect('enter-notify-event', self._handle_arrow_enter)
left_event_box.connect('leave-notify-event', self._handle_arrow_leave)
left_event_box.set_tooltip_text(self.TIP_LEFT)
right_arrow = gtk.Arrow(gtk.ARROW_RIGHT, gtk.SHADOW_IN)
right_arrow.show()
right_event_box = gtk.EventBox()
right_event_box.show()
right_event_box.add(right_arrow)
right_event_box.connect(
'button-press-event', lambda b, e: self.move_element(1))
right_event_box.connect('enter-notify-event', self._handle_arrow_enter)
right_event_box.connect('leave-notify-event', self._handle_arrow_leave)
right_event_box.set_tooltip_text(self.TIP_RIGHT)
self.arrow_box = gtk.HBox()
self.arrow_box.show()
self.arrow_box.pack_start(left_event_box, expand=False, fill=False)
self.arrow_box.pack_end(right_event_box, expand=False, fill=False)
self.set_arrow_sensitive(False, False)
del_image = gtk.image_new_from_stock(gtk.STOCK_REMOVE,
gtk.ICON_SIZE_MENU)
del_image.show()
self.del_button = gtk.EventBox()
self.del_button.set_tooltip_text(self.TIP_DEL)
self.del_button.add(del_image)
self.del_button.show()
self.del_button.connect('button-release-event',
lambda b, e: self.remove_entry())
self.del_button.connect('enter-notify-event',
lambda b, e: b.set_state(gtk.STATE_ACTIVE))
self.del_button.connect('leave-notify-event',
lambda b, e: b.set_state(gtk.STATE_NORMAL))
self.button_box = gtk.HBox()
self.button_box.show()
self.button_box.pack_start(self.arrow_box, expand=False, fill=True)
add_image = gtk.image_new_from_stock(gtk.STOCK_ADD, gtk.ICON_SIZE_MENU)
add_image.show()
self.add_button = gtk.EventBox()
self.add_button.set_tooltip_text(self.TIP_ADD)
self.add_button.add(add_image)
self.add_button.show()
self.add_button.connect('button-release-event',
lambda b, e: self.add_entry())
self.add_button.connect('enter-notify-event',
lambda b, e: b.set_state(gtk.STATE_ACTIVE))
self.add_button.connect('leave-notify-event',
lambda b, e: b.set_state(gtk.STATE_NORMAL))
self.add_del_button_box = gtk.VBox()
self.add_del_button_box.pack_start(
self.add_button, expand=False, fill=False)
self.add_del_button_box.pack_start(
self.del_button, expand=False, fill=False)
self.add_del_button_box.show()
def _handle_arrow_enter(self, arrow_event_box, event):
if arrow_event_box.get_child().state != gtk.STATE_INSENSITIVE:
arrow_event_box.set_state(gtk.STATE_ACTIVE)
def _handle_arrow_leave(self, arrow_event_box, event):
if arrow_event_box.get_child().state != gtk.STATE_INSENSITIVE:
arrow_event_box.set_state(gtk.STATE_NORMAL)
def set_arrow_sensitive(self, is_left_sensitive, is_right_sensitive):
"""Control the sensitivity of the movement buttons."""
sens_tuple = (is_left_sensitive, is_right_sensitive)
for i, event_box in enumerate(self.arrow_box.get_children()):
event_box.get_child().set_sensitive(sens_tuple[i])
if not sens_tuple[i]:
event_box.set_state(gtk.STATE_NORMAL)
def move_element(self, num_places_right):
"""Move the entry left or right."""
entry = self.last_selected_src
if entry is None:
return
old_index = self.entries.index(entry)
if (old_index + num_places_right < 0 or
old_index + num_places_right > len(self.entries) - 1):
return
self.entries.remove(entry)
self.entries.insert(old_index + num_places_right, entry)
self.populate_table()
self.setter(entry)
def get_entry(self, value_item):
"""Create a gtk Entry for this array element."""
entry = gtk.Entry()
entry.set_text(value_item)
entry.connect('focus-in-event',
self._handle_focus_on_entry)
entry.connect("button-release-event",
self._handle_middle_click_paste)
entry.connect_after("paste-clipboard", self.setter)
entry.connect_after("key-release-event",
lambda e, v: self.setter(e))
entry.connect_after("button-release-event",
lambda e, v: self.setter(e))
entry.connect('focus-out-event',
self._handle_focus_off_entry)
entry.set_width_chars(self.chars_width - 1)
entry.show()
return entry
def populate_table(self, focus_widget=None):
"""Populate a table with the array elements, dynamically."""
position = None
table_widgets = self.entries + [self.button_box]
table_children = self.entry_table.get_children()
if focus_widget is None:
for child in table_children:
if child.is_focus() and isinstance(child, gtk.Entry):
focus_widget = child
position = focus_widget.get_position()
else:
position = focus_widget.get_position()
for child in self.entry_table.get_children():
self.entry_table.remove(child)
if (focus_widget is None and self.entry_table.is_focus() and
len(self.entries) > 0):
focus_widget = self.entries[-1]
position = len(focus_widget.get_text())
num_fields = len(self.entries + [self.button_box])
num_rows_now = 1 + (num_fields - 1) / self.num_allowed_columns
self.entry_table.resize(num_rows_now, self.num_allowed_columns)
if (self.max_length.isdigit() and
len(self.entries) >= int(self.max_length)):
self.add_button.hide()
else:
self.add_button.show()
if (self.max_length.isdigit() and
len(self.entries) <= int(self.max_length)):
self.del_button.hide()
elif len(self.entries) == 0:
self.del_button.hide()
else:
self.del_button.show()
if (self.last_selected_src is not None and
self.last_selected_src in self.entries):
index = self.entries.index(self.last_selected_src)
if index == 0:
self.set_arrow_sensitive(False, True)
elif index == len(self.entries) - 1:
self.set_arrow_sensitive(True, False)
if len(self.entries) < 2:
self.set_arrow_sensitive(False, False)
if self.has_titles:
for col, label in enumerate(self.metadata['element-titles']):
if col >= len(table_widgets) - 1:
break
widget = gtk.HBox()
label = gtk.Label(self.metadata['element-titles'][col])
label.show()
widget.pack_start(label, expand=True, fill=True)
widget.show()
self.entry_table.attach(widget,
col, col + 1,
0, 1,
xoptions=gtk.FILL,
yoptions=gtk.SHRINK)
for i, widget in enumerate(table_widgets):
if isinstance(widget, gtk.Entry):
if self.is_char_array or self.is_quoted_array:
w_value = widget.get_text()
widget.set_tooltip_text(self.TIP_ELEMENT_CHAR.format(
(i + 1), w_value))
else:
widget.set_tooltip_text(self.TIP_ELEMENT.format((i + 1)))
row = i // self.num_allowed_columns
if self.has_titles:
row += 1
column = i % self.num_allowed_columns
self.entry_table.attach(widget,
column, column + 1,
row, row + 1,
xoptions=gtk.FILL,
yoptions=gtk.SHRINK)
if focus_widget is not None:
focus_widget.grab_focus()
focus_widget.set_position(position)
focus_widget.select_region(position, position)
self.grab_focus = lambda: self.hook.get_focus(
self._get_widget_for_focus())
self.check_resize()
def reshape_table(self):
"""Reshape a table according to the space allocated."""
total_x_bound = self.entry_table.get_allocation().width
if not len(self.entries):
return False
entries_bound = sum([e.get_allocation().width for e in self.entries])
each_entry_bound = entries_bound / len(self.entries)
maximum_entry_number = float(total_x_bound) / float(each_entry_bound)
rounded_max = int(maximum_entry_number) + 1
if rounded_max != self.num_allowed_columns + 2 and rounded_max > 2:
self.num_allowed_columns = max(1, rounded_max - 2)
self.populate_table()
def add_entry(self):
"""Add a new entry (with null text) to the variable array."""
entry = self.get_entry('')
self.entries.append(entry)
self._adjust_entry_length()
self.populate_table(focus_widget=entry)
if (self.metadata.get(rose.META_PROP_COMPULSORY) !=
rose.META_PROP_VALUE_TRUE):
self.setter(entry)
def remove_entry(self):
"""Remove the last selected or the last entry."""
if (self.last_selected_src is not None and
self.last_selected_src in self.entries):
entry = self.entries.pop(self.last_selected_src)
self.last_selected_src = None
else:
entry = self.entries.pop()
self.populate_table()
self.setter(entry)
def setter(self, widget):
"""Reconstruct the new variable value from the entry array."""
val_array = [e.get_text() for e in self.entries]
max_length = max([len(v) for v in val_array] + [1])
if max_length + 1 != self.chars_width:
self.chars_width = max_length + 1
self._adjust_entry_length()
if widget is not None and not widget.is_focus():
widget.grab_focus()
widget.set_position(len(widget.get_text()))
widget.select_region(widget.get_position(),
widget.get_position())
if self.is_char_array:
for i, val in enumerate(val_array):
val_array[i] = (
rose.config_editor.util.text_from_character_widget(val))
elif self.is_quoted_array:
for i, val in enumerate(val_array):
val_array[i] = (
rose.config_editor.util.text_from_quoted_widget(val))
entries_have_commas = any("," in v for v in val_array)
new_value = rose.variable.array_join(val_array)
if new_value != self.value:
self.value = new_value
self.set_value(new_value)
if (entries_have_commas and
not (self.is_char_array or self.is_quoted_array)):
new_val_array = rose.variable.array_split(new_value)
if len(new_val_array) != len(self.entries):
self.generate_entries()
focus_index = None
for i, val in enumerate(val_array):
if "," in val:
val_post_comma = val[:val.index(",") + 1]
focus_index = len(rose.variable.array_join(
new_val_array[:i] + [val_post_comma]))
self.populate_table()
self.set_focus_index(focus_index)
return False
def _adjust_entry_length(self):
for entry in self.entries:
entry.set_width_chars(self.chars_width)
entry.set_max_length(self.chars_width)
self.reshape_table()
def _get_widget_for_focus(self):
if self.entries:
return self.entries[-1]
return self.entry_table
def _handle_focus_off_entry(self, widget, event):
if widget == self.last_selected_src:
try:
widget.set_progress_fraction(1.0)
except AttributeError:
widget.drag_highlight()
if widget.get_position() is None:
widget.set_position(len(widget.get_text()))
def _handle_focus_on_entry(self, widget, event):
if self.last_selected_src is not None:
try:
self.last_selected_src.set_progress_fraction(0.0)
except AttributeError:
self.last_selected_src.drag_unhighlight()
self.last_selected_src = widget
is_start = (widget in self.entries and self.entries[0] == widget)
is_end = (widget in self.entries and self.entries[-1] == widget)
self.set_arrow_sensitive(not is_start, not is_end)
if widget.get_text() != '':
widget.select_region(widget.get_position(),
widget.get_position())
return False
    def _handle_middle_click_paste(self, widget, event):
        """On a middle-click (button 2) paste, push the pasted text into the value."""
        if event.button == 2:
            self.setter(widget)
        return False
def get_next_delimiter(array_text, next_element):
    """Return the part of array_text immediately preceding next_element.

    Returns None if next_element is not found.  For a null or whitespace
    leading element, the delimiter extends over the leading whitespace and
    a following comma, if any.
    """
    try:
        val = array_text.index(next_element)
    except ValueError:
        # Substring not found.
        return
    if val == 0 and len(array_text) > 1:  # Null or whitespace element.
        # Bug fix: bound both scans so an all-whitespace array_text can
        # no longer push val past the end and raise IndexError.
        while val < len(array_text) and array_text[val].isspace():
            val += 1
        if val < len(array_text) and array_text[val] == ",":
            val += 1
    return array_text[:val]
| aosprey/rose | lib/python/rose/config_editor/valuewidget/array/entry.py | Python | gpl-3.0 | 21,099 |
# -*- coding: utf-8 -*-
#
# (DC)² - DataCenter Deployment Control
# Copyright (C) 2010, 2011, 2012, 2013, 2014 Stephan Adig <sh@sourcecode.de>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
import sys
import json
try:
from dc2.lib.web.controllers import Controller
except ImportError:
print('you do not have dc2.lib installed')
sys.exit(1)
class JSONController(Controller):
    """Controller that serialises its result payload as JSON."""

    def _content_type(self, *args, **kwargs):
        """Return the MIME type used for JSON responses."""
        return 'application/json; charset=utf-8'

    def _prepare_output(self, *args, **kwargs):
        """Build the response dict, JSON-encoding kwargs['result']."""
        payload = kwargs.get('result')
        return {
            'format': 'json',
            'content-type': self._content_type(),
            'output': json.dumps(payload)}
| sadig/DC2 | components/dc2-admincenter/dc2/admincenter/lib/controllers/jsoncontroller.py | Python | gpl-2.0 | 1,430 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from time import sleep
import urlparse
from ConfigParser import ConfigParser
import pickle
import requests
def config():
    """Read settings.ini and publish playback options as module globals.

    Returns [lang, lang2, forcesub, forceusa, localizecookies, quality,
    onlymainsub], where lang/lang2 are the localized display names.
    """
    global video_format
    global resolution
    global lang
    global lang2
    global forceusa
    global localizecookies
    parser = ConfigParser()
    parser.read('settings.ini')
    quality = parser.get('SETTINGS', 'video_quality')
    # Maps the quality label to the [video_format, resolution] API codes.
    qualities = {'android': ['107', '71'], '360p': ['106', '60'], '480p': ['106', '61'],
                 '720p': ['106', '62'], '1080p': ['108', '80'], 'highest': ['0', '0']}
    video_format, resolution = qualities[quality]
    lang = parser.get('SETTINGS', 'language')
    lang2 = parser.get('SETTINGS', 'language2')
    # Maps the config-file spelling to the site's display name.
    langd = {'Espanol_Espana': u'Español (Espana)', 'Francais': u'Français (France)', 'Portugues': u'Português (Brasil)',
             'English': u'English', 'Espanol': u'Español', 'Turkce': u'Türkçe', 'Italiano': u'Italiano',
             'Arabic': u'العربية', 'Deutsch': u'Deutsch'}
    lang = langd[lang]
    lang2 = langd[lang2]
    forcesub = parser.getboolean('SETTINGS', 'forcesubtitle')
    forceusa = parser.getboolean('SETTINGS', 'forceusa')
    localizecookies = parser.getboolean('SETTINGS', 'localizecookies')
    onlymainsub = parser.getboolean('SETTINGS', 'onlymainsub')
    return [lang, lang2, forcesub, forceusa, localizecookies, quality, onlymainsub]
#def playerrev(url):
# global player_revision
#
# revision_regex = r"swfobject.embedSWF\(\"(?:.+)'(?P<revision>[\d.]+)'(?:.+)\)"
# try:
# player_revision = re.search(revision_regex, gethtml(url)).group("revision")
# except IndexError:
# try:
# url += '?skip_wall=1' # perv
# html = gethtml(url)
# player_revision = re.search(revision_regex, html).group("revision")
# except IndexError:
# open('debug.html', 'w').write(html.encode('utf-8'))
# sys.exit('Sorry, but it looks like something went wrong with accessing the Crunchyroll page. Please make an issue on GitHub and attach debug.html which should be in the folder.')
# return player_revision
def gethtml(url):
    """Fetch *url* with the saved Crunchyroll session cookies; return unicode HTML.

    Cookies come from the pickled 'cookies' file; the 'c_visitor' cookie is
    always dropped.  When forceusa is set, a US session id is fetched from
    crunblocker.com (retried once after a 10s pause).  Exits the process if
    *url* is not an absolute URL.
    """
    with open('cookies') as f:
        cookies = requests.utils.cookiejar_from_dict(pickle.load(f))
    session = requests.session()
    session.cookies = cookies
    del session.cookies['c_visitor']
    if not forceusa and localizecookies:
        # Force the site locale cookie to match the configured language.
        session.cookies['c_locale']={u'Español (Espana)' : 'esES', u'Français (France)' : 'frFR', u'Português (Brasil)' : 'ptBR',
                                     u'English' : 'enUS', u'Español' : 'esLA', u'Türkçe' : 'enUS', u'Italiano' : 'itIT',
                                     u'العربية' : 'arME' , u'Deutsch' : 'deDE'}[lang]
    if forceusa:
        try:
            session.cookies['sess_id'] = requests.get('http://www.crunblocker.com/sess_id.php').text
        except:
            sleep(10)  # sleep so we don't overload crunblocker
            session.cookies['sess_id'] = requests.get('http://www.crunblocker.com/sess_id.php').text
    parts = urlparse.urlsplit(url)
    if not parts.scheme or not parts.netloc:
        print 'Apparently not a URL'
        sys.exit()
    data = {'Referer': 'http://crunchyroll.com/', 'Host': 'www.crunchyroll.com',
            'User-Agent': 'Mozilla/5.0 Windows NT 6.1; rv:26.0 Gecko/20100101 Firefox/26.0'}
    res = session.get(url, params=data)
    res.encoding = 'UTF-8'
    return res.text
def getxml(req, med_id):
    """POST to Crunchyroll's XML RPC endpoint and return the response text.

    *req* selects the RPC method; *med_id* is the media or subtitle script
    id.  Uses the same pickled cookies / forceusa handling as gethtml.
    """
    url = 'http://www.crunchyroll.com/xml/'
    # Each RPC method expects a slightly different parameter set.
    if req == 'RpcApiSubtitle_GetXml':
        payload = {'req': 'RpcApiSubtitle_GetXml', 'subtitle_script_id': med_id}
    elif req == 'RpcApiVideoPlayer_GetStandardConfig':
        payload = {'req': 'RpcApiVideoPlayer_GetStandardConfig', 'media_id': med_id, 'video_format': video_format,
                   'video_quality': resolution, 'auto_play': '1', 'show_pop_out_controls': '1',
                   'current_page': 'http://www.crunchyroll.com/'}
    else:
        payload = {'req': req, 'media_id': med_id, 'video_format': video_format, 'video_encode_quality': resolution}
    with open('cookies') as f:
        cookies = requests.utils.cookiejar_from_dict(pickle.load(f))
    session = requests.session()
    session.cookies = cookies
    del session.cookies['c_visitor']
    if not forceusa and localizecookies:
        # Force the site locale cookie to match the configured language.
        session.cookies['c_locale']={u'Español (Espana)' : 'esES', u'Français (France)' : 'frFR', u'Português (Brasil)' : 'ptBR',
                                     u'English' : 'enUS', u'Español' : 'esLA', u'Türkçe' : 'enUS', u'Italiano' : 'itIT',
                                     u'العربية' : 'arME' , u'Deutsch' : 'deDE'}[lang]
    if forceusa:
        try:
            session.cookies['sess_id'] = requests.get('http://www.crunblocker.com/sess_id.php').text
        except:
            sleep(10)  # sleep so we don't overload crunblocker
            session.cookies['sess_id'] = requests.get('http://www.crunblocker.com/sess_id.php').text
    headers = {'Referer': 'http://static.ak.crunchyroll.com/swf/ChromelessPlayerApp.swf',
               'Host': 'www.crunchyroll.com', 'Content-type': 'application/x-www-form-urlencoded',
               'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; rv:26.0) Gecko/20100101 Firefox/26.0)'}
    res = session.post(url, params=payload, headers=headers)
    res.encoding = 'UTF-8'
    return res.text
def vidurl(url, season, ep):  # experimental, although it does help if you only know the program page.
    """Resolve an episode URL from a Crunchyroll program page.

    *season* is an index into the page's season dropdown and *ep* the
    episode number (or index, for pages without numbered episodes).
    NOTE(review): relies entirely on the page's current HTML layout —
    the regexes below will break if Crunchyroll changes its markup.
    """
    res = gethtml(url)
    try:
        # Print the viewer country detected from the footer flag, if present.
        print re.findall('<img id=\"footer_country_flag\".+?title=\"(.+?)\"', res, re.DOTALL)[0]
    except:
        pass
#    open('video.html', 'w').write(res.encode('utf-8'))
    slist = re.findall('<a href="#" class="season-dropdown content-menu block text-link strong(?: open| ) '
                       'small-margin-bottom" title="(.+?)"', res)
    if slist:  # multiple seasons
        if len(re.findall('<a href=".+episode-(01|1)-(.+?)"', res)) > 1:  # dirty hack, I know
#            print list(reversed(slist))
#            season = int(raw_input('Season number: '))
#            season = sys.argv[3]
#            ep = raw_input('Episode number: ')
#            ep = sys.argv[2]
            season = slist[int(season)]
#            import pdb
#            pdb.set_trace()
            # Pick the episode link belonging to the chosen season block.
            return 'http://www.crunchyroll.com' + re.findall(
                '<a href="(.+episode-0?' + ep + '-(?:.+-)?[0-9]{6})"', res)[slist.index(season)]
        else:
#            print list(reversed(re.findall('<a href=".+episode-(.+?)-',res)))
#            ep = raw_input('Episode number: ')
#            ep = sys.argv[2]
            return 'http://www.crunchyroll.com' + re.findall('<a href="(.+episode-0?' + ep + '-(?:.+-)?[0-9]{6})"',
                                                             res).pop()
    else:
        # 'http://www.crunchyroll.com/media-'
        # print re.findall('<a href=\"(.+?)\" title=\"(.+?)\"
        # class=\"portrait-element block-link titlefix episode\"', res)
        # epnum = raw_input('Episode number: ')
        # epnum = sys.argv[2]
        # Single-season page: treat ep as an index into the episode list.
        return 'http://www.crunchyroll.com' + \
               re.findall('<a href=\"(.+?)\" .+ class=\"portrait-element block-link titlefix episode\"', res)[int(ep)]
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Sebastian Kornehl <sebastian.kornehl@asideas.de>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# import module snippets
# Module documentation rendered by ansible-doc.
# Fix: the documented 'state' choices previously said 'muted'/'unmuted',
# but the code (and the examples below) use 'mute'/'unmute'.
DOCUMENTATION = '''
---
module: datadog_monitor
short_description: Manages Datadog monitors
description:
- "Manages monitors within Datadog"
- "Options like described on http://docs.datadoghq.com/api/"
version_added: "2.0"
author: "Sebastian Kornehl (@skornehl)"
requirements: [datadog]
options:
    api_key:
        description: ["Your DataDog API key."]
        required: true
    app_key:
        description: ["Your DataDog app key."]
        required: true
    state:
        description: ["The designated state of the monitor."]
        required: true
        choices: ['present', 'absent', 'mute', 'unmute']
    tags:
        description: ["A list of tags to associate with your monitor when creating or updating. This can help you categorize and filter monitors."]
        required: false
        default: None
        version_added: "2.2"
    type:
        description:
            - "The type of the monitor."
            - The 'event alert' is available starting at Ansible 2.1
        required: false
        default: null
        choices: ['metric alert', 'service check', 'event alert']
    query:
        description: ["The monitor query to notify on with syntax varying depending on what type of monitor you are creating."]
        required: false
        default: null
    name:
        description: ["The name of the alert."]
        required: true
    message:
        description: ["A message to include with notifications for this monitor. Email notifications can be sent to specific users by using the same '@username' notation as events. Monitor message template variables can be accessed by using double square brackets, i.e '[[' and ']]'."]
        required: false
        default: null
    silenced:
        description: ["Dictionary of scopes to timestamps or None. Each scope will be muted until the given POSIX timestamp or forever if the value is None. "]
        required: false
        default: ""
    notify_no_data:
        description: ["A boolean indicating whether this monitor will notify when data stops reporting.."]
        required: false
        default: False
    no_data_timeframe:
        description: ["The number of minutes before a monitor will notify when data stops reporting. Must be at least 2x the monitor timeframe for metric alerts or 2 minutes for service checks."]
        required: false
        default: 2x timeframe for metric, 2 minutes for service
    timeout_h:
        description: ["The number of hours of the monitor not reporting data before it will automatically resolve from a triggered state."]
        required: false
        default: null
    renotify_interval:
        description: ["The number of minutes after the last notification before a monitor will re-notify on the current status. It will only re-notify if it's not resolved."]
        required: false
        default: null
    escalation_message:
        description: ["A message to include with a re-notification. Supports the '@username' notification we allow elsewhere. Not applicable if renotify_interval is None"]
        required: false
        default: null
    notify_audit:
        description: ["A boolean indicating whether tagged users will be notified on changes to this monitor."]
        required: false
        default: False
    thresholds:
        description: ["A dictionary of thresholds by status. This option is only available for service checks and metric alerts. Because each of them can have multiple thresholds, we don't define them directly in the query."]
        required: false
        default: {'ok': 1, 'critical': 1, 'warning': 1}
    locked:
        description: ["A boolean indicating whether changes to this monitor should be restricted to the creator or admins."]
        required: false
        default: False
        version_added: "2.2"
    require_full_window:
        description: ["A boolean indicating whether this monitor needs a full window of data before it's evaluated. We highly recommend you set this to False for sparse metrics, otherwise some evaluations will be skipped."]
        required: false
        default: null
        version_added: "2.3"
'''

EXAMPLES = '''
# Create a metric monitor
datadog_monitor:
  type: "metric alert"
  name: "Test monitor"
  state: "present"
  query: "datadog.agent.up".over("host:host1").last(2).count_by_status()"
  message: "Host [[host.name]] with IP [[host.ip]] is failing to report to datadog."
  api_key: "9775a026f1ca7d1c6c5af9d94d9595a4"
  app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff"

# Deletes a monitor
datadog_monitor:
  name: "Test monitor"
  state: "absent"
  api_key: "9775a026f1ca7d1c6c5af9d94d9595a4"
  app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff"

# Mutes a monitor
datadog_monitor:
  name: "Test monitor"
  state: "mute"
  silenced: '{"*":None}'
  api_key: "9775a026f1ca7d1c6c5af9d94d9595a4"
  app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff"

# Unmutes a monitor
datadog_monitor:
  name: "Test monitor"
  state: "unmute"
  api_key: "9775a026f1ca7d1c6c5af9d94d9595a4"
  app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff"
'''

# Import Datadog: the module fails gracefully at runtime if the datadogpy
# package is missing (see the HAS_DATADOG check in main()).
try:
    from datadog import initialize, api

    HAS_DATADOG = True
except:
    HAS_DATADOG = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
def main():
    """Ansible entry point: parse arguments and dispatch on 'state'."""
    module = AnsibleModule(
        argument_spec=dict(
            api_key=dict(required=True, no_log=True),
            app_key=dict(required=True, no_log=True),
            # Bug fix: 'choices' was misspelled 'choises' for state/type,
            # which silently disabled AnsibleModule's choice validation.
            state=dict(required=True, choices=['present', 'absent', 'mute', 'unmute']),
            type=dict(required=False, choices=['metric alert', 'service check', 'event alert']),
            name=dict(required=True),
            query=dict(required=False),
            message=dict(required=False, default=None),
            silenced=dict(required=False, default=None, type='dict'),
            notify_no_data=dict(required=False, default=False, type='bool'),
            no_data_timeframe=dict(required=False, default=None),
            timeout_h=dict(required=False, default=None),
            renotify_interval=dict(required=False, default=None),
            escalation_message=dict(required=False, default=None),
            notify_audit=dict(required=False, default=False, type='bool'),
            thresholds=dict(required=False, type='dict', default=None),
            tags=dict(required=False, type='list', default=None),
            locked=dict(required=False, default=False, type='bool'),
            require_full_window=dict(required=False, default=None, type='bool')
        )
    )

    # Prepare Datadog
    if not HAS_DATADOG:
        module.fail_json(msg='datadogpy required for this module')

    options = {
        'api_key': module.params['api_key'],
        'app_key': module.params['app_key']
    }

    initialize(**options)

    state = module.params['state']
    if state == 'present':
        install_monitor(module)
    elif state == 'absent':
        delete_monitor(module)
    elif state == 'mute':
        mute_monitor(module)
    elif state == 'unmute':
        unmute_monitor(module)
def _fix_template_vars(message):
if message:
return message.replace('[[', '{{').replace(']]', '}}')
return message
def _get_monitor(module):
    """Return the existing monitor whose name matches the module's, or {}."""
    wanted = module.params['name']
    for monitor in api.Monitor.get_all():
        if monitor['name'] == wanted:
            return monitor
    return {}
def _post_monitor(module, options):
    """Create a new Datadog monitor from the module parameters and exit."""
    params = module.params
    try:
        kwargs = {
            'type': params['type'],
            'query': params['query'],
            'name': params['name'],
            'message': _fix_template_vars(params['message']),
            'options': options,
        }
        if params['tags'] is not None:
            kwargs['tags'] = params['tags']
        msg = api.Monitor.create(**kwargs)
        if 'errors' in msg:
            module.fail_json(msg=str(msg['errors']))
        else:
            module.exit_json(changed=True, msg=msg)
    except Exception:
        e = get_exception()
        module.fail_json(msg=str(e))
def _equal_dicts(a, b, ignore_keys):
ka = set(a).difference(ignore_keys)
kb = set(b).difference(ignore_keys)
return ka == kb and all(a[k] == b[k] for k in ka)
def _update_monitor(module, monitor, options):
    """Update an existing monitor, reporting changed=False when identical."""
    params = module.params
    try:
        kwargs = {
            'id': monitor['id'],
            'query': params['query'],
            'name': params['name'],
            'message': _fix_template_vars(params['message']),
            'options': options,
        }
        if params['tags'] is not None:
            kwargs['tags'] = params['tags']
        msg = api.Monitor.update(**kwargs)
        if 'errors' in msg:
            module.fail_json(msg=str(msg['errors']))
        elif _equal_dicts(msg, monitor, ['creator', 'overall_state', 'modified']):
            # Nothing material changed beyond bookkeeping fields.
            module.exit_json(changed=False, msg=msg)
        else:
            module.exit_json(changed=True, msg=msg)
    except Exception:
        e = get_exception()
        module.fail_json(msg=str(e))
def install_monitor(module):
    """Create the monitor if it does not exist, otherwise update it."""
    params = module.params
    options = {
        "silenced": params['silenced'],
        "notify_no_data": module.boolean(params['notify_no_data']),
        "no_data_timeframe": params['no_data_timeframe'],
        "timeout_h": params['timeout_h'],
        "renotify_interval": params['renotify_interval'],
        "escalation_message": params['escalation_message'],
        "notify_audit": module.boolean(params['notify_audit']),
        "locked": module.boolean(params['locked']),
        "require_full_window": params['require_full_window'],
    }

    # Thresholds only apply to service checks and metric alerts; service
    # checks get a default set when none is supplied.
    if params['type'] == "service check":
        options["thresholds"] = params['thresholds'] or {'ok': 1, 'critical': 1, 'warning': 1}
    if params['type'] == "metric alert" and params['thresholds'] is not None:
        options["thresholds"] = params['thresholds']

    monitor = _get_monitor(module)
    if monitor:
        _update_monitor(module, monitor, options)
    else:
        _post_monitor(module, options)
def delete_monitor(module):
    """Delete the named monitor; a no-op when it does not exist."""
    monitor = _get_monitor(module)
    if not monitor:
        module.exit_json(changed=False)
    try:
        module.exit_json(changed=True, msg=api.Monitor.delete(monitor['id']))
    except Exception:
        e = get_exception()
        module.fail_json(msg=str(e))
def mute_monitor(module):
    """Mute a monitor, optionally scoping the mute via the 'silenced' dict.

    Fails if the monitor is missing or already muted; exits unchanged when
    every requested scope is already silenced.
    """
    monitor = _get_monitor(module)
    if not monitor:
        module.fail_json(msg="Monitor %s not found!" % module.params['name'])
    elif monitor['options']['silenced']:
        module.fail_json(msg="Monitor is already muted. Datadog does not allow to modify muted alerts, consider unmuting it first.")
    elif (module.params['silenced'] is not None
            and len(set(monitor['options']['silenced']) - set(module.params['silenced'])) == 0):
        # All requested scopes are already muted; nothing to do.
        module.exit_json(changed=False)
    try:
        if module.params['silenced'] is None or module.params['silenced'] == "":
            # No scope given: mute the whole monitor.
            msg = api.Monitor.mute(id=monitor['id'])
        else:
            msg = api.Monitor.mute(id=monitor['id'], silenced=module.params['silenced'])
        module.exit_json(changed=True, msg=msg)
    except Exception:
        e = get_exception()
        module.fail_json(msg=str(e))
def unmute_monitor(module):
    """Unmute the named monitor; exits unchanged when it is not muted."""
    monitor = _get_monitor(module)
    if not monitor:
        module.fail_json(msg="Monitor %s not found!" % module.params['name'])
    elif not monitor['options']['silenced']:
        module.exit_json(changed=False)
    try:
        module.exit_json(changed=True, msg=api.Monitor.unmute(monitor['id']))
    except Exception:
        e = get_exception()
        module.fail_json(msg=str(e))
# Script entry point: only run the module when executed directly.
if __name__ == '__main__':
    main()
| CenturylinkTechnology/ansible-modules-extras | monitoring/datadog_monitor.py | Python | gpl-3.0 | 12,600 |
import win32api
import Tkinter
import win32con
from gui.widgets import OptionMenu, Buttons, Textboxes
from gui.widgets.frames import Frame
from gui.widgets.frames.tabs import DisableDeleteNotebookTab
import constants as c
class WindowTab(DisableDeleteNotebookTab.Disable):
    """Settings tab for the stimulus window: size, colour, monitor and refresh rate."""

    def __init__(self, parent, row, column, monitorFreqChanged, **kwargs):
        """Build the tab's widgets.

        monitorFreqChanged is a callback invoked whenever the user edits
        the frequency textbox (but not while its default is being loaded).
        """
        Frame.Frame.__init__(self, parent, c.WINDOW_TAB, row, column, **kwargs)
        self.monitorFreqChanged = monitorFreqChanged
        # Suppresses the change callback while the default frequency loads.
        self.loading_default_value = True
        monitor_names = self.getMonitorNames()
        monitor_command = lambda: self.updateMonitorFreqTextbox(self.widgets_dict[c.WINDOW_MONITOR].widget, self.widgets_dict[c.WINDOW_MONITOR].variable, self.widgets_dict[c.WINDOW_FREQ])
        refresh_command = lambda: self.refreshMonitorNames(self.widgets_dict[c.WINDOW_MONITOR].widget, self.widgets_dict[c.WINDOW_MONITOR].variable, self.widgets_dict[c.WINDOW_FREQ])
        self.addChildWidgets((
            Textboxes.LabelTextbox     (self.widget, c.WINDOW_WIDTH,   0, 0, command=int,   default_value=800),
            Textboxes.LabelTextbox     (self.widget, c.WINDOW_HEIGHT,  0, 2, command=int,   default_value=600),
            Textboxes.ColorTextboxFrame(self.widget, c.WINDOW_COLOR,   0, 4, default_value="#000000"),
            Textboxes.LabelTextbox     (self.widget, c.WINDOW_FREQ,    1, 0, command=self.freqTextboxCommand, default_value=self.getMonitorFrequency(monitor_names[0])),
            Buttons.Button             (self.widget, c.WINDOW_REFRESH, 1, 2, command=refresh_command),
            OptionMenu.OptionMenu      (self.widget, c.WINDOW_MONITOR, 2, 1, monitor_names, command=monitor_command, columnspan=3),
            self.getDisableButton      (2, 4)
        ))

    def freqTextboxCommand(self, value):
        """Validate the frequency value and notify listeners of user edits."""
        float(value)
        if not self.loading_default_value:
            self.monitorFreqChanged()
        else:
            self.loading_default_value = False

    def getMonitorNames(self):
        """Return the device names of all attached monitors."""
        return [win32api.GetMonitorInfo(monitor[0])["Device"] for monitor in win32api.EnumDisplayMonitors()]

    def getMonitorFrequency(self, monitor_name):
        """Return the current refresh rate (Hz) of the named monitor."""
        return getattr(win32api.EnumDisplaySettings(monitor_name, win32con.ENUM_CURRENT_SETTINGS), "DisplayFrequency")

    def refreshMonitorNames(self, widget, var, textbox):
        """Rebuild the monitor dropdown from the currently attached monitors."""
        widget["menu"].delete(0, Tkinter.END)
        for monitor_name in self.getMonitorNames():
            widget["menu"].add_command(label=monitor_name, command=lambda x=monitor_name: (var.set(x), self.updateMonitorFreqTextbox(widget, var, textbox)))
        self.updateMonitorFreqTextbox(widget, var, textbox)

    def updateMonitorFreqTextbox(self, widget, var, textbox):
        """Show the selected monitor's refresh rate; reset selection if stale."""
        monitor_names = self.getMonitorNames()
        if var.get() not in monitor_names:
            # Selected monitor was unplugged: fall back to the first one.
            var.set(monitor_names[0])
            self.refreshMonitorNames(widget, var, textbox)
        else:
            textbox.setValue(self.getMonitorFrequency(var.get()))
            textbox.validate()
| kahvel/VEP-BCI | src/gui/widgets/frames/tabs/WindowTab.py | Python | mit | 3,034 |
"""Moira list tasks"""
import logging
from django.contrib.auth import get_user_model
from channels.membership_api import update_memberships_for_managed_channels
from moira_lists.models import MoiraList
from moira_lists import moira_api
from open_discussions.celery import app
User = get_user_model()
log = logging.getLogger()
@app.task
def update_user_moira_lists(user_id, update_memberships=False):
    """
    Update the user's moira lists

    Args:
        user_id (int): User id
        update_memberships (bool): Whether to update memberships afterward
    """
    user = User.objects.get(id=user_id)
    moira_api.update_user_moira_lists(user)
    if update_memberships:
        update_memberships_for_managed_channels(user_ids=[user_id])
@app.task
def update_moira_list_users(names, channel_ids=None):
    """
    Update the users for each moira list

    Args:
        names (list of str): Moira list names
        channel_ids (list of int): Channel ids
    """
    for list_name in names:
        moira_list, _ = MoiraList.objects.get_or_create(name=list_name)
        moira_api.update_moira_list_users(moira_list)

    if channel_ids is not None:
        update_memberships_for_managed_channels(channel_ids=channel_ids)
| mitodl/open-discussions | moira_lists/tasks.py | Python | bsd-3-clause | 1,210 |
"""User-defined positioned parser example.
This shows how a new parser can be defined outside Parsita and used in tandem
with the built-in parsers. The ``positioned`` parser updates the value
returned from an arbitrary parser with the position in the input that was
consumed by that parser.
"""
from abc import abstractmethod
from dataclasses import dataclass
from typing import Generic
from parsita import Parser, TextParsers, reg
from parsita.state import Continue, Input, Output, Reader, Status
from parsita.util import splat
class PositionAware(Generic[Output]):
    """A value that can receive its input position from ``positioned``.

    Subclasses implement ``set_position`` so that the ``positioned`` parser
    can hand them the span of input they were parsed from and obtain the
    final, position-bearing value.
    """

    @abstractmethod
    def set_position(self, start: int, length: int) -> Output:
        """Return a new value carrying the given input position.

        Implementations typically build an object similar to this one but
        with the position filled in; the original value must not be
        mutated.

        Args:
            start: Index of the first character consumed by the parser
            length: Number of characters consumed by the parser
        """
class PositionedParser(Generic[Input, Output], Parser[Input, Output]):
    """Parser wrapper that stamps the consumed input span onto the value."""

    def __init__(self, parser: Parser[Input, PositionAware[Output]]):
        super().__init__()
        self.parser = parser

    def consume(self, reader: Reader[Input]) -> Status[Input, Output]:
        begin = reader.position
        status = self.parser.consume(reader)
        if not isinstance(status, Continue):
            # Failure: pass it through untouched.
            return status
        consumed = status.remainder.position - begin
        value = status.value.set_position(begin, consumed)
        return Continue(status.remainder, value).merge(status)

    def __repr__(self):
        return self.name_or_nothing() + "positioned({})".format(self.parser.name_or_repr())
def positioned(parser: Parser[Input, PositionAware[Output]]):
    """Wrap ``parser`` so its result learns where in the input it matched.

    On success, ``set_position`` is called on the value produced by
    ``parser`` (which must implement ``PositionAware``) with the start
    index and length of the consumed input; its return value becomes the
    parsed result.

    Args:
        parser: Parser producing a ``PositionAware`` value
    """
    return PositionedParser(parser)
# Everything below here is an example use case
@dataclass
class UnfinishedVariable(PositionAware):
    """A variable name parsed before its input position is known."""

    name: str

    def set_position(self, start: int, length: int):
        """Return the finished Variable carrying the parsed position."""
        return Variable(self.name, start, length)
@dataclass
class Variable:
    """A variable name together with the input span it was parsed from."""

    name: str
    start: int
    length: int
@dataclass
class Plus:
    """An addition of two parsed variables."""

    first: Variable
    second: Variable
class PlusParsers(TextParsers):
    """Example grammar: ``variable '+' variable`` with positioned variables."""
    variable = positioned(reg("[A-Za-z][A-Za-z0-9_]*") > UnfinishedVariable)
    plus = variable & "+" >> variable > splat(Plus)


# Demo entry point: parse a small expression and print the result.
if __name__ == "__main__":
    print(PlusParsers.plus.parse("abc + xyz").or_die())
| drhagen/parsita | examples/positioned.py | Python | mit | 3,313 |
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import sys
PY3 = False
if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int
if PY3:
import urllib.parse as urlparse # Es muy lento en PY2. En PY3 es nativo
else:
import urlparse # Usamos el nativo de PY2 que es más rápido
import re
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from core import httptools
from core import jsontools as json
host = 'https://vjav.com' # https://hdzog.com https://upornia.com https://vjav.com https://txxx.com
url_api = host + "/api/json/videos/%s/str/%s/60/%s.%s.1.all..%s.json"
def mainlist(item):
    """Build the channel's root menu."""
    logger.info()
    menu = [
        ("Ultimas", "lista", url_api % ("14400", "latest-updates", "", "", "")),
        ("Mejor valoradas", "lista", url_api % ("14400", "top-rated", "", "", "month")),
        ("Mas popular", "lista", url_api % ("14400", "most-popular", "", "", "month")),
        ("Mas comentado", "lista", url_api % ("14400", "most-commented", "", "", "month")),
        ("Pornstar", "pornstar", host + "/api/json/models/86400/str/filt........../most-popular/48/1.json"),
        ("Categorias", "categorias", host + "/api/json/categories/14400/str.all.json"),
    ]
    itemlist = [item.clone(title=title, action=action, url=url)
                for title, action, url in menu]
    itemlist.append(item.clone(title="Buscar", action="search"))
    return itemlist
def search(item, texto):
    """Search the site for *texto*; returns [] on any scraping error."""
    logger.info()
    query = texto.replace(" ", "%20")
    item.url = ("%s/api/videos.php?params=259200/str/relevance/60/search..1.all..&s=%s"
                % (host, query))
    try:
        return lista(item)
    except:
        # Best-effort: log the failure and return an empty result list.
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
def pornstar(item):
    """List pornstar models from the site's JSON API, with pagination."""
    logger.info()
    itemlist = []
    headers = {'Referer': "%s" % host}
    data = httptools.downloadpage(item.url, headers=headers).data
    data = re.sub(r"\n|\r|\t| |<br>", "", data)
    JSONData = json.load(data)
    for cat in JSONData["models"]:
        scrapedtitle = cat["title"]
        dir = cat["dir"]
        scrapedthumbnail = cat["img"]
        num = cat["statistics"]
        n = 'videos'
        # Video count; falls back to the literal key if it is missing.
        num = num.get(n,n)
        thumbnail = scrapedthumbnail.replace("\/", "/")
        scrapedplot = ""
        url = url_api % ("14400", "latest-updates", "model", dir, "")
        title = "%s (%s)" %(scrapedtitle,num)
        itemlist.append(item.clone(action="lista", title=title, url=url, thumbnail=thumbnail , plot=scrapedplot) )
    # Pagination: 60 models per page; the page number lives in the URL.
    total= int(JSONData["total_count"])
    page = int(scrapertools.find_single_match(item.url,'.*?(\d+).json'))
    url_page = scrapertools.find_single_match(item.url,'(.*?)\d+.json')
    next_page = (page+ 1)
    if (page*60) < total:
        next_page = "%s%s.json" %(url_page,next_page)
        itemlist.append(item.clone(action="pornstar", title="[COLOR blue]Página Siguiente >>[/COLOR]", url=next_page) )
    return itemlist
def catalogo(item):
    """List content channels from the site's JSON API, with pagination."""
    logger.info()
    itemlist = []
    headers = {'Referer': "%s" % host}
    data = httptools.downloadpage(item.url, headers=headers).data
    data = re.sub(r"\n|\r|\t| |<br>", "", data)
    JSONData = json.load(data)
    for cat in JSONData["channels"]:
        scrapedtitle = cat["title"]
        dir = cat["dir"]
        scrapedthumbnail = cat["img"]
        num = cat["statistics"]
        n = 'videos'
        # Video count; falls back to the literal key if it is missing.
        num = num.get(n,n)
        thumbnail = scrapedthumbnail.replace("\/", "/")
        scrapedplot = ""
        url = url_api % ("7200", "latest-updates", "channel", dir, "")
        title = "%s (%s)" %(scrapedtitle,num)
        itemlist.append(item.clone(action="lista", title=title , url=url ,
                                   thumbnail=thumbnail , plot=scrapedplot) )
    # Pagination: 60 channels per page; the page number lives in the URL.
    total= int(JSONData["total_count"])
    page = int(scrapertools.find_single_match(item.url,'.*?.(\d+).json'))
    url_page = scrapertools.find_single_match(item.url,'(.*?).\d+.json')
    next_page = (page+ 1)
    if (page*60) < total:
        next_page = "%s.%s.json" %(url_page,next_page)
        itemlist.append(item.clone(action="catalogo", title="[COLOR blue]Página Siguiente >>[/COLOR]", url=next_page) )
    return itemlist
def categorias(item):
    """List the site's categories, sorted alphabetically by title."""
    logger.info()
    headers = {'Referer': "%s" % host}
    response = httptools.downloadpage(item.url, headers=headers).data
    categories = json.load(response)["categories"]
    itemlist = []
    for cat in categories:
        cat_url = url_api % ("14400", "most-popular", "categories", cat["dir"], "day")
        label = "%s (%s)" % (cat["title"], cat["total_videos"])
        itemlist.append(item.clone(action="lista", title=label, url=cat_url,
                                   thumbnail="", plot=""))
    itemlist.sort(key=lambda entry: entry.title)
    return itemlist
def lista(item):
    """List video entries from the JSON API response.

    Each video becomes a playable item labelled with its duration (plus an
    HD tag when the API reports HD "props").  A paging item is appended
    while more results remain (60 entries per page).
    """
    logger.info()
    itemlist = []
    headers = {'Referer': "%s" % host}
    data = httptools.downloadpage(item.url, headers=headers).data
    data = re.sub(r"\n|\r|\t| |<br>", "", data)
    # BUGFIX: json.load() expects a file-like object, but `data` is a str;
    # json.loads() is the correct call for an in-memory payload.
    JSONData = json.loads(data)
    for Video in JSONData["videos"]:
        # NOTE: the unused `dir` field read was dropped (it also shadowed
        # the builtin).
        video_id = Video["video_id"]
        scrapedtitle = Video["title"]
        duration = Video["duration"]
        scrapedthumbnail = Video["scr"]
        scrapedhd = Video["props"]
        scrapedurl = "%s/embed/%s" % (host, video_id)
        if scrapedhd:
            title = "[COLOR yellow]%s[/COLOR] [COLOR tomato]HD[/COLOR] %s" % (duration, scrapedtitle)
        else:
            title = "[COLOR yellow]%s[/COLOR] %s" % (duration, scrapedtitle)
        thumbnail = scrapedthumbnail.replace("\/", "/")
        plot = ""
        # NOTE(review): logger.info() presumably returns None, so the
        # comparison below is never True and `action` stays "play"; this
        # looks like a leftover placeholder for a real config check --
        # confirm before changing it.
        action = "play"
        if logger.info() == False:
            action = "findvideos"
        itemlist.append(item.clone(action=action, title=title, url=scrapedurl, thumbnail=thumbnail,
                                   fanart=thumbnail, plot=plot, contentTitle=title))
    # Pagination: the API serves 60 entries per page.
    total = int(JSONData["total_count"])
    page = int(scrapertools.find_single_match(item.url, '(\d+).all..'))
    url_page = scrapertools.find_single_match(item.url, '(.*?).\d+.all..')
    post = scrapertools.find_single_match(item.url, 'all..(.*)')
    next_page = page + 1
    if (page * 60) < total:
        next_page = "%s.%s.all..%s" % (url_page, next_page, post)
        itemlist.append(item.clone(action="lista", title="[COLOR blue]Página Siguiente >>[/COLOR]", url=next_page))
    return itemlist
def findvideos(item):
    """Resolve the playable server entries for one video item."""
    logger.info()
    placeholder = item.clone(action="play", title= "%s", contentTitle= item.title, url=item.url)
    # servertools identifies the hosting server and fills the "%s" title in.
    return servertools.get_servers_itemlist([placeholder], lambda i: i.title % i.server.capitalize())
def play(item):
    # Mirrors findvideos(): wrap the url in a "play" item and let
    # servertools detect the hosting server and fill the "%s" title in.
    logger.info()
    itemlist = []
    itemlist.append(item.clone(action="play", title= "%s", contentTitle= item.title, url=item.url))
    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
return itemlist | alfa-addon/addon | plugin.video.alfa/channels/vjav.py | Python | gpl-3.0 | 7,630 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 MicroEra s.r.l.
# (<http://www.microera.it>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import product_private_price
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| appendif/microera | product_private_price/__init__.py | Python | agpl-3.0 | 1,045 |
# -*- coding: utf-8 -*-
# Django settings for layer zero pinax project.
import os.path
import posixpath
import pinax
PINAX_ROOT = os.path.abspath(os.path.dirname(pinax.__file__))
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))
PYCON_YEAR = "2011"
# tells Pinax to use the default theme
PINAX_THEME = "default"
DEBUG = True
TEMPLATE_DEBUG = DEBUG
# tells Pinax to serve media through the staticfiles app.
SERVE_MEDIA = DEBUG
INTERNAL_IPS = [
"127.0.0.1",
]
ADMINS = [
]
MANAGERS = ADMINS
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3", # Add "postgresql_psycopg2", "postgresql", "mysql", "sqlite3" or "oracle".
"NAME": os.path.join(PROJECT_ROOT, "dev.db"), # Or path to database file if using sqlite3.
"USER": "", # Not used with sqlite3.
"PASSWORD": "", # Not used with sqlite3.
"HOST": "", # Set to empty string for localhost. Not used with sqlite3.
"PORT": "", # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = "US/Eastern"
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = "en-us"
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = os.path.join(PROJECT_ROOT, "site_media", "media")
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = "/%s/site_media/media/" % PYCON_YEAR
# Absolute path to the directory that holds static files like app media.
# Example: "/home/media/media.lawrence.com/apps/"
STATIC_ROOT = os.path.join(PROJECT_ROOT, "site_media", "static")
# URL that handles the static files like app media.
# Example: "http://media.lawrence.com"
STATIC_URL = "/%s/site_media/static/" % PYCON_YEAR
# Additional directories which hold static files
STATICFILES_DIRS = [
os.path.join(PROJECT_ROOT, "media"),
]
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = posixpath.join(STATIC_URL, "admin/")
# Make this unique, and don't share it with anybody.
SECRET_KEY = ""
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = [
"django.template.loaders.filesystem.load_template_source",
"django.template.loaders.app_directories.load_template_source",
]
MIDDLEWARE_CLASSES = [
"django.middleware.common.CommonMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django_openid.consumer.SessionConsumer",
"django.contrib.messages.middleware.MessageMiddleware",
"pinax.middleware.security.HideSensistiveFieldsMiddleware",
#"debug_toolbar.middleware.DebugToolbarMiddleware",
]
ROOT_URLCONF = "pycon_project.urls"
TEMPLATE_DIRS = [
os.path.join(PROJECT_ROOT, "templates"),
]
TEMPLATE_CONTEXT_PROCESSORS = [
"django.core.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.request",
"staticfiles.context_processors.static_url",
"pinax.core.context_processors.pinax_settings",
"pinax.apps.account.context_processors.account",
"review.context_processors.permissions",
]
# Installed applications, grouped by origin.
# BUGFIX: duplicate entries ("uni_form", "fixture_generator") removed --
# each Django app must be listed exactly once.
INSTALLED_APPS = [
    # Django
    "django.contrib.admin",
    "django.contrib.auth",
    "django.contrib.contenttypes",
    "django.contrib.sessions",
    "django.contrib.sites",
    "django.contrib.messages",
    "django.contrib.humanize",
    # external
    "mailer",
    "uni_form",
    "staticfiles",
    "debug_toolbar",
    "markitup",
    "nashvegas",
    "emailconfirmation",
    "timezones",
    "django_openid",
    "oauth_access",
    "ajax_validation",
    "fixture_generator",
    "wakawaka",
    "biblion",
    "sorl.thumbnail",
    # Pinax
    "pinax.templatetags",
    "pinax.apps.waitinglist",
    "pinax.apps.account",
    # project
    "speakers",
    "proposals",
    "sponsors",
    "review",
    "boxes",
    "schedule",
    "user_mailer",
    "wiki",
]
FIXTURE_DIRS = [
os.path.join(PROJECT_ROOT, "fixtures"),
]
MESSAGE_STORAGE = "django.contrib.messages.storage.session.SessionStorage"
CONTACT_EMAIL = "pycon@eldarion.com" # @@@ temporary
SITE_NAME = "PyCon 2011 Atlanta - A Conference for the Python Community"
DEBUG_TOOLBAR_CONFIG = {
"INTERCEPT_REDIRECTS": False,
}
BIBLION_SECTIONS = [
("general", "General"),
]
# Pinax account behaviour.
ACCOUNT_OPEN_SIGNUP = True
ACCOUNT_REQUIRED_EMAIL = True
ACCOUNT_EMAIL_VERIFICATION = True
ACCOUNT_EMAIL_AUTHENTICATION = False
ACCOUNT_UNIQUE_EMAIL = EMAIL_CONFIRMATION_UNIQUE_EMAIL = True
AUTHENTICATION_BACKENDS = [
    "pinax.apps.account.auth_backends.AuthenticationBackend",
]
# BUGFIX: a redundant second assignment of LOGIN_REDIRECT_URLNAME (same
# value, "home") was removed.
LOGIN_REDIRECT_URLNAME = "home"
LOGIN_URL = "/%s/account/login/" % PYCON_YEAR
LOGOUT_URL = "/%s/account/logout/" % PYCON_YEAR
EMAIL_CONFIRMATION_DAYS = 3
WAKAWAKA_DEFAULT_INDEX = "index"
WAKAWAKA_SLUG_REGEX = r"((\w{2,})(/\w{2,})*)" # allow lower case wiki page names
WAKAWAKA_LOCK_TIMEOUT = 10*60
MARKITUP_AUTO_PREVIEW = True
MARKITUP_SET = "markitup/sets/creole"
MARKITUP_SKIN = "markitup/skins/simple"
# FIXME at some point we may need multiple filters, if we need
# creole parsing for things that don't use wiki-style links.
MARKITUP_FILTER = ("wiki.creole_parser.parse", {})
MARKITUP_MEDIA_URL = STATIC_URL
ACCEPTING_PROPOSALS = True
SCHEDULE_TIMEZONE = "US/Eastern"
REDIS_PARAMS = dict(host="127.0.0.1")
# local_settings.py can be used to override environment-specific settings
# like database and email that differ between development and production.
try:
from local_settings import *
except ImportError:
pass
| mitsuhiko/pycon | pycon_project/settings.py | Python | bsd-3-clause | 6,646 |
#!/usr/bin/env python
"""
Interface with wcs.
Adapted from fermipy.skymap
"""
__author__ = "Alex Drlica-Wagner"
import numpy as np
from astropy.wcs import WCS
from astropy.io import fits
from astropy.coordinates import SkyCoord
def create_wcs(skydir, coordsys='CEL', projection='AIT',
               cdelt=1.0, crpix=1., naxis=2, energies=None):
    """Create a WCS object.
    Parameters
    ----------
    skydir : `~astropy.coordinates.SkyCoord`
        Sky coordinate of the WCS reference point.
    coordsys : str
        Either 'CEL' (ICRS RA/Dec) or 'GAL' (galactic l/b).
    projection : str
        FITS projection code (e.g. 'AIT', 'TAN').
    cdelt : float
        Pixel size in degrees (negative on the x axis, per FITS convention).
    crpix : float or (float,float)
        In the first case the same value is used for x and y axes
    naxis : {2, 3}
        Number of dimensions of the projection.
    energies : array-like
        Array of energies that defines the third dimension if naxis=3.
    Raises
    ------
    Exception
        If `coordsys` is not 'CEL' or 'GAL'.
    """
    w = WCS(naxis=naxis)
    if coordsys == 'CEL':
        w.wcs.ctype[0] = 'RA---%s' % (projection)
        w.wcs.ctype[1] = 'DEC--%s' % (projection)
        w.wcs.crval[0] = skydir.icrs.ra.deg
        w.wcs.crval[1] = skydir.icrs.dec.deg
    elif coordsys == 'GAL':
        w.wcs.ctype[0] = 'GLON-%s' % (projection)
        w.wcs.ctype[1] = 'GLAT-%s' % (projection)
        w.wcs.crval[0] = skydir.galactic.l.deg
        w.wcs.crval[1] = skydir.galactic.b.deg
    else:
        raise Exception('Unrecognized coordinate system.')
    try:
        w.wcs.crpix[0] = crpix[0]
        w.wcs.crpix[1] = crpix[1]
    except (TypeError, IndexError):
        # BUGFIX: subscripting a scalar crpix (the default, 1.) raises
        # TypeError, not IndexError, so the scalar fallback below was
        # unreachable for plain floats; catch both so scalar and pair
        # inputs work.
        w.wcs.crpix[0] = crpix
        w.wcs.crpix[1] = crpix
    w.wcs.cdelt[0] = -cdelt
    w.wcs.cdelt[1] = cdelt
    # Round-trip through the header to normalize the WCS object.
    w = WCS(w.to_header())
    if naxis == 3 and energies is not None:
        # Third axis: energy plane defined by the first two samples of
        # `energies` (uniform spacing is assumed -- TODO confirm).
        w.wcs.crpix[2] = 1
        w.wcs.crval[2] = energies[0]
        w.wcs.cdelt[2] = energies[1] - energies[0]
        w.wcs.ctype[2] = 'Energy'
        w.wcs.cunit[2] = 'MeV'
    return w
def create_image_wcs(skydir, cdelt, npix, coordsys='CEL', projection='AIT'):
    """Build a zero-filled image array plus its matching WCS projection."""
    shape = [npix, npix] if np.isscalar(npix) else npix
    # Reference pixel sits at the center of the grid (FITS 1-based pixels).
    reference_pixel = np.array([dim / 2. + 0.5 for dim in shape])
    image_wcs = create_wcs(skydir, coordsys, projection, cdelt, reference_pixel)
    return np.zeros(shape).T, image_wcs
def get_pixel_skydirs(npix, wcs):
    """Return the sky coordinate of every pixel center on the grid."""
    shape = [npix, npix] if np.isscalar(npix) else npix
    xs = np.linspace(0, shape[0] - 1., shape[0])
    ys = np.linspace(0, shape[1] - 1., shape[1])
    grid_x, grid_y = np.meshgrid(xs, ys, indexing='ij')
    return SkyCoord.from_pixel(grid_x.ravel(), grid_y.ravel(), wcs)
def create_image_hdu(data, wcs, name=None):
    """Wrap *data* in a FITS HDU carrying the WCS header.

    A primary HDU is produced when no extension name is given; otherwise a
    named `astropy.io.fits.ImageHDU` extension is returned.
    """
    header = wcs.to_header()
    if name is not None:
        return fits.ImageHDU(data, header=header, name=name)
    return fits.PrimaryHDU(data, header=header)
def write_image_hdu(filename, data, wcs, name=None, clobber=False):
    """Write an image to a file

    Serializes *data* plus the WCS header to *filename* (primary HDU when
    *name* is None, otherwise a named image extension).  Pass clobber=True
    to overwrite an existing file.
    NOTE(review): newer astropy deprecates ``clobber`` in favour of
    ``overwrite`` -- confirm against the astropy version pinned here.
    """
    hdu = create_image_hdu(data, wcs, name)
    hdu.writeto(filename, clobber=clobber)
| kadrlica/dmsky | dmsky/utils/wcs.py | Python | mit | 3,091 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) Copyright 2012 Andreas Hausmann
# This file is part of TGSBot.
# Permission to copy or use is limited. Please see LICENSE for information.
#
"""A twisted client that serves as playing bot for TGS.
Based on client/twisted-client1.py.
Example:
./tgsBot.py -P8081 -u tigerI |& tee /var/log/TGS/bot/tigerI.py.log
"""
from twisted.internet import reactor, defer
from twisted.python import log
import sys
from tgsBot import get_parser
from operation.client import Dispatch
from operation.settings import Toggle, Set
from operation.invite import invite, Bots
from client.gnubgClient import set_up_testgame
from client.tgsClient import ComClientFactory
TRACE = 15
VERBOSE = 17
import logging
logging.addLevelName(TRACE, 'TRACE')
logging.addLevelName(VERBOSE, 'VERBOSE')
NICK = 'test_bot_I'
KEEP_ALIVE = 300.
def start_logging(nick):
    # Append twisted's log output to a per-bot file, and mirror it into
    # the stdlib logging framework via PythonLoggingObserver.
    log.startLogging(open('/var/log/TGS/bots/%s.log' % nick, 'a'))
    observer = log.PythonLoggingObserver()
    observer.start()
class SetTest(Set):
    # Variant of operation.settings.Set that pins the server dice to the
    # dice file published by the gnubg bridge (used for scripted test games).
    def set_dice_file(self,):
        # Cache the dice file name exposed by the gnubg process.
        self._dice = self.dispatch.get_gnubg().get_dice_filename()
    def _set_dice(self,):
        # Log loudly, then tell the server to use the cached dice file.
        log.msg('SET sets dice to %s %s' % (self._dice, '>'*35),
                logLevel=VERBOSE)
        self.dispatch.send_server('set dice %s' % self._dice)
def invite_testbot(dispatch):
    # Auto-invite hook: invite the dispatcher's configured opponent at the
    # next match length, but only when that opponent is one of the known
    # bots; otherwise hand control back to the dispatcher.
    def invite_one(bots):
        ML = dispatch.get_invite_ML()
        opp = dispatch.get_invite_player()
        log.msg("invite_one in .....testbot %s %s" % (ML, opp))
        if opp in bots and ML is not None:
            invite(dispatch, opp, ML)
        else:
            dispatch.relax_hook()
    log.msg("invite_testbot")
    # Bots(...) fetches the current bot list and calls invite_one with it.
    bots = Bots(dispatch, dispatch.requests, invite_one)
class DispatchTest(Dispatch):
    # Dispatcher wired for scripted test games: auto-invites a fixed
    # opponent once per match length advertised by the gnubg bridge.
    auto_invite_hook = invite_testbot
    def _set_invite_MLs(self,):
        # Generator over the match lengths the gnubg bridge wants played.
        # NOTE(review): the bare `print` below looks like leftover debug
        # output -- consider routing it through log.msg or removing it.
        print "INVITES", self.get_gnubg().get_invites()
        for ml in self.get_gnubg().get_invites():
            yield ml
    def get_invite_player(self):
        # Fixed test opponent.
        return 'playerX'
    def get_invite_ML(self):
        # Pop the next match length; exhausting the generator switches
        # auto-invitation off and returns None.
        try:
            ML = self.invite_MLs.next()
        except StopIteration:
            log.msg("Stop auto invite testbot!")
            self.autoinvite = False
            ML = None
        return ML
    def login_hook(self,):
        # Called after login: toggle server settings, then either arm the
        # scripted-dice auto-invite mode (when the bridge publishes a dice
        # file) or fall back to plain settings with the configured delay.
        pfos = self.protocol.factory.options
        self.autoinvite = bool(self.get_gnubg().get_dice_filename())
        toggle = Toggle(self, self.requests)
        toggle.send_command('toggle')
        if self.autoinvite:
            self.nr_games = pfos.number_of_games
            self.invite_MLs = self._set_invite_MLs()
            settings = SetTest(self, self.requests)
            settings.set_dice_file()
        else:
            settings = Set(self, self.requests)
            settings.set_delay_value(pfos.delay)
        settings.send_command('set')
        self.relax_hook()
    def get_gnubg(self):
        # Shortcut to the gnubg process object held by the factory bridge.
        return self.protocol.factory.gnubg.gnubg['gnubg']
def set_options(o):
    # Force match-winning-chance evaluation off for test runs.
    o.evaluate_mwc = False
def usage(progname):
    # Build the option parser; the module docstring is appended to the
    # usage text. NOTE(review): `progname` is unused here -- confirm intent.
    usg = """usage: %prog [<gid>] <test_game_data> <dice> <invitations>
    %prog """ + __doc__
    parser = get_parser(usg)
    return parser, usg
if __name__ == "__main__":
    # Parse CLI options, wire the test dispatcher to a gnubg bridge built
    # from the test-game file, then connect to the server and run the
    # twisted reactor.
    parser,usg = usage(sys.argv[0])
    (options, args) = parser.parse_args()
    set_options(options)
    start_logging(options.user)
    factory = ComClientFactory()
    factory.options = options
    server_port = int(options.port)
    factory.dispatcher = DispatchTest(options.user, options.password,
                                      ka_lap=options.keep_alive,
                                      ignore_resume=options.ignore_resume,
                                      fixed_opponent=options.fixed_opponent)
    # NOTE(review): -1 presumably means "play until the invite generator
    # is exhausted" -- confirm against client.gnubgClient.
    options.number_of_games = -1
    options.testgame = args[0]
    bridge = set_up_testgame(options.testgame)
    if not bridge is None:
        factory.gnubg = bridge
        bridge.set_bot(factory.dispatcher)
        standard_running = True
    else:
        print "Can't find %s." % options.testgame
        sys.exit(1)
    if standard_running:
        reactor.connectTCP(options.host, server_port, factory)
        reactor.run()
| Watzmann/TGSBot | testBot.py | Python | gpl-3.0 | 4,170 |
# coding=utf-8
# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import os
from pex.interpreter import PythonInterpreter
from pex.pex import PEX
from pex.pex_builder import PEXBuilder
from twitter.common.collections import OrderedSet
from pants.backend.python.subsystems.pex_build_util import (PexBuilderWrapper, has_python_sources,
has_resources, is_python_target)
from pants.base.exceptions import TaskError
from pants.invalidation.cache_manager import VersionedTargetSet
from pants.task.task import Task
from pants.util.dirutil import safe_concurrent_creation
class GatherSources(Task):
  """Gather local Python sources.
  Creates an (unzipped) PEX on disk containing the local Python sources. This PEX can be merged
  with a requirements PEX to create a unified Python environment for running the relevant python
  code.
  """
  PYTHON_SOURCES = 'python_sources'
  @classmethod
  def implementation_version(cls):
    # Bump the trailing integer to invalidate previously cached results.
    return super(GatherSources, cls).implementation_version() + [('GatherSources', 5)]
  @classmethod
  def subsystem_dependencies(cls):
    return super(GatherSources, cls).subsystem_dependencies() + (PexBuilderWrapper.Factory,)
  @classmethod
  def product_types(cls):
    return [cls.PYTHON_SOURCES]
  @classmethod
  def prepare(cls, options, round_manager):
    round_manager.require_data(PythonInterpreter)
    round_manager.optional_data('python') # For codegen.
  def execute(self):
    # Entry point: collect python/resource targets, build (or reuse) the
    # sources PEX, and register it as the PYTHON_SOURCES product.
    targets = self._collect_source_targets()
    if not targets:
      return
    interpreter = self.context.products.get_data(PythonInterpreter)
    with self.invalidated(targets) as invalidation_check:
      pex = self._get_pex_for_versioned_targets(interpreter, invalidation_check.all_vts)
      self.context.products.register_data(self.PYTHON_SOURCES, pex)
  def _collect_source_targets(self):
    # Walk the transitive dependency graph from all python targets and keep
    # anything carrying python sources or resources.
    python_target_addresses = [p.address for p in self.context.targets(predicate=is_python_target)]
    targets = OrderedSet()
    def collect_source_targets(target):
      if has_python_sources(target) or has_resources(target):
        targets.add(target)
    self.context.build_graph.walk_transitive_dependency_graph(addresses=python_target_addresses,
                                                              work=collect_source_targets)
    return targets
  def _get_pex_for_versioned_targets(self, interpreter, versioned_targets):
    # Build (or reuse, keyed on the versioned-target-set hash) the on-disk
    # unzipped PEX for the given targets.
    if versioned_targets:
      target_set_id = VersionedTargetSet.from_versioned_targets(versioned_targets).cache_key.hash
    else:
      raise TaskError("Can't create pex in gather_sources: No python targets provided")
    source_pex_path = os.path.realpath(os.path.join(self.workdir, target_set_id))
    # Note that we check for the existence of the directory, instead of for invalid_vts,
    # to cover the empty case.
    if not os.path.isdir(source_pex_path):
      # Note that we use the same interpreter for all targets: We know the interpreter
      # is compatible (since it's compatible with all targets in play).
      with safe_concurrent_creation(source_pex_path) as safe_path:
        self._build_pex(interpreter, safe_path, [vt.target for vt in versioned_targets])
    return PEX(source_pex_path, interpreter=interpreter)
  def _build_pex(self, interpreter, path, targets):
    # Copy each target's sources into a fresh PEX builder and freeze it.
    pex_builder = PexBuilderWrapper.Factory.create(
      builder=PEXBuilder(path=path, interpreter=interpreter, copy=True),
      log=self.context.log)
    for target in targets:
      pex_builder.add_sources_from(target)
    pex_builder.freeze()
| twitter/pants | src/python/pants/backend/python/tasks/gather_sources.py | Python | apache-2.0 | 3,731 |
import unittest
from hamcrest import assert_that, equal_to
from mock import MagicMock, call
from mac_os_scripts.set_user_account_logo import LocalUserAccountLogoSetter
from mac_os_scripts_tests.test_common import _NO_OUTPUT
class LocalUserAccountLogoSetterTest(unittest.TestCase):
    # Unit tests for LocalUserAccountLogoSetter: run_command is mocked, so
    # each test only verifies the exact dscl command line that would be run.
    def setUp(self):
        self._subject = LocalUserAccountLogoSetter(
            sudo_password='SomePassword',
        )
        self._subject.run_command = MagicMock()
    def test_delete_user_account_logo_jpeg(self):
        # Deleting the JPEGPhoto attribute issues a `dscl . delete` call.
        self._subject.run_command.return_value = _NO_OUTPUT
        assert_that(
            self._subject.delete_user_account_logo_jpeg(
                username='SomeUser',
            ),
            equal_to(True)
        )
        assert_that(
            self._subject.run_command.mock_calls,
            equal_to([
                call(command_line='/usr/bin/dscl . delete /Users/SomeUser JPEGPhoto', quiet=True, sudo_password_override=False, timeout=None,
                     send_lines=None)
            ])
        )
    def test_delete_user_account_logo(self):
        # Deleting the Picture attribute issues a `dscl . delete` call.
        self._subject.run_command.return_value = _NO_OUTPUT
        assert_that(
            self._subject.delete_user_account_logo(
                username='SomeUser',
            ),
            equal_to(True)
        )
        assert_that(
            self._subject.run_command.mock_calls,
            equal_to([
                call(command_line='/usr/bin/dscl . delete /Users/SomeUser Picture', quiet=True, sudo_password_override=False, timeout=None,
                     send_lines=None)
            ])
        )
    def test_create_user_account_logo(self):
        # Creating the Picture attribute issues a `dscl . create` call with
        # the quoted logo path.
        self._subject.run_command.return_value = _NO_OUTPUT
        assert_that(
            self._subject.create_user_account_logo(
                username='SomeUser',
                logo_path='/path/to/some/logo.tif',
            ),
            equal_to(True)
        )
        assert_that(
            self._subject.run_command.mock_calls,
            equal_to([
                call(command_line='/usr/bin/dscl . create /Users/SomeUser Picture "/path/to/some/logo.tif"', quiet=True,
                     sudo_password_override=False, timeout=None, send_lines=None)
            ])
        )
    def test_run_pass(self):
        # run() succeeds when all three sub-steps report success.
        self._subject.delete_user_account_logo_jpeg = MagicMock()
        self._subject.delete_user_account_logo_jpeg.return_value = True
        self._subject.delete_user_account_logo = MagicMock()
        self._subject.delete_user_account_logo.return_value = True
        self._subject.create_user_account_logo = MagicMock()
        self._subject.create_user_account_logo.return_value = True
        assert_that(
            self._subject.run(
                username='SomeUser',
                logo_path='/path/to/some/logo.tif'
            ),
            equal_to(True)
        )
| initialed85/mac_os_scripts | mac_os_scripts_tests/set_user_account_logo_test.py | Python | mit | 2,865 |
import copy
import time
import numpy
import numpy.random
import theano
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams
SharedCPU = theano.tensor.sharedvar.TensorSharedVariable
try:
SharedGPU = theano.sandbox.cuda.var.CudaNdarraySharedVariable
except:
SharedGPU = SharedCPU
from layer import Layer, StackedLayer
from model import AutoEncoder
import pdb
class GraddescentMinibatch(object):
    # Minibatch SGD-with-momentum trainer over theano shared data; compiles
    # the update functions once and reuses them per epoch/step.
    def __init__(self, varin, data, cost, params,
                 truth=None, truth_data=None, supervised=False,
                 batchsize=100, learningrate=0.1, momentum=0.9,
                 rng=None, verbose=True):
        """
        Using stochastic gradient descent with momentum on data in a minibatch
        update manner.
        """
        # TODO: check dependencies between varin, cost, and param.
        assert isinstance(varin, T.TensorVariable)
        if (not isinstance(data, SharedCPU)) and \
            (not isinstance(data, SharedGPU)):
            raise TypeError("\'data\' needs to be a theano shared variable.")
        assert isinstance(cost, T.TensorVariable)
        assert isinstance(params, list)
        self.varin = varin
        self.data = data
        self.cost = cost
        self.params = params
        if supervised:
            # Supervised mode additionally needs a target variable and the
            # matching shared target data.
            if (not isinstance(truth_data, SharedCPU)) and \
                (not isinstance(truth_data, SharedGPU)):
                raise TypeError("\'truth_data\' needs to be a theano " + \
                                "shared variable.")
            assert isinstance(truth, T.TensorVariable)
            self.truth_data = truth_data
            self.truth = truth
        self.verbose = verbose
        self.batchsize = batchsize
        # Integer division: any trailing partial batch is ignored.
        self.numbatches = self.data.get_value().shape[0] / batchsize
        self.momentum = momentum
        self.supervised = supervised
        if rng is None:
            rng = numpy.random.RandomState(1)
        assert isinstance(rng, numpy.random.RandomState), \
            "rng has to be a random number generater."
        self.rng = rng
        self.epochcount = 0
        self.stepcount = 0
        self.stepcost = 0.
        self.steptimer = 0.
        self.index = T.lscalar('batch_index_in_sgd')
        # One zero-initialized momentum buffer per parameter.
        self.incs = dict([(
            p,
            theano.shared(value=numpy.zeros(p.shape.eval(),
                                            dtype=theano.config.floatX),
                          name='inc_' + p.name,
                          broadcastable=p.broadcastable)
        ) for p in self.params])
        self.grad = T.grad(self.cost, self.params)
        self.set_learningrate(learningrate)
        # Flattened parameter vector, used only for progress diagnostics
        # (update norm and angle relative to a reference snapshot).
        params_vector = T.concatenate([p.flatten() for p in self.params])
        self.get_params_value = theano.function([], params_vector)
        self.ref_vector = theano.shared(value=self.get_params_value(),
                                        name='step_ref_params',
                                        borrow=True)
        delta_vector = params_vector - self.ref_vector
        norm_ref_vector = T.sqrt(T.sum(self.ref_vector ** 2))
        norm_delta_vector = T.sqrt(T.sum(delta_vector ** 2))
        angle_rad = T.arccos(T.dot(self.ref_vector, delta_vector) /\
                             (norm_ref_vector * norm_delta_vector))
        self.get_step_info = theano.function([], (norm_delta_vector, angle_rad))
    def set_learningrate(self, learningrate):
        # (Re)compile the update functions for a new learning rate.
        self.learningrate = learningrate
        # inc_updates: momentum buffer <- momentum * inc - lr * grad.
        self.inc_updates = []
        # updates: parameter <- parameter + inc.
        self.updates = []
        for _param, _grad in zip(self.params, self.grad):
            self.inc_updates.append(
                (self.incs[_param],
                 self.momentum * self.incs[_param] - self.learningrate * _grad
                )
            )
            self.updates.append((_param, _param + self.incs[_param]))
        if not self.supervised:
            self._updateincs = theano.function(
                inputs = [self.index],
                outputs = self.cost,
                updates = self.inc_updates,
                givens = {
                    self.varin : self.data[self.index * self.batchsize: \
                                           (self.index+1)*self.batchsize]
                }
            )
        else:
            self._updateincs = theano.function(
                inputs = [self.index],
                outputs = self.cost,
                updates = self.inc_updates,
                givens = {
                    self.varin : self.data[self.index * self.batchsize: \
                                           (self.index+1)*self.batchsize],
                    self.truth : self.truth_data[self.index * self.batchsize: \
                                                 (self.index+1)*self.batchsize]
                }
            )
        self._trainmodel = theano.function(inputs=[], updates = self.updates)
    def epoch(self):
        # One sweep over shuffled minibatches; returns the running mean cost.
        start = time.time()
        stepcount = 0.0
        cost = 0.
        self.ref_vector.set_value(self.get_params_value())
        # NOTE(review): permutation(self.numbatches - 1) yields indices
        # 0..numbatches-2, so the final minibatch is never visited --
        # confirm whether skipping it is intentional.
        for batch_index in self.rng.permutation(self.numbatches - 1):
            stepcount += 1.0
            # Running (online) mean of the minibatch costs.
            cost = (1.0 - 1.0/stepcount) * cost + \
                   (1.0/stepcount) * self._updateincs(batch_index)
            self._trainmodel()
        norm, angle_rad = self.get_step_info()
        self.epochcount += 1
        stop = time.time()
        if self.verbose:
            print 'epoch %d: %.2fs, lr %.3g cost %.6g, ' % (
                self.epochcount, (stop - start), self.learningrate, cost) + \
                'update norm %.3g angle(RAD) %.3f' % (norm, angle_rad)
        return cost
    def step(self, verbose_stride=1):
        """
        Randomly pick a minibatch from dataset, and perform one step of update.
        If you switch this method between self.epoch() during training, the
        update norm, angle may not be immediately correct after the epoch/step
        at which you switch.
        """
        start = time.time()
        self.ref_vector.set_value(self.get_params_value())
        # NOTE(review): numpy randint's upper bound is exclusive, so the
        # last batch index is never drawn here either -- confirm.
        batch_index = self.rng.randint(0, self.numbatches - 1)
        cost = self._updateincs(batch_index)
        self._trainmodel()
        self.stepcost += cost
        self.stepcount += 1
        norm, angle_rad = self.get_step_info()
        stop = time.time()
        self.steptimer += (stop - start)
        if (self.stepcount % verbose_stride == 0) and self.verbose:
            print 'minibatch %d: %.2fs, lr %.3g cost %.6g, ' % (
                self.stepcount, self.steptimer, self.learningrate,
                self.stepcost / verbose_stride) + \
                'update norm %.3g angle(RAD) %.3f' % (norm, angle_rad)
            self.steptimer = 0.
            self.stepcost = 0.
        return cost
    def step_fast(self, verbose_stride=1):
        """
        A faster implementation of step(). Removes evaluation of angles,
        norms, etc.
        MUCH FASTER!! ~ 20 times!!
        """
        start = time.time()
        batch_index = self.rng.randint(0, self.numbatches - 1)
        cost = self._updateincs(batch_index)
        self._trainmodel()
        self.stepcost += cost
        self.stepcount += 1
        stop = time.time()
        self.steptimer += (stop - start)
        if (self.stepcount % verbose_stride == 0) and self.verbose:
            print 'minibatch %d: %.2fs, lr %.3g cost %.6g ' % (
                self.stepcount, self.steptimer, self.learningrate,
                self.stepcost / verbose_stride)
            self.steptimer = 0.
            self.stepcost = 0.
        return cost
    def draw_gradient(self,):
        # Placeholder for gradient visualisation.
        raise NotImplementedError("Not implemented yet...")
class Adam(object):
def __init__(self, varin, data, cost, params,
truth=None, truth_data=None, supervised=False,
batchsize=100, learningrate=0.1, beta_1=0.9, beta_2=0.999,
rng=None, verbose=True):
"""
Adam learning rule, an implelentation for paper:
http://arxiv.org/abs/1412.6980
"""
# TODO: check dependencies between varin, cost, and param.
assert isinstance(varin, T.TensorVariable)
if (not isinstance(data, SharedCPU)) and \
(not isinstance(data, SharedGPU)):
raise TypeError("\'data\' needs to be a theano shared variable.")
assert isinstance(cost, T.TensorVariable)
assert isinstance(params, list)
self.varin = varin
self.data = data
self.cost = cost
self.params = params
if supervised:
if (not isinstance(truth_data, SharedCPU)) and \
(not isinstance(truth_data, SharedGPU)):
raise TypeError("\'truth_data\' needs to be a theano " + \
"shared variable.")
assert isinstance(truth, T.TensorVariable)
self.truth_data = truth_data
self.truth = truth
self.verbose = verbose
self.batchsize = batchsize
self.numbatches = self.data.get_value().shape[0] / batchsize
self.beta_1 = beta_1
self.beta_2 = beta_2
self.beta_1_pow = theano.shared(
numpy.asarray(1., dtype=theano.config.floatX))
self.beta_2_pow = theano.shared(
numpy.asarray(1., dtype=theano.config.floatX))
self.supervised = supervised
if rng is None:
rng = numpy.random.RandomState(1)
assert isinstance(rng, numpy.random.RandomState), \
"rng has to be a random number generater."
self.rng = rng
self.epochcount = 0
self.stepcount = 0
self.stepcost = 0.
self.steptimer = 0.
self.index = T.lscalar('batch_index_in_sgd')
self.m = dict([(
p,
theano.shared(
value=numpy.zeros(p.shape.eval(), dtype=theano.config.floatX),
name=p.name + '_m',
broadcastable=p.broadcastable
)
) for p in self.params])
self.v = dict([(
p,
theano.shared(
value=numpy.zeros(p.shape.eval(), dtype=theano.config.floatX),
name=p.name + '_v',
broadcastable=p.broadcastable
)
) for p in self.params])
self.grad = T.grad(self.cost, self.params)
self.set_learningrate(learningrate)
params_vector = T.concatenate([p.flatten() for p in self.params])
self.get_params_value = theano.function([], params_vector)
self.ref_vector = theano.shared(value=self.get_params_value(),
name='step_ref_params',
borrow=True)
delta_vector = params_vector - self.ref_vector
norm_ref_vector = T.sqrt(T.sum(self.ref_vector ** 2))
norm_delta_vector = T.sqrt(T.sum(delta_vector ** 2))
angle_rad = T.arccos(T.dot(self.ref_vector, delta_vector) /\
(norm_ref_vector * norm_delta_vector))
self.get_step_info = theano.function([], (norm_delta_vector, angle_rad))
def set_learningrate(self, learningrate):
self.learningrate = learningrate
adjusted_lr = self.learningrate * T.sqrt(1. - self.beta_2_pow) \
/ (1. - self.beta_1_pow)
self.update_beta_pows = [
(self.beta_1_pow, self.beta_1_pow * self.beta_1),
(self.beta_2_pow, self.beta_2_pow * self.beta_2)]
self.update_m = []
self.update_v = []
self.update_params = []
for _param, _grad in zip(self.params, self.grad):
self.update_m.append((
self.m[_param],
self.beta_1 * self.m[_param] + (1 - self.beta_1) * _grad
))
self.update_v.append((
self.v[_param],
self.beta_2 * self.v[_param] + (1 - self.beta_2) * _grad**2
))
self.update_params.append((
_param,
_param - adjusted_lr * self.m[_param] \
/ (T.sqrt(self.v[_param]) + 1e-8)
))
if not self.supervised:
self.fun_update_mv_betapows = theano.function(
inputs=[self.index],
outputs=self.cost,
updates=self.update_m + self.update_v + self.update_beta_pows,
givens={
self.varin : self.data[self.index * self.batchsize: \
(self.index+1)*self.batchsize]
}
)
else:
self.fun_update_mv_betapows = theano.function(
inputs=[self.index],
outputs=self.cost,
updates=self.update_m + self.update_v + self.update_beta_pows,
givens={
self.varin : self.data[self.index * self.batchsize: \
(self.index+1)*self.batchsize],
self.truth : self.truth_data[self.index * self.batchsize: \
(self.index+1)*self.batchsize]
}
)
self._trainmodel = theano.function(inputs=[],
updates=self.update_params)
def epoch(self):
start = time.time()
stepcount = 0.0
cost = 0.
self.ref_vector.set_value(self.get_params_value())
for batch_index in self.rng.permutation(self.numbatches - 1):
stepcount += 1.0
cost = (1.0 - 1.0/stepcount) * cost + \
(1.0/stepcount) * self.fun_update_mv_betapows(batch_index)
self._trainmodel()
norm, angle_rad = self.get_step_info()
self.epochcount += 1
stop = time.time()
if self.verbose:
print 'epoch %d: %.2fs, lr %.3g cost %.6g, ' % (
self.epochcount, (stop - start), self.learningrate, cost) + \
'update norm %.3g angle(RAD) %.3f' % (norm, angle_rad)
return cost
    def step(self, verbose_stride=1):
        """
        Randomly pick a minibatch from dataset, and perform one step of update.
        If you switch this method between self.epoch() during training, the
        update norm, angle may not be immediately correct after the epoch/step
        at which you switch.
        """
        start = time.time()
        # Reference point for the update norm/angle reported below.
        self.ref_vector.set_value(self.get_params_value())
        batch_index = self.rng.randint(0, self.numbatches - 1)
        cost = self.fun_update_mv_betapows(batch_index)
        self._trainmodel()
        # Accumulators reported (and reset) every `verbose_stride` steps.
        self.stepcost += cost
        self.stepcount += 1
        norm, angle_rad = self.get_step_info()
        stop = time.time()
        self.steptimer += (stop - start)
        if (self.stepcount % verbose_stride == 0) and self.verbose:
            print 'minibatch %d: %.2fs, lr %.3g cost %.6g, ' % (
                self.stepcount, self.steptimer, self.learningrate,
                self.stepcost / verbose_stride) + \
                'update norm %.3g angle(RAD) %.3f' % (norm, angle_rad)
            self.steptimer = 0.
            self.stepcost = 0.
        return cost
    def step_fast(self, verbose_stride=1):
        """
        A faster implementation of step(). Removes evaluation of angles,
        norms, etc.

        MUCH FASTER!! ~ 20 times!!
        """
        start = time.time()
        batch_index = self.rng.randint(0, self.numbatches - 1)
        cost = self.fun_update_mv_betapows(batch_index)
        self._trainmodel()
        # Accumulators reported (and reset) every `verbose_stride` steps.
        self.stepcost += cost
        self.stepcount += 1
        stop = time.time()
        self.steptimer += (stop - start)
        if (self.stepcount % verbose_stride == 0) and self.verbose:
            print 'minibatch %d: %.2fs, lr %.3g cost %.6g ' % (
                self.stepcount, self.steptimer, self.learningrate,
                self.stepcost / verbose_stride)
            self.steptimer = 0.
            self.stepcost = 0.
        return cost
    def draw_gradient(self,):
        # Gradient-visualisation hook; intentionally unimplemented.
        raise NotImplementedError("Not implemented yet...")
class ConjugateGradient(object):
    # Stub: a conjugate-gradient trainer was planned but never implemented.
    def __init__(self, varin, data, cost, params,
                 truth=None, truth_data=None, supervised=False,
                 batchsize=100, learningrate=0.1, momentum=0.9,
                 rng=None, verbose=True):
        """
        Placeholder for a conjugate-gradient trainer. Not implemented:
        constructing an instance only prints an error and stores nothing.
        """
        print "ERROR: Not usable now."
class FeedbackAlignment(object):
    def __init__(self, model, data, truth_data,
                 batchsize=100, learningrate=0.1, rng=None, verbose=True):
        """
        Feedback-alignment trainer: errors are propagated downwards through
        fixed random matrices (self.fixed_B) instead of the transposed
        forward weights.

        It works for both linear and nonlinear layers, as long as they have
        the activ_prime() method.

        Cost is defined intrinsically as the MSE between target y vector and
        real y vector at the top layer.

        Parameters:
        ------------
        model : StackedLayer
            The model to train; every layer must expose w, b, varin,
            activ_prime() and params.
        data : theano.compile.SharedVariable
            Training inputs, row-per-sample.
        truth_data : theano.compile.SharedVariable
            Target outputs aligned with `data`.
        """
        if (not isinstance(data, SharedCPU)) and \
                (not isinstance(data, SharedGPU)):
            raise TypeError("\'data\' needs to be a theano shared variable.")
        if (not isinstance(truth_data, SharedCPU)) and \
                (not isinstance(truth_data, SharedGPU)):
            raise TypeError("\'truth_data\' needs to be a theano shared variable.")
        self.varin = model.models_stack[0].varin
        self.truth = T.lmatrix('trurh_fba')
        self.data = data
        self.truth_data = truth_data
        self.model = model
        self.output = model.models_stack[-1].output()
        self.verbose = verbose
        self.batchsize = batchsize
        # Python 2 integer division: any ragged final batch is dropped.
        self.numbatches = self.data.get_value().shape[0] / batchsize

        if rng is None:
            rng = numpy.random.RandomState(1)
        assert isinstance(rng, numpy.random.RandomState), \
            "rng has to be a random number generater."
        self.rng = rng

        # Top-layer error signal: (target - output) * f'(pre-activation).
        self.error = (self.truth - self.output) * \
            self.model.models_stack[-1].activ_prime()

        # set fixed random matrix
        # self.fixed_B[i] holds the fixed feedback matrices for layer i
        # (one per parameter of that layer), Glorot-style uniform init.
        # Index 0 is None: the input layer receives no feedback.
        self.fixed_B = [None, ]
        for imod in self.model.models_stack[1:]:
            i_layer_B = []
            for ipar in imod.params:
                rnd = numpy.asarray(
                    self.rng.uniform(
                        low = -4 * numpy.sqrt(6. / (imod.n_in + imod.n_out)),
                        high = 4 * numpy.sqrt(6. / (imod.n_in + imod.n_out)),
                        size = ipar.get_value().shape
                    ),
                    dtype=ipar.dtype
                )
                i_layer_B.append(
                    theano.shared(value = rnd, name=ipar.name + '_fixed',
                                  borrow=True)
                )
            self.fixed_B.append(i_layer_B)

        self.epochcount = 0
        self.index = T.lscalar('batch_index_in_fba')
        # Sum-of-squared-errors of the top layer on one minibatch.
        self._get_cost = theano.function(
            inputs = [self.index],
            outputs = T.sum(self.error ** 2),
            givens = {
                self.varin : self.data[self.index * self.batchsize: \
                                       (self.index+1)*self.batchsize],
                self.truth : self.truth_data[self.index * self.batchsize: \
                                             (self.index+1)*self.batchsize]
            }
        )
        self.set_learningrate(learningrate)

    def set_learningrate(self, learningrate):
        # Rebuild one update function per layer, walking from the top layer
        # down. The error signal for layer i-1 is the layer-i error passed
        # through the fixed random matrix (feedback alignment) rather than
        # through the transposed forward weights.
        self.learningrate = learningrate
        layer_error = self.error
        self.layer_learning_funcs = []
        for i in range(len(self.model.models_stack) - 1, -1, -1):
            iupdates = []
            # Note the "+": the error is defined as (truth - output), so
            # adding lr * x^T * error descends the squared-error cost.
            iupdates.append((
                self.model.models_stack[i].w,
                self.model.models_stack[i].w + self.learningrate * \
                    T.dot(self.model.models_stack[i].varin.T, layer_error)
            ))  # w
            iupdates.append((
                self.model.models_stack[i].b,
                self.model.models_stack[i].b + self.learningrate * \
                    T.mean(layer_error, axis=0)
            ))  # b
            if i > 0:  # exclude the first layer.
                # Backpropagate through the fixed matrix for layer i; only
                # the first parameter's feedback matrix (the one paired
                # with w) is used.
                layer_error = T.dot(layer_error, self.fixed_B[i][0].T) * \
                    self.model.models_stack[i-1].activ_prime()
            self.layer_learning_funcs.append(
                theano.function(
                    inputs = [self.index],
                    outputs = self.model.models_stack[i].output(),
                    updates = iupdates,
                    givens = {
                        self.varin : self.data[
                            self.index * self.batchsize: \
                            (self.index+1)*self.batchsize
                        ],
                        self.truth : self.truth_data[
                            self.index * self.batchsize: \
                            (self.index+1)*self.batchsize
                        ]
                    }
                )
            )

    def epoch(self):
        # One pass over the training data; updates each layer in turn
        # (top-down order, as built in set_learningrate) and returns the
        # running mean of the per-minibatch cost.
        stepcount = 0.
        cost = 0.
        # NOTE(review): permutation(numbatches - 1) never visits the last
        # batch — same convention as the other trainers in this file.
        for batch_index in self.rng.permutation(self.numbatches - 1):
            stepcount += 1.
            cost = (1.0 - 1.0/stepcount) * cost + \
                (1.0/stepcount) * self._get_cost(batch_index)
            for layer_learn in self.layer_learning_funcs:
                layer_learn(batch_index)
        self.epochcount += 1
        if self.verbose:
            print 'epoch: %d, lr: %f, cost: %f' % (
                self.epochcount, self.learningrate, cost
            )
        return cost
class QuantizedBackProp(object):
    def __init__(self, model, varin, data, cost, params,
                 truth=None, truth_data=None, supervised=False,
                 batchsize=100, learningrate=0.1, momentum=0.9,
                 rng=None, verbose=True):
        """
        Using stochastic gradient descent with momentum on data in a minibatch
        update manner, substituting each layer's quantized gradient (dEdW,
        produced by the layer's quantized_bprop() method) for the exact
        T.grad() gradient wherever a layer provides one.
        """
        assert isinstance(model, StackedLayer)
        assert isinstance(varin, T.TensorVariable)
        if (not isinstance(data, SharedCPU)) and \
                (not isinstance(data, SharedGPU)):
            raise TypeError("\'data\' needs to be a theano shared variable.")
        assert isinstance(cost, T.TensorVariable)
        assert isinstance(params, list)
        self.model = model
        self.varin = varin
        self.data = data
        self.cost = cost
        self.params = params
        if supervised:
            if (not isinstance(truth_data, SharedCPU)) and \
                    (not isinstance(truth_data, SharedGPU)):
                raise TypeError("\'truth_data\' needs to be a theano " + \
                                "shared variable.")
            assert isinstance(truth, T.TensorVariable)
            self.truth_data = truth_data
            self.truth = truth
        self.verbose = verbose
        self.batchsize = batchsize
        # Python 2 integer division: any ragged final batch is dropped.
        self.numbatches = self.data.get_value().shape[0] / batchsize
        self.momentum = momentum
        self.supervised = supervised
        if rng is None:
            rng = numpy.random.RandomState(1)
        assert isinstance(rng, numpy.random.RandomState), \
            "rng has to be a random number generater."
        self.rng = rng

        self.epochcount = 0
        self.stepcount = 0
        self.stepcost = 0.
        self.steptimer = 0.
        self.index = T.lscalar('batch_index_in_sgd')
        # One zero-initialised "increment" buffer per parameter, carrying
        # the momentum term between updates.
        self.incs = dict([(
            p,
            theano.shared(value=numpy.zeros(p.shape.eval(),
                                            dtype=theano.config.floatX),
                          name='inc_' + p.name,
                          broadcastable=p.broadcastable)
        ) for p in self.params])

        self.grad = T.grad(self.cost, self.params)
        # set "gradient"
        # Replace the exact gradient of each layer's w with the layer's
        # quantized gradient dEdW, when the layer supports it.
        for ilayer in model.models_stack:
            if hasattr(ilayer, 'quantized_bprop'):
                ilayer.quantized_bprop(self.cost)
                i = 0
                for iparam in self.params:
                    if ilayer.w is iparam:
                        self.grad[i] = ilayer.dEdW
                    i += 1
        # /set "gradient"

        self.set_learningrate(learningrate)

        # Flattened view of all parameters, plus a snapshot (ref_vector)
        # used to report the norm and direction of each update.
        params_vector = T.concatenate([p.flatten() for p in self.params])
        self.get_params_value = theano.function([], params_vector)
        self.ref_vector = theano.shared(value=self.get_params_value(),
                                        name='step_ref_params',
                                        borrow=True)
        delta_vector = params_vector - self.ref_vector
        norm_ref_vector = T.sqrt(T.sum(self.ref_vector ** 2))
        norm_delta_vector = T.sqrt(T.sum(delta_vector ** 2))
        angle_rad = T.arccos(T.dot(self.ref_vector, delta_vector) /\
                             (norm_ref_vector * norm_delta_vector))
        self.get_step_info = theano.function([], (norm_delta_vector, angle_rad))

    def set_learningrate(self, learningrate):
        # Rebuild the momentum-SGD update rules and recompile the training
        # functions for the new learning rate.
        self.learningrate = learningrate
        self.inc_updates = []  # updates the parameter increasements (i.e.
                               # value in the self.incs dictionary.). Due to
                               # momentum, the increasement itself is
                               # changing between epochs. Its increasing by:
                               # from (key) inc_params
                               # to (value) momentum * inc_params - lr * grad
        self.updates = []  # updates the parameters of model during each epoch.
                           # from (key) params
                           # to (value) params + inc_params
        for _param, _grad in zip(self.params, self.grad):
            self.inc_updates.append(
                (self.incs[_param],
                 self.momentum * self.incs[_param] - self.learningrate * _grad
                )
            )
            self.updates.append((_param, _param + self.incs[_param]))

        if not self.supervised:
            self._updateincs = theano.function(
                inputs = [self.index],
                outputs = self.cost,
                updates = self.inc_updates,
                givens = {
                    self.varin : self.data[self.index * self.batchsize: \
                                           (self.index+1)*self.batchsize]
                }
            )
        else:
            # Supervised training additionally substitutes the target
            # minibatch for self.truth.
            self._updateincs = theano.function(
                inputs = [self.index],
                outputs = self.cost,
                updates = self.inc_updates,
                givens = {
                    self.varin : self.data[self.index * self.batchsize: \
                                           (self.index+1)*self.batchsize],
                    self.truth : self.truth_data[self.index * self.batchsize: \
                                                 (self.index+1)*self.batchsize]
                }
            )
        self._trainmodel = theano.function(inputs=[], updates = self.updates)

    def epoch(self):
        # One pass over the training data; returns the running mean of the
        # per-minibatch cost.
        start = time.time()
        stepcount = 0.0
        cost = 0.
        # Snapshot for the update norm/angle reported at the end.
        self.ref_vector.set_value(self.get_params_value())
        # NOTE(review): permutation(numbatches - 1) never visits the last
        # batch — same convention as the other trainers in this file.
        for batch_index in self.rng.permutation(self.numbatches - 1):
            stepcount += 1.0
            # This is Roland's way of computing cost, still mean over all
            # batches. It saves space and don't harm computing time...
            # But a little bit unfamilliar to understand at first glance.
            cost = (1.0 - 1.0/stepcount) * cost + \
                (1.0/stepcount) * self._updateincs(batch_index)
            self._trainmodel()
        norm, angle_rad = self.get_step_info()
        self.epochcount += 1
        stop = time.time()
        if self.verbose:
            print 'epoch %d: %.2fs, lr %.3g cost %.6g, ' % (
                self.epochcount, (stop - start), self.learningrate, cost) + \
                'update norm %.3g angle(RAD) %.3f' % (norm, angle_rad)
        return cost

    def step(self, verbose_stride=1):
        """
        Randomly pick a minibatch from dataset, and perform one step of update.
        If you switch this method between self.epoch() during training, the
        update norm, angle may not be immediately correct after the epoch/step
        at which you switch.
        """
        start = time.time()
        self.ref_vector.set_value(self.get_params_value())
        batch_index = self.rng.randint(0, self.numbatches - 1)
        cost = self._updateincs(batch_index)
        self._trainmodel()
        # Accumulators reported (and reset) every `verbose_stride` steps.
        self.stepcost += cost
        self.stepcount += 1
        norm, angle_rad = self.get_step_info()
        stop = time.time()
        self.steptimer += (stop - start)
        if (self.stepcount % verbose_stride == 0) and self.verbose:
            print 'minibatch %d: %.2fs, lr %.3g cost %.6g, ' % (
                self.stepcount, self.steptimer, self.learningrate,
                self.stepcost / verbose_stride) + \
                'update norm %.3g angle(RAD) %.3f' % (norm, angle_rad)
            self.steptimer = 0.
            self.stepcost = 0.
        return cost

    def step_fast(self, verbose_stride=1):
        """
        A faster implementation of step(). Removes evaluation of angles,
        norms, etc.

        MUCH FASTER!! ~ 20 times!!
        """
        start = time.time()
        batch_index = self.rng.randint(0, self.numbatches - 1)
        cost = self._updateincs(batch_index)
        self._trainmodel()
        self.stepcost += cost
        self.stepcount += 1
        stop = time.time()
        self.steptimer += (stop - start)
        if (self.stepcount % verbose_stride == 0) and self.verbose:
            print 'minibatch %d: %.2fs, lr %.3g cost %.6g ' % (
                self.stepcount, self.steptimer, self.learningrate,
                self.stepcost / verbose_stride)
            self.steptimer = 0.
            self.stepcost = 0.
        return cost
class Dropout(object):
    class DropoutLayer(Layer):
        # Identity-sized layer that multiplies its input by a Bernoulli
        # zero-one mask, dropping each unit with probability `droprate`.
        def __init__(self, n_in, droprate, varin=None, theano_rng=None):
            super(Dropout.DropoutLayer, self).__init__(n_in, n_in, varin=varin)
            assert (droprate >= 0. and droprate <= 1.), \
                "droprate has to be in the interval [0, 1]."
            self.droprate = droprate
            if not theano_rng:
                theano_rng = RandomStreams(123)
            assert isinstance(theano_rng, T.shared_randomstreams.RandomStreams)
            self.theano_rng = theano_rng

        def output(self):
            # A fresh binomial mask is sampled on every evaluation.
            return self.theano_rng.binomial(size=self.varin.shape, n=1,
                                            p = 1 - self.droprate,
                                            dtype=theano.config.floatX
                                            ) * self.varin

        def _print_str(self):
            return " (" + self.__class__.__name__ + ": droprate " + \
                str(self.droprate) + ")"

    def __init__(self, model, droprates, theano_rng=None):
        """
        Build a noisy model according to the passed model, which is an
        Autoencoder object or StackedLayer object. The newly built model shares
        the same theano shared parameters with the initial one, but with
        binomial zero-one noise injected at each layer. The ratios of dropped
        units in each layer is spectfied at droprates, which is a list starting
        from input layer.
        """
        if not theano_rng:
            theano_rng = RandomStreams(123)
        assert isinstance(theano_rng, T.shared_randomstreams.RandomStreams)
        self.theano_rng = theano_rng
        self.model = model
        self.set_droprates(droprates)

    def set_droprates(self, droprates):
        # Rebuild self.dropout_model with the given per-layer drop rates.
        self.droprates = droprates
        if isinstance(self.model, AutoEncoder):
            assert len(droprates) == 2, "List \"droprates\" has a wrong length."
            self.dropout_model = copy.copy(self.model)
            # Prepend a DropoutLayer to the encoder and the decoder; the
            # underlying parameters stay shared with self.model.
            def dropout_encoder():
                return self.DropoutLayer(
                    n_in=self.dropout_model.n_in,
                    droprate=self.droprates[0],
                    varin=self.model.varin,
                    theano_rng=self.theano_rng
                ) + self.model.encoder()
            self.dropout_model.encoder = dropout_encoder
            def dropout_decoder():
                return self.DropoutLayer(
                    n_in=self.dropout_model.n_hid,
                    droprate=self.droprates[1],
                    varin=self.dropout_model.encoder().output(),
                    theano_rng=self.theano_rng
                ) + self.model.decoder()
            self.dropout_model.decoder = dropout_decoder
        elif isinstance(self.model, StackedLayer):
            # TODO: more thing to do for assertion here. Not sure if it will
            # work for nested StackedLayer object.
            # assert len(droprates) == len(self.model.models_stack), \
            #     "List \"droprates\" has a wrong length."
            self.dropout_model = None
            i = 0
            # Insert a DropoutLayer before every layer that has parameters;
            # parameter-free layers are copied through unchanged and do not
            # consume an entry of `droprates`.
            for layer_model in self.model.models_stack:
                copied_layer = copy.copy(layer_model)
                if layer_model.params != []:
                    combination = self.DropoutLayer(
                        n_in=layer_model.n_in,
                        droprate=self.droprates[i],
                        theano_rng=self.theano_rng
                    ) + copied_layer
                    if self.dropout_model == None:
                        self.dropout_model = combination
                    else:
                        self.dropout_model = self.dropout_model + combination
                    i += 1
                else:
                    if self.dropout_model == None:
                        self.dropout_model = copied_layer
                    else:
                        self.dropout_model = self.dropout_model + copied_layer
        elif isinstance(self.model, Layer):
            assert len(droprates) == 1, "List \"droprates\" has a wrong length."
            if self.model.params != []:
                self.dropout_model = self.DropoutLayer(
                    n_in=self.model.n_in,
                    droprate=self.droprates[0],
                    theano_rng=self.theano_rng
                ) + copy.copy(self.model)
            else:
                raise TypeError("Dropout on a layer with no parameters has " + \
                                "no meaning currently")
        else:
            raise TypeError("Passed model has to be an Autoencoder, " + \
                            "StackedLayer or single Layer")
class BatchNormalization(object):
    def __init__(self, model, BN_params=None, BN_meanstds=None, npy_rng=None):
        """
        TODO: Not working ideally for autoencoders and single layers.
        A batch normalization implementation according to the following paper:
        http://arxiv.org/pdf/1502.03167.pdf

        Basically it bulids 2 models: self.batchnorm_model, and
        self.batchnorm_test. The former one is for training, while the latter
        is only for test time use. Note that the self.batchnorm_test model is
        not completed, that is to say, it is calling some of the attribute
        members from self.batchnorm_model. As the two models are binded in a
        same object, this can always be safe.

        self.batchnorm_model is built according to the passed model,
        which could be an AutoEncoder object or StackedLayer object. It
        shares the same theano shared parameters with the initial
        one, but has inplanted batch normalization at PRE-activations (i.e.
        before applying the activation function).

        Also, the class provides 2 new parameter groups:
        - params_batchnorm. Includes [gamma, beta], which are introduced by
        batch normalizaion.
        - params_meanstds. Includes [wxmean, wxstd], which is the mean and standard
        deviation of each layer's representation. It is only used by
        self.batchnorm_test model, and irrelevant to the training process.
        You will also need to update it before each time of test.

        Remember to apply this class at the last step of any other tricks, like
        dropout etc.

        At test time, the means and variances are over the whole dataset. So it
        should be a big batch containing all the training/testing samples for
        this implementation. Will use an moving average instead in future
        updates.
        """
        if npy_rng is None:
            npy_rng = numpy.random.RandomState(3456)
        assert isinstance(npy_rng, numpy.random.RandomState), \
            "npy_rng has to be a random number generater."
        self.npy_rng = npy_rng
        self.model = model
        # BN_params / BN_meanstds allow restoring previously trained
        # gamma/beta and mean/std values; they are reversed so .pop() below
        # consumes them in the original (front-to-back) layer order.
        if BN_params:
            self.BN_params = BN_params[::-1]
        else:
            self.BN_params = None
        if BN_meanstds:
            self.BN_meanstds = BN_meanstds[::-1]
        else:
            self.BN_meanstds = None

        if isinstance(self.model, AutoEncoder):
            print "WARNING: May not be correct now."
            self.batchnorm_model = copy.copy(self.model)
            def batchnorm_encoder():
                encoder_layer = self.model.encoder()
                assert not isinstance(encoder_layer, StackedLayer), (
                    "Batch normalization on deep autoencoder with more than "
                    "1 layer of encoder/decoder is not supported.")
                if not self.BN_params:
                    encoder_layer.gamma = theano.shared(
                        numpy.ones(self.model.n_out,
                                   dtype=theano.config.floatX),
                        name='gamma_encoder', borrow=True
                    )
                    encoder_layer.beta = theano.shared(
                        numpy.zeros(self.model.n_out,
                                    dtype=theano.config.floatX),
                        name='beta_encoder', borrow=True
                    )
                else:
                    encoder_layer.gamma = self.BN_params.pop()
                    encoder_layer.beta = self.BN_params.pop()
                encoder_layer.params_batchnorm = encoder_layer.params + \
                    [encoder_layer.gamma, encoder_layer.beta]
                encoder_layer.params_extra = \
                    [encoder_layer.gamma, encoder_layer.beta]
                # Replace the layer's fan-in with a normalized version:
                # (wx - mean) / (std + eps) * gamma + beta.
                def normed_fanin_encoder():
                    return (
                        self.model.encoder().fanin() - \
                        self.model.encoder().fanin().mean(
                            axis=0, keepdims=True)
                    ) / (
                        self.model.encoder().fanin().std(
                            axis=0, keepdims=True
                        ) + 1E-6
                    ) * encoder_layer.gamma + encoder_layer.beta
                encoder_layer.fanin = normed_fanin_encoder
                return encoder_layer
            self.batchnorm_model.encoder = batchnorm_encoder

        elif isinstance(self.model, StackedLayer):
            """
            NOTE: if dealing with convolutional nets, there is one mean value
            over each feature map, not each location! So the number (and shape)
            of means and stds should be equal to biases.
            """
            # TODO: more thing to do for assertion here. Not sure if it will
            # work for nested StackedLayer object.
            # assert len(droprates) == len(self.model.models_stack), \
            #     "List \"droprates\" has a wrong length."
            # Two independent shallow copies of the stack: one for training
            # (minibatch statistics) and one for test time (stored
            # wxmean/wxstd).
            copied_model_list = []
            copied_model_list2 = []
            for layer_model in self.model.models_stack:
                copied_model_list.append(copy.copy(layer_model))
                copied_model_list2.append(copy.copy(layer_model))
            copied_model = StackedLayer(models_stack=copied_model_list,
                                        varin=self.model.varin)
            copied_model_test = StackedLayer(models_stack=copied_model_list2,
                                             varin=self.model.varin)

            self.params_batchnorm = []
            self.params_meanstds = []
            prev_layer = None
            # Walk the stack pairwise: normalize prev_layer's fan-in and
            # rewire its (normalized) output into the current layer's varin.
            for (layer_model, layer_model_test) in zip(
                    copied_model.models_stack, copied_model_test.models_stack):
                # process prev_layer
                if prev_layer != None and prev_layer.params != []:
                    if not self.BN_params:
                        # Conv layers get one gamma/beta per feature map
                        # (broadcast over batch and spatial dims); fully
                        # connected layers get one per unit.
                        shape = prev_layer.n_out
                        if hasattr(prev_layer, 'conv'):
                            shape = (1, shape[1], 1, 1)
                            broadcastable = (True, False, True, True)
                        else:
                            shape = (1, shape)
                            broadcastable = (True, False)
                        print "Adding Batch Normalization to the layer \t" + \
                            prev_layer.__class__.__name__ + " " + \
                            str(prev_layer.n_out)
                        prev_layer.gamma = theano.shared(
                            numpy.ones(shape, dtype=theano.config.floatX),
                            name=prev_layer.__class__.__name__ + '_gamma',
                            borrow=True,
                            broadcastable=broadcastable
                        )
                        prev_layer.beta = theano.shared(
                            numpy.zeros(shape, dtype=theano.config.floatX),
                            name=prev_layer.__class__.__name__ + '_beta',
                            borrow=True,
                            broadcastable=broadcastable
                        )
                        prev_layer.wxmean = theano.shared(
                            numpy.zeros(shape, dtype=theano.config.floatX),
                            name = prev_layer.__class__.__name__ + '_wxmean',
                            borrow=True,
                            broadcastable=broadcastable
                        )
                        prev_layer.wxstd = theano.shared(
                            numpy.zeros(shape, dtype=theano.config.floatX),
                            name = prev_layer.__class__.__name__ + '_wxstd',
                            borrow=True,
                            broadcastable=broadcastable
                        )
                    else:
                        prev_layer.gamma = self.BN_params.pop()
                        prev_layer.beta = self.BN_params.pop()
                        prev_layer.wxmean = self.BN_meanstds.pop()
                        prev_layer.wxstd = self.BN_meanstds.pop()
                    self.params_batchnorm += [prev_layer.gamma, prev_layer.beta]
                    self.params_meanstds += [prev_layer.wxmean,
                                             prev_layer.wxstd]

                    wx = prev_layer.fanin()  # wx stands for w * x.
                    # Training path: normalize with minibatch statistics.
                    if hasattr(prev_layer, 'conv'):
                        next_varin = prev_layer.output(
                            (
                                wx - wx.mean(axis=(0, 2, 3), keepdims=True)
                            ) / (
                                wx.std(axis=(0, 2, 3), keepdims=True) + 1E-6
                            ) * prev_layer.gamma + prev_layer.beta
                        )
                    else:
                        next_varin = prev_layer.output(
                            (
                                wx - wx.mean(axis=0, keepdims=True)
                            ) / (
                                wx.std(axis=0, keepdims=True) + 1E-6
                            ) * prev_layer.gamma + prev_layer.beta
                        )
                    # Test path: normalize with the stored wxmean/wxstd.
                    wx_test = prev_layer_test.fanin()
                    next_varin_test = prev_layer_test.output(
                        (wx_test - prev_layer.wxmean) / (prev_layer.wxstd + 1E-6
                        ) * prev_layer.gamma + prev_layer.beta
                    )
                elif prev_layer != None:
                    # Parameter-free layer: pass its output through unchanged.
                    next_varin = prev_layer.output()
                    next_varin_test = prev_layer_test.output()

                if prev_layer != None:
                    # Adapt the tensor layout at conv <-> fully-connected
                    # boundaries.
                    right_is_conv = hasattr(layer_model, 'conv')
                    left_is_conv = hasattr(prev_layer, 'conv')
                    if (left_is_conv and right_is_conv) or (
                            (not left_is_conv) and (not right_is_conv)):
                        layer_model.varin = next_varin
                        layer_model_test.varin = next_varin_test
                    elif not right_is_conv:
                        layer_model.varin = next_varin.flatten(2)
                        layer_model_test.varin = next_varin_test.flatten(2)
                    elif not left_is_conv:  # FC-CONV
                        layer_model.varin = next_varin.reshape(
                            layer_model.n_in)
                        layer_model_test.varin = next_varin_test.reshape(
                            layer_model.n_in)
                prev_layer = layer_model
                prev_layer_test = layer_model_test

            # process the last layer
            # After the loop layer_model / layer_model_test refer to the
            # topmost layer, whose output (not varin of a successor) must
            # be wrapped with normalization.
            if layer_model.params != []:
                if not self.BN_params:
                    shape = layer_model.n_out
                    if hasattr(layer_model, 'conv'):
                        shape = (1, shape[1], 1, 1)
                        broadcastable = (True, False, True, True)
                    else:
                        shape = (1, shape)
                        broadcastable = (True, False)
                    # NOTE(review): this message prints prev_layer, but the
                    # layer being wrapped here is layer_model (the last
                    # layer) — looks like a copy-paste slip; confirm.
                    print "Adding Batch Normalization to the layer \t" + \
                        prev_layer.__class__.__name__ + " " + \
                        str(prev_layer.n_out)
                    layer_model.gamma = theano.shared(
                        numpy.ones(shape, dtype=theano.config.floatX),
                        name=layer_model.__class__.__name__ + '_gamma',
                        borrow=True,
                        broadcastable=broadcastable
                    )
                    layer_model.beta = theano.shared(
                        numpy.zeros(shape, dtype=theano.config.floatX),
                        name=layer_model.__class__.__name__ + '_beta',
                        borrow=True,
                        broadcastable=broadcastable
                    )
                    layer_model.wxmean = theano.shared(
                        numpy.zeros(shape, dtype=theano.config.floatX),
                        name=layer_model.__class__.__name__ + '_wxmean',
                        borrow=True,
                        broadcastable=broadcastable
                    )
                    layer_model.wxstd = theano.shared(
                        numpy.zeros(shape, dtype=theano.config.floatX),
                        name=layer_model.__class__.__name__ + '_wxstd',
                        borrow=True,
                        broadcastable=broadcastable
                    )
                else:
                    layer_model.gamma = self.BN_params.pop()
                    layer_model.beta = self.BN_params.pop()
                    layer_model.wxmean = self.BN_meanstds.pop()
                    layer_model.wxstd = self.BN_meanstds.pop()
                self.params_batchnorm += [layer_model.gamma, layer_model.beta]
                self.params_meanstds += [layer_model.wxmean, layer_model.wxstd]

                if hasattr(layer_model, 'conv'):
                    def last_layer_output(fanin=None):
                        wx = layer_model.fanin()  # wx stands for w * x.
                        return self.model.models_stack[-1].output(
                            (
                                wx - wx.mean(axis=(0, 2, 3), keepdims=True)
                            ) / (
                                wx.std(axis=(0, 2, 3), keepdims=True) + 1E-6
                            ) * layer_model.gamma + layer_model.beta
                        )
                else:
                    def last_layer_output(fanin=None):
                        wx = layer_model.fanin()  # wx stands for w * x.
                        return self.model.models_stack[-1].output(
                            (
                                wx - wx.mean(axis=0, keepdims=True)
                            ) / (
                                wx.std(axis=0, keepdims=True) + 1E-6
                            ) * layer_model.gamma + layer_model.beta
                        )
                # NOTE(review): layer_model.output is assigned twice (here
                # and again two statements below); the first assignment is
                # redundant but harmless.
                layer_model.output = last_layer_output
                def last_layer_output_test(fanin=None):
                    wx = layer_model.fanin()  # wx stands for w * x.
                    return self.model.models_stack[-1].output(
                        (wx - layer_model.wxmean) / (layer_model.wxstd + 1E-6
                        ) * layer_model.gamma + layer_model.beta
                    )
                layer_model.output = last_layer_output
                layer_model_test.output = last_layer_output_test

            self.batchnorm_model = copied_model
            self.batchnorm_test = copied_model_test

        elif isinstance(self.model, Layer):
            print "WARNING: May not be correct now."
            if self.model.params != []:
                copied_layer = copy.copy(self.model)
                if not self.BN_params:
                    shape = copied_layer.n_out
                    broadcastable = (False, )
                    if hasattr(copied_layer, 'conv'):
                        shape = (1,) + shape[1:]
                        broadcastable = (True,) + (False, ) * (len(shape) - 1)
                    copied_layer.gamma = theano.shared(
                        numpy.ones(shape, dtype=theano.config.floatX),
                        name=copied_layer.__class__.__name__ + '_gamma',
                        borrow=True,
                        broadcastable=broadcastable
                    )
                    copied_layer.beta = theano.shared(
                        numpy.zeros(shape, dtype=theano.config.floatX),
                        name=copied_layer.__class__.__name__ + '_beta',
                        borrow=True,
                        broadcastable=broadcastable
                    )
                else:
                    copied_layer.gamma = self.BN_params.pop()
                    copied_layer.beta = self.BN_params.pop()
                # NOTE(review): `layer_model` is never defined in this
                # branch (it only exists in the StackedLayer branch), so
                # this line raises NameError at runtime; it presumably
                # should read `copied_layer.params` — confirm and fix.
                copied_layer.params_batchnorm = layer_model.params + \
                    [copied_layer.gamma, copied_layer.beta]
                copied_layer.params_extra = \
                    [copied_layer.gamma, copied_layer.beta]
                def normed_fanin():
                    return (
                        self.model.fanin() - self.model.fanin().mean(
                            axis=0, keepdims=True)
                    ) / (
                        self.model.fanin().std(axis=0, keepdims=True) + 1E-6
                    ) * copied_layer.gamma + copied_layer.beta
                copied_layer.fanin = normed_fanin
                self.batchnorm_model = copied_layer
            else:
                raise TypeError("Batch normalization on a layer with no "
                                "parameters has no meaning currently.")
        else:
            raise TypeError("Passed model has to be an Autoencoder, "
                            "StackedLayer or Layer object.")

    def compute_meanstds(self, given_data):
        # Evaluate each parameterized layer's fan-in mean and std over
        # `given_data`, returning them in params_meanstds order
        # (mean, std, mean, std, ...). The Theano functions are compiled
        # lazily on first call and cached on the instance.
        if not hasattr(self, 'theano_funcs'):
            # these are for computing means and stds at each layer.
            self.theano_funcs = []
            for layer in self.batchnorm_model.models_stack:
                if layer.params != []:
                    if hasattr(layer, 'conv'):
                        self.theano_funcs.append(theano.function(
                            [self.batchnorm_model.varin],
                            layer.varfanin.mean(axis=(0, 2, 3), keepdims=True),
                        ))
                        self.theano_funcs.append(theano.function(
                            [self.batchnorm_model.varin],
                            layer.varfanin.std(axis=(0, 2, 3), keepdims=True),
                        ))
                    else:
                        self.theano_funcs.append(theano.function(
                            [self.batchnorm_model.varin],
                            layer.varfanin.mean(axis=0, keepdims=True),
                        ))
                        self.theano_funcs.append(theano.function(
                            [self.batchnorm_model.varin],
                            layer.varfanin.std(axis=0, keepdims=True),
                        ))
        return [ifunc(given_data) for ifunc in self.theano_funcs]

    def set_mean_stds(self, meanstds):
        """
        set the mean and stds at each layer according to the passed input.
        """
        # Typically fed with the output of compute_meanstds() so the test
        # model normalizes with dataset-wide statistics.
        assert len(meanstds) == len(self.params_meanstds), (
            "passed list length is not consistent with the model's.")
        for ishared, idata in zip(self.params_meanstds, meanstds):
            ishared.set_value(idata)
| hantek/NeuroBricks | neurobricks/train.py | Python | bsd-3-clause | 54,936 |
from django.db import IntegrityError
from rest_framework import viewsets, status
from rest_framework.response import Response
from rest_framework.status import HTTP_404_NOT_FOUND, HTTP_400_BAD_REQUEST
from treeherder.model.models import JobType, Push, Repository, InvestigatedTests
from treeherder.webapp.api.serializers import InvestigatedTestsSerializers
class InvestigatedViewSet(viewsets.ModelViewSet):
    """
    Handles creating, reading and deleting investigated tests.

    All lookups are scoped to a single push, identified by the ``project``
    URL kwarg and the ``revision`` query parameter.
    """

    serializer_class = InvestigatedTestsSerializers
    allowed_methods = ['GET', 'POST', 'DELETE']

    def get_queryset(self):
        """Return the investigated tests of the push given by ?revision=."""
        revision = self.request.GET['revision']
        project = self.kwargs['project']
        try:
            repository = Repository.objects.get(name=project)
            push = Push.objects.get(revision=revision, repository=repository)
            # filter() never raises DoesNotExist (it just yields an empty
            # queryset), so no further exception handling is needed here.
            return InvestigatedTests.objects.filter(push=push)
        except Repository.DoesNotExist:
            # Previously an unknown project raised through and produced a
            # 500; report it as a 404 like the other lookup failures.
            return Response(
                "No repository with name: {0}".format(project),
                status=HTTP_404_NOT_FOUND,
            )
        except Push.DoesNotExist:
            return Response(
                "No push with revision: {0}".format(revision), status=HTTP_404_NOT_FOUND
            )

    def create(self, request, *args, **kwargs):
        """Mark a test as investigated for the given push and job type."""
        project = kwargs['project']
        revision = request.query_params.get('revision')
        test = request.data['test']
        jobName = request.data['jobName']
        jobSymbol = request.data['jobSymbol']

        try:
            repository = Repository.objects.get(name=project)
            push = Push.objects.get(revision=revision, repository=repository)
            job_type = JobType.objects.get(name=jobName, symbol=jobSymbol)
            serializer = self.get_serializer(data=request.data)
            serializer.is_valid(raise_exception=True)
            serializer.save(push=push, job_type=job_type, test=test)
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        except IntegrityError:
            # Uniqueness violation: this test is already marked for the push.
            return Response(
                "{0} already marked investigated".format(test), status=HTTP_400_BAD_REQUEST
            )
        except Repository.DoesNotExist:
            return Response(
                "No repository with name: {0}".format(project),
                status=HTTP_404_NOT_FOUND,
            )
        except Push.DoesNotExist:
            return Response(
                "No push with revision: {0}".format(revision), status=HTTP_404_NOT_FOUND
            )
        except JobType.DoesNotExist:
            return Response(
                "No JobType with job name: {0}".format(jobName), status=HTTP_404_NOT_FOUND
            )

    def destroy(self, request, project, pk=None):
        """Unmark (delete) an investigated test by primary key."""
        try:
            investigated_test = InvestigatedTests.objects.get(pk=pk)
            investigated_test.delete()
            return Response(
                status=status.HTTP_204_NO_CONTENT,
            )
        except InvestigatedTests.DoesNotExist:
            return Response("Test already uninvestigated", status=HTTP_404_NOT_FOUND)
| jmaher/treeherder | treeherder/webapp/api/investigated_test.py | Python | mpl-2.0 | 2,969 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 Comunitea Servicios Tecnológicos All Rights Reserved
# $Omar Castiñeira Saavedra <omar@comunitea.com>$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
    "name": "Purchase Advance Payment",
    "version": "1.0",
    "author": "Comunitea",
    'website': 'www.comunitea.com',
    "category": "Purchases",
    "description": """Allow to add advance payments on purchases and then use
its on invoices""",
    # Modules that must be installed before this one.
    "depends": ["purchase", "account_voucher"],
    # XML/CSV files loaded on install/update: wizards, views and ACLs.
    "data": ['wizard/purchase_advance_payment_wzd_view.xml',
             'wizard/apply_advanced_amount_wzd_view.xml',
             'partner_view.xml', 'invoice_view.xml', 'purchase_view.xml',
             'security/ir.model.access.csv'],
    "installable": True,
}
| jgmanzanas/CMNT_004_15 | project-addons/purchase_advance_payment/__openerp__.py | Python | agpl-3.0 | 1,575 |
##############################################################################
#
# Copyright (C) 2011 - 2013 Therp BV (<http://therp.nl>).
# Copyright (C) 2011 Smile (<http://smile.fr>).
# Copyright (C) 2014 Acysos S.L. (<http://acysos.com>).
# @author: Ignacio Ibeas <ignacio@acysos.com>
# All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Account Payment Direct Debit',
'version': '7.0.2.134',
'license': 'AGPL-3',
'author': ['Therp BV', 'Smile'],
'website': 'https://launchpad.net/account-payment',
'category': 'Banking addons',
'depends': ['account_payment_extension'],
'data': [
'view/account_payment.xml',
'view/account_invoice.xml',
'view/payment_type.xml',
'workflow/account_invoice.xml',
'data/account_payment_term.xml',
],
'description': '''
This module adds support for direct debit orders, analogous to payment orders.
A new entry in the Accounting/Payment menu allow you to create a direct debit
order that helps you to select any customer invoices for you to collect.
This module explicitely implements direct debit orders as applicable
in the Netherlands. Debit orders are advanced in total by the bank.
Amounts that cannot be debited or are canceled by account owners are
credited afterwards. Such a creditation is called a storno. This style of
direct debit order may not apply to your country.
This module depends on and is part of the account payment for OpenERP. This set
of modules helps you to provide support for communications with your local
banking institutions.
Adapted to account_payment_extension by Acysos S.L. <info@acysos.com>
''',
'installable': True,
}
| otherway/sepa-tools | account_payment_direct_debit/__openerp__.py | Python | agpl-3.0 | 2,442 |
from social_django.admin import UserSocialAuthOption, NonceOption, AssociationOption
| cjltsod/python-social-auth | social/apps/django_app/default/admin.py | Python | bsd-3-clause | 85 |
# http://www.asterank.com/skymorph
#This API wraps NASA's SkyMorph archive in a RESTful JSON interface. Currently, it provides observation and image data from the NEAT survey.
from bowshock.helpers import bowshock_logger, dispatch_http_get
logger = bowshock_logger()
def search_target_obj(target):
    '''Look up SkyMorph observations for a named target object.

    Issues http://asterank.com/api/skymorph/search?target=<target>, where
    the target name is resolved against the MPC by the remote service.

    Raises ValueError when `target` is not a string.
    '''
    if not isinstance(target, str):
        raise ValueError("The target arg you provided is not the type of str")
    url = "http://asterank.com/api/skymorph/search?" + "target=" + target
    return dispatch_http_get(url)
def search_orbit(**kwargs):
    '''Query the SkyMorph archive based on orbital elements.

    Builds http://asterank.com/api/skymorph/search_orbit?<params> from the
    supplied keyword arguments and returns the dispatched HTTP response.

    Accepted keywords (per the Asterank API):
        epoch    Epoch ([M]JD or ISO)
        ecc      eccentricity
        per      Perihelion distance (AU)
        per_date Perihelion date ([M]JD or ISO)
        om       Longitude of ascending node (deg)
        w        Argument of perihelion (deg)
        i        Inclination (deg)
        H        Absolute magnitude
    '''
    base_url = "http://asterank.com/api/skymorph/search_orbit"
    # str() on values makes numeric arguments usable too; the original
    # '+' concatenation raised TypeError for anything not already a string.
    query = "&".join(str(key) + "=" + str(value)
                     for key, value in kwargs.items())
    # Matches the original trailing-'&' trimming behavior: no '?' is
    # emitted when there are no parameters at all.
    if query:
        base_url += "?" + query
    return dispatch_http_get(base_url)
def search_position(**kwargs):
    '''Query the SkyMorph archive by sky position and time (+/- 1 day).

    Builds http://asterank.com/api/skymorph/search_position?<params> from
    the supplied keyword arguments and returns the dispatched response.

    Accepted keywords (per the Asterank API):
        ra       Right ascension (HMS)
        dec      Declination (DMS)
        time     Date and time (UTC)
        per_date Perihelion date ([M]JD or ISO)
        om       Longitude of ascending node (deg)
        w        Argument of perihelion (deg)
        i        Inclination (deg)
        H        Absolute magnitude
    '''
    base_url = "http://asterank.com/api/skymorph/search_position"
    # str() on values makes numeric arguments usable too; the original
    # '+' concatenation raised TypeError for anything not already a string.
    query = "&".join(str(key) + "=" + str(value)
                     for key, value in kwargs.items())
    # Matches the original trailing-'&' trimming behavior: no '?' is
    # emitted when there are no parameters at all.
    if query:
        base_url += "?" + query
    return dispatch_http_get(base_url)
| danwagnerco/bowshock | bowshock/skymorph.py | Python | gpl-2.0 | 2,047 |
from pychecker2 import TestSupport
from pychecker2 import VariableChecks
class UnknownTestCase(TestSupport.WarningTester):
    """Exercises VariableChecks.UnknownCheck.

    An undefined name referenced in a function should warn; names bound
    in an enclosing scope, names brought in by star imports, module
    imports visible to nested functions, and loop unpacking targets must
    all stay silent.
    """

    def testUnknown(self):
        # bare undefined global inside a function body -> warning on line 1
        self.warning('def f(): print a\n',
                     1, VariableChecks.UnknownCheck.unknown, 'a')
        # closure: 'a' is bound in the enclosing function -> no warning
        self.silent('def f():\n'
                    '  a = 1\n'
                    '  def g():\n'
                    '    print a\n'
                    '  print g()\n')
        # same shape but 'a' never bound anywhere -> warning on line 3
        self.warning('def f():\n'
                     '  def g():\n'
                     '    print a\n'
                     '  print g()\n',
                     3, VariableChecks.UnknownCheck.unknown, 'a')
        # star import may define the name -> give it the benefit of doubt
        self.silent('from sys import *\n'
                    'def f():\n'
                    '  print argv\n')
        # module import is visible to nested scopes
        self.silent('import sys\n'
                    'def f():\n'
                    '  def g():\n'
                    '    print g, sys\n')
        # tuple unpacking in a for-loop binds both targets
        self.silent('def f():\n'
                    '  for a, b in [(1,2)]:\n'
                    '    print a, b\n')
| lavjain/incubator-hawq | tools/bin/pythonSrc/pychecker-0.8.18/pychecker2/utest/unknown.py | Python | apache-2.0 | 1,029 |
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 30 11:37:59 2016
@author: AF
"""
import ode as solving_ode
import math
import matplotlib.pyplot as plt
def correct(string,y_t):  # trim the trajectory so it ends exactly at y == y_t
    """Cut the tail of a sampled trajectory at the target height.

    string -- [x_list, y_list] pair of equal-length coordinate lists
    y_t    -- height at which the trajectory should terminate

    Tail samples with y < y_t are deleted; the exact crossing point is
    then recovered by linear interpolation between the first deleted
    sample below y_t and the last surviving one, and appended so that the
    final point lies exactly on y_t.  Mutates `string` in place and
    returns (x_list, y_list).

    NOTE(review): assumes the trajectory actually dips below y_t at its
    tail; if it never does, the loop keeps deleting -- confirm callers
    always pre-check with find_maxheight.
    """
    x_record = string[0][-1]
    y_record = y_t
    while True:
        if (string[1][-1] < y_t):
            # remember the first sample that fell below the target height;
            # it becomes one endpoint of the interpolation segment
            if (string[1][-1] < y_t and string[1][-2] > y_t):
                x_record = string[0][-1]
                y_record = string[1][-1]
            del string[0][-1]
            del string[1][-1]
        else:
            # linear interpolation between the recorded below-target point
            # and the last kept sample, landing exactly on y_t
            string[0].append(string[0][-1]+(y_t - string[1][-1])*(string[0][-1] - x_record)/(string[1][-2] - y_record))
            string[1].append(y_t)
            break
    return string[0],string[1]
def calculate(v,theta):
    """Integrate one cannon-shell trajectory and trim it at ground level.

    v     -- muzzle speed (m/s)
    theta -- launch angle in degrees

    Uses the project's Euler integrator (ode.ode) with quadratic air drag
    whose coefficient is reduced with altitude by the adiabatic density
    factor (1 - 0.0065*y/280)**2.5.  Returns the (x_list, y_list) of the
    trajectory cut off exactly at y == 0 via correct().
    """
    vx_0 = v * math.cos(theta*math.pi/180)
    vy_0 = v * math.sin(theta*math.pi/180)
    # step 0.1 s over t in [0, 200] s, initial state (x, v_x, y, v_y)
    A = solving_ode.ode(0.1,0,200,(0,vx_0,0,vy_0))
    # right-hand sides for (x', v_x', y', v_y'): gravity plus drag with
    # B2/m = 4e-5 * |v| (here 700 is the reference speed factor used by
    # the textbook model) -- NOTE(review): confirm against ode.set_fx docs
    A.set_fx(['v_x','-0.00004*700*v_x*(1-(0.0065*y)/280)**2.5','v_y','-9.8-0.00004*700*v_y*(1-(0.0065*y)/280)**2.5'],['t','x','v_x','y','v_y'])
    record = A.euler()[:]
    # record[1][0] is the x series, record[1][2] the y series
    return correct([record[1][0],record[1][2]],0)
def find_maxheight(string):
    """Return the maximum y value of a trajectory.

    string -- [x_list, y_list]; only the y list (string[1]) is examined.

    The previous implementation accumulated the running maximum into
    string[1][0], silently overwriting the first y sample of the caller's
    trajectory data.  This version is side-effect free: the input is left
    untouched and the maximum is computed with the builtin max().
    """
    return max(string[1])
def scan_angle(v,theta, x_t, y_t, degree):
    """Scan launch angles around `theta` and pick the best hit on x_t.

    degree indexes progressively finer search windows (ran_a) and step
    sizes (delta) for successive refinement passes.  Angles whose
    trajectory never reaches height y_t are given a sentinel landing
    distance of 1e8 so they can never be selected.  Returns the tuple
    (best_theta, landing_x_at_y_t).

    NOTE(review): in the "too low" branch theta is incremented *before*
    being appended to theta_record, while the hit branch appends first --
    the recorded angles are shifted by one step in that branch; confirm
    whether this asymmetry is intentional.
    """
    ran_a = [20, 5, 2, 0.8, 0.2]
    delta = [5, 1, 0.5, 0.1, 0.02]
    theta = theta - ran_a[degree]
    theta_record = []
    x = []
    data = [[],[]]
    for i in range(int(ran_a[degree]*2/delta[degree]+1)):
        data = calculate(v,theta)[:]
        if ( find_maxheight(data) < y_t):
            # shell never reaches the target height: mark as unusable
            theta = theta + delta[degree]
            x.append(100000000)
            theta_record.append(theta)
        else:
            data = correct(data,y_t)[:]
            x.append(data[0][-1])
            theta_record.append(theta)
            theta = theta + delta[degree]
    # selection pass: fold the closest-to-target candidate into slot 0
    for j in range(len(x)):
        if ( abs(x[0] - x_t) > abs(x[j] - x_t)):
            x[0] = x[j]
            theta_record[0] = theta_record[j]
    return theta_record[0],x[0]  # best angle and its landing x
def scan_v(v,theta, x_target, y_target, degree):
    """Scan muzzle speeds around `v`, optimising the angle at each speed.

    For each candidate speed the best angle is found with scan_angle();
    the (v, theta) pair whose landing x is closest to x_target wins.
    degree selects the search window/step tables (ran_v/delta).  Prints
    the scanned grids (Python 2 print statements) for debugging and
    returns (best_v, best_theta).
    """
    ran_v = [100, 10, 2, 0.8, 0.2]
    delta = [20, 2, 0.5, 0.1, 0.02]
    v = v - ran_v[degree]
    x = []
    theta_record = []
    v_record = []
    for i in range(int(ran_v[degree]*2/delta[degree]+1)):
        record_v = scan_angle(v,theta, x_target, y_target, degree)[:]
        x.append(record_v[1])
        theta_record.append(record_v[0])
        v_record.append(v)
        v = v + delta[degree]
    # selection pass: fold the closest-to-target candidate into slot 0
    for j in range(len(x)):
        if ( abs(x[0] - x_target) > abs(x[j] - x_target)):
            x[0] = x[j]
            v_record[0] = v_record[j]
            theta_record[0] = theta_record[j]
    print v_record
    print theta_record
    print x
    return v_record[0],theta_record[0]
def deep_scan(v, theta, x_target, y_target, precision):
    """Iteratively refine (v, theta) toward hitting (x_target, y_target).

    precision -- number of refinement passes (at most 5, limited by the
    window tables inside scan_v/scan_angle); each pass narrows the search
    window around the previous best solution.  Returns the final
    (best_v, best_theta) pair.
    """
    degree = 0
    record = [[],[]]
    for i in range(precision):
        record = scan_v(v,theta,x_target,y_target,degree)[:]
        # re-centre the next, finer pass on this pass's winner
        v = record[0]
        theta = record[1]
        degree = degree + 1
    return record
def judge_hitting(string, x_t, y_t):
    """Report whether the trajectory endpoint hits the target.

    string -- [x_list, y_list] trajectory; only the final x sample counts
    x_t    -- target x coordinate; a hit means |x_end - x_t| < 8
    y_t    -- unused, kept for interface compatibility with callers

    Prints the verdict (messages unchanged) and returns 1 on a hit,
    0 on a miss.  The previous version looped len(string[0]) times while
    always testing the same last sample -- the loop was pure waste.
    An empty trajectory counts as a miss, matching the old behavior
    (its loop body never ran).  print(...) with a single argument keeps
    identical output under both Python 2 and 3.
    """
    hit = bool(string[0]) and (x_t - 8 < string[0][-1] < x_t + 8)
    if hit:
        print('Successfully hitted')
        return 1
    print('Miss')
    return 0
def main():
    """Solve for a firing solution onto (800, 8000) and plot the result.

    Runs the 5-pass refinement (deep_scan), recomputes the winning
    trajectory trimmed at the target height, reports hit/miss and dumps
    a labelled matplotlib plot to chapter2.png.  Uses Python 2 print
    statements, consistent with the rest of this script.
    """
    x_target = 800
    y_target = 8000
#    theta0 = 180*math.atan(y_target/x_target)/math.pi
    # precision: 3 for grade 1, 4 for grade 2, 5 for grade 3
    data_record = deep_scan(700,60,x_target,y_target,5)
    v = data_record[0]
    theta = data_record[1]
    # recompute the best trajectory and cut it at the target height
    cannon_record = correct(calculate(v, theta),y_target)
    x_max = cannon_record[0][-1]
    judge_hitting(cannon_record,x_target,y_target)
    print x_max
    print theta
    print v
    plt.figure(figsize = (8,6))
    plt.title('Trajectory of cannon shell')
    plt.xlabel('x(m)')
    plt.ylabel('y(m)')
    plt.plot(x_target,y_target,'k*',linewidth = 10,label='Target')
    plt.plot(cannon_record[0],cannon_record[1],label= 'Trajectroy')
    plt.legend()
    plt.savefig('chapter2.png',dpi = 144)
    plt.show()
| 1412kid/computationalphysics_n2014301020035 | Chapter2/chapter2_2.10.py | Python | mit | 4,276 |
#!/usr/bin/python3
#This script will determine the season and episode number of a TV show given the Title and episode Subtitle.
#If it cannot find the correct episode from the subtitle, the airdate will alternatively be used.
import requests
import json
def get_sonarr_id(sonarr_ip, sonarr_port, sonarr_api, tvdb_id):
    """Resolve a TVDB id to Sonarr's internal series id.

    Fetches the full series list from the Sonarr REST API and returns the
    ``id`` of the entry whose ``tvdbId`` matches ``tvdb_id``.  Raises
    NameError (message preserved for existing callers) when the show is
    not present in Sonarr's database.
    """
    print ("Getting Sonarr show ID.")
    url = "http://%s:%s/api/series" % (sonarr_ip, sonarr_port)
    response = requests.get(url, headers={"X-Api-Key": sonarr_api})
    all_series = json.loads(response.text)
    # First matching series wins, mirroring the original loop-with-break.
    sonarr_id = next((entry["id"] for entry in all_series
                      if str(entry["tvdbId"]) == tvdb_id), None)
    if sonarr_id is None:
        raise NameError("Show not found in Sonarr database. Not marked as unmonitored.")
    print ("Found Sonarr ID: \"%s\"." % sonarr_id)
    return sonarr_id
def get_episode_id(sonarr_ip, sonarr_port, sonarr_api, sonarr_id, episode_info):
    """Find the Sonarr episode id for a given (season, episode) pair.

    episode_info is a sequence whose element [1] is the season number and
    [2] the episode number (both compared as strings).  Returns a tuple
    (episode_id, hasfile) where hasfile reports whether Sonarr already
    has a file for that episode.  Raises NameError (message preserved)
    when no matching episode exists.
    """
    season = episode_info[1]
    episode = episode_info[2]
    url = "http://%s:%s/api/episode/" % (sonarr_ip, sonarr_port)
    response = requests.get(url, params={"seriesID": sonarr_id},
                            headers={"X-Api-Key": sonarr_api})
    for entry in json.loads(response.text):
        if (str(entry["seasonNumber"]) == season
                and str(entry["episodeNumber"]) == episode):
            # Same (id, hasFile) pairing and order as the original.
            return (entry["id"], entry["hasFile"])
    raise NameError("Episode not found in Sonarr episode list for sonarr_id %s." % sonarr_id)
def unmonitor(sonarr_ip, sonarr_port, sonarr_api, sonarr_id, episode_id):
    """Mark a single episode as unmonitored in Sonarr.

    Issues a PUT against the episode endpoint and echoes Sonarr's JSON
    reply to stdout.
    """
    url = "http://%s:%s/api/episode/" % (sonarr_ip, sonarr_port)
    payload = {"seriesId": sonarr_id, "id": episode_id, "monitored": "false"}
    response = requests.put(url, json=payload, headers={"X-Api-Key": sonarr_api})
    print (response.content.decode("utf-8"))
    return
def scan(sonarr_ip, sonarr_port, sonarr_api, path):
    """Ask Sonarr to scan a downloaded-episodes folder.

    Posts the 'downloadedepisodesscan' command for ``path`` and echoes
    Sonarr's JSON reply to stdout.
    """
    url = "http://%s:%s/api/command/" % (sonarr_ip, sonarr_port)
    command = {"name": "downloadedepisodesscan", "path": path}
    response = requests.post(url, json=command, headers={"X-Api-Key": sonarr_api})
    print (response.content.decode("utf-8"))
    return
"""
Tests for StopWordFactory
"""
import os
from tempfile import NamedTemporaryFile
from unittest import TestCase
from mots_vides.stop_words import StopWord
from mots_vides.factory import StopWordFactory
from mots_vides.exceptions import StopWordError
class StopWordFactoryTestCase(TestCase):
    """Tests for StopWordFactory: collection loading, language shortcuts,
    caching, failure modes and collection file round-tripping.

    Fix: test_get_stopwords_cache_twice_python3 used the deprecated
    unittest alias ``assertEquals``; replaced with ``assertEqual``.
    """

    def setUp(self):
        # Point the factory at the bundled fixture collections and map the
        # shortcut codes 'kl'/'si' onto the fixture languages.
        self.data_directory = os.path.join(
            os.path.dirname(
                os.path.abspath(__file__)),
            'datas/')
        self.factory = StopWordFactory(self.data_directory,
                                       {'kl': 'klingon',
                                        'si': 'sindarin'})

    def test_get_stopwords(self):
        sw = self.factory.get_stop_words('klingon')
        self.assertTrue(isinstance(sw, StopWord))
        self.assertEqual(sorted(list(sw.collection)),
                         ["HIja'", "ghobe'", 'naDev', 'nuq'])

    def test_get_stopwords_shortcuts(self):
        # 'kl' must resolve to the same collection as 'klingon'
        sw = self.factory.get_stop_words('kl')
        self.assertEqual(sorted(list(sw.collection)),
                         ["HIja'", "ghobe'", 'naDev', 'nuq'])

    def test_get_stopwords_unavailable_language(self):
        self.assertRaises(StopWordError, self.factory.get_stop_words, 'vulcan')
        # fail_safe swallows the error and yields an empty collection
        sw = self.factory.get_stop_words('vulcan', fail_safe=True)
        self.assertEqual(list(sw.collection), [])

    def test_get_stopwords_file_unreadable(self):
        self.factory.available_languages  # Fill the cache, pass security
        self.factory.data_directory = '/brutal/change/'
        self.assertRaises(StopWordError,
                          self.factory.get_stop_words, 'klingon')
        sw = self.factory.get_stop_words('klingon', fail_safe=True)
        self.assertEqual(list(sw.collection), [])

    def test_get_stopwords_cache(self):
        self.assertEqual(self.factory.LOADED_LANGUAGES_CACHE, {})
        self.factory.get_stop_words('klingon')
        self.assertEqual(list(self.factory.LOADED_LANGUAGES_CACHE.keys()),
                         ['klingon'])
        sw = self.factory.get_stop_words('kl')
        self.assertEqual(list(self.factory.LOADED_LANGUAGES_CACHE.keys()),
                         ['klingon'])
        # once cached, the data directory is never consulted again
        self.factory.data_directory = '/brutal/change/'
        self.assertEqual(sw.collection,
                         self.factory.get_stop_words('klingon').collection)

    def test_get_stopwords_cache_and_errors(self):
        # failed lookups must never be cached, with or without fail_safe
        self.assertRaises(StopWordError, self.factory.get_stop_words, 'vulcan')
        self.assertRaises(StopWordError, self.factory.get_stop_words, 'vulcan')
        self.assertEqual(self.factory.LOADED_LANGUAGES_CACHE, {})
        self.factory.get_stop_words('vulcan', fail_safe=True)
        self.assertEqual(self.factory.LOADED_LANGUAGES_CACHE, {})
        self.assertRaises(StopWordError, self.factory.get_stop_words, 'vulcan')

    def test_get_stopwords_cache_twice_python3(self):
        sw = self.factory.get_stop_words('klingon')
        self.assertEqual(len(sw), len(self.factory.get_stop_words('klingon')))

    def test_available_languages(self):
        self.assertEqual(self.factory.available_languages,
                         ['klingon', 'sindarin'])
        # the language list is cached, so a later bad path is harmless
        self.factory.data_directory = '/brutal/change/'
        self.assertEqual(self.factory.available_languages,
                         ['klingon', 'sindarin'])

    def test_available_languages_error(self):
        self.factory.data_directory = '/brutal/change/'
        self.assertRaises(StopWordError,
                          lambda: self.factory.available_languages)

    def test_get_collection_filename(self):
        filename = self.factory.get_collection_filename('foo')
        self.assertTrue(filename.endswith('foo.txt'))
        self.assertTrue(filename.startswith(self.data_directory))

    def test_read_collection(self):
        # blank / whitespace-only lines must be skipped on read
        collection_file = NamedTemporaryFile()
        collection_text = 'egor\n\n  \nai\n'
        collection_file.write(collection_text.encode('utf-8'))
        collection_file.seek(0)
        collection = self.factory.read_collection(collection_file.name)
        self.assertEqual(list(collection), ['egor', 'ai'])
        collection_file.close()

    def test_write_collection(self):
        # collections are written sorted, one word per line
        collection_file = NamedTemporaryFile()
        self.factory.write_collection(
            collection_file.name,
            ['nuq', "HIja'", "ghobe'", 'naDev'])
        collection_file.seek(0)
        self.assertEqual(collection_file.read().decode('utf-8'),
                         "HIja'\nghobe'\nnaDev\nnuq")
        collection_file.close()
#!/usr/bin/python
# Copyright (C) 2012 Sibi <sibi@psibi.in>
#
# This file is part of pyuClassify.
#
# pyuClassify program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pyuClassify program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pyuClassify program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pyuClassify. If not, see <http://www.gnu.org/licenses/>.
#
# Author: Sibi <sibi@psibi.in>
from .uclassify_endpoints import uclassify_http_status_codes
class uClassifyError(Exception):
    """Base exception for all uClassify API failures.

    When a recognised HTTP status code is supplied, the stored message is
    prefixed with that code's short name and description taken from
    uclassify_http_status_codes.
    """

    def __init__(self, msg, error_code=None):
        self.error_code = error_code
        if error_code is not None and error_code in uclassify_http_status_codes:
            name, description = uclassify_http_status_codes[error_code]
            msg = '%s: %s --%s' % (name, description, msg)
        self.msg = msg

    def __str__(self):
        return repr(self.msg)
| psibi/pyuClassify | uclassify/uclassify_eh.py | Python | gpl-3.0 | 1,589 |
# Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import netaddr
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
import webob.exc
from neutron.api import api_common
from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api
from neutron.api.v2 import attributes
from neutron.api.v2 import resource as wsgi_resource
from neutron.common import constants as const
from neutron.common import exceptions
from neutron.common import rpc as n_rpc
from neutron.i18n import _LE, _LI
from neutron.openstack.common import policy as common_policy
from neutron import policy
from neutron import quota
LOG = logging.getLogger(__name__)
FAULT_MAP = {exceptions.NotFound: webob.exc.HTTPNotFound,
exceptions.Conflict: webob.exc.HTTPConflict,
exceptions.InUse: webob.exc.HTTPConflict,
exceptions.BadRequest: webob.exc.HTTPBadRequest,
exceptions.ServiceUnavailable: webob.exc.HTTPServiceUnavailable,
exceptions.NotAuthorized: webob.exc.HTTPForbidden,
netaddr.AddrFormatError: webob.exc.HTTPBadRequest,
common_policy.PolicyNotAuthorized: webob.exc.HTTPForbidden
}
class Controller(object):
LIST = 'list'
SHOW = 'show'
CREATE = 'create'
UPDATE = 'update'
DELETE = 'delete'
    def __init__(self, plugin, collection, resource, attr_info,
                 allow_bulk=False, member_actions=None, parent=None,
                 allow_pagination=False, allow_sorting=False):
        """Wire a REST controller onto a plugin for one resource type.

        plugin         -- backend implementing get/create/update/delete
        collection     -- plural resource name (dashes normalised to '_')
        resource       -- singular resource name
        attr_info      -- attribute map describing the resource's fields
        member_actions -- extra per-member actions exposed via __getattr__
        parent         -- optional {'member_name': ...} for sub-resources
        """
        if member_actions is None:
            member_actions = []
        self._plugin = plugin
        self._collection = collection.replace('-', '_')
        self._resource = resource.replace('-', '_')
        self._attr_info = attr_info
        self._allow_bulk = allow_bulk
        self._allow_pagination = allow_pagination
        self._allow_sorting = allow_sorting
        # whether the plugin natively supports bulk/pagination/sorting, or
        # the controller must emulate them
        self._native_bulk = self._is_native_bulk_supported()
        self._native_pagination = self._is_native_pagination_supported()
        self._native_sorting = self._is_native_sorting_supported()
        self._policy_attrs = [name for (name, info) in self._attr_info.items()
                              if info.get('required_by_policy')]
        self._notifier = n_rpc.get_notifier('network')
        # use plugin's dhcp notifier, if this is already instantiated
        agent_notifiers = getattr(plugin, 'agent_notifiers', {})
        self._dhcp_agent_notifier = (
            agent_notifiers.get(const.AGENT_TYPE_DHCP) or
            dhcp_rpc_agent_api.DhcpAgentNotifyAPI()
        )
        if cfg.CONF.notify_nova_on_port_data_changes:
            from neutron.notifiers import nova
            self._nova_notifier = nova.Notifier()
        self._member_actions = member_actions
        self._primary_key = self._get_primary_key()
        if self._allow_pagination and self._native_pagination:
            # Native pagination need native sorting support
            if not self._native_sorting:
                raise exceptions.Invalid(
                    _("Native pagination depend on native sorting")
                )
            if not self._allow_sorting:
                LOG.info(_LI("Allow sorting is enabled because native "
                             "pagination requires native sorting"))
                self._allow_sorting = True
        # for sub-resources, plugin handler names embed the parent member
        # name (e.g. get_router_ports) and requests carry <parent>_id
        if parent:
            self._parent_id_name = '%s_id' % parent['member_name']
            parent_part = '_%s' % parent['member_name']
        else:
            self._parent_id_name = None
            parent_part = ''
        self._plugin_handlers = {
            self.LIST: 'get%s_%s' % (parent_part, self._collection),
            self.SHOW: 'get%s_%s' % (parent_part, self._resource)
        }
        for action in [self.CREATE, self.UPDATE, self.DELETE]:
            self._plugin_handlers[action] = '%s%s_%s' % (action, parent_part,
                                                         self._resource)
    def _get_primary_key(self, default_primary_key='id'):
        # Return the attribute flagged as primary key in the resource's
        # attribute map, falling back to 'id' when none is marked.
        for key, value in self._attr_info.iteritems():
            if value.get('primary_key', False):
                return key
        return default_primary_key
    def _is_native_bulk_supported(self):
        # Plugins advertise native bulk-create support through a
        # name-mangled class attribute; absence means emulation is used.
        native_bulk_attr_name = ("_%s__native_bulk_support"
                                 % self._plugin.__class__.__name__)
        return getattr(self._plugin, native_bulk_attr_name, False)
    def _is_native_pagination_supported(self):
        # Same name-mangled advertisement convention as bulk support.
        native_pagination_attr_name = ("_%s__native_pagination_support"
                                       % self._plugin.__class__.__name__)
        return getattr(self._plugin, native_pagination_attr_name, False)
    def _is_native_sorting_supported(self):
        # Same name-mangled advertisement convention as bulk support.
        native_sorting_attr_name = ("_%s__native_sorting_support"
                                    % self._plugin.__class__.__name__)
        return getattr(self._plugin, native_sorting_attr_name, False)
    def _exclude_attributes_by_policy(self, context, data):
        """Identifies attributes to exclude according to authZ policies.

        Return a list of attribute names which should be stripped from the
        response returned to the user because the user is not authorized
        to see them.  An attribute is visible only if it is declared
        visible in the attribute map AND a per-attribute policy check
        (``<show_handler>:<attr>``) passes.
        """
        attributes_to_exclude = []
        for attr_name in data.keys():
            attr_data = self._attr_info.get(attr_name)
            if attr_data and attr_data['is_visible']:
                if policy.check(
                    context,
                    '%s:%s' % (self._plugin_handlers[self.SHOW], attr_name),
                    data,
                    might_not_exist=True,
                    pluralized=self._collection):
                    # this attribute is visible, check next one
                    continue
            # if the code reaches this point then either the policy check
            # failed or the attribute was not visible in the first place
            attributes_to_exclude.append(attr_name)
        return attributes_to_exclude
    def _view(self, context, data, fields_to_strip=None):
        """Build a view of an API resource.

        :param context: the neutron context
        :param data: the object for which a view is being created
        :param fields_to_strip: attributes to remove from the view

        :returns: a view of the object which includes only attributes
        visible according to API resource declaration and authZ policies.
        """
        # combine caller-requested exclusions with policy-driven ones
        fields_to_strip = ((fields_to_strip or []) +
                           self._exclude_attributes_by_policy(context, data))
        return self._filter_attributes(context, data, fields_to_strip)
    def _filter_attributes(self, context, data, fields_to_strip=None):
        # Return a copy of `data` without the given keys; `context` is
        # unused here but kept for signature symmetry with _view().
        if not fields_to_strip:
            return data
        return dict(item for item in data.iteritems()
                    if (item[0] not in fields_to_strip))
    def _do_field_list(self, original_fields):
        """Extend a requested field list with policy-required attributes.

        Returns (fields, fields_to_add) where fields_to_add lists the
        attributes that were appended only for policy evaluation and must
        be stripped again before the response is returned.
        """
        fields_to_add = None
        # don't do anything if fields were not specified in the request
        if original_fields:
            fields_to_add = [attr for attr in self._policy_attrs
                             if attr not in original_fields]
            original_fields.extend(self._policy_attrs)
        return original_fields, fields_to_add
    def __getattr__(self, name):
        """Dynamically expose configured member actions as handlers.

        For a name listed in member_actions, return a WSGI-style handler
        that fetches the target object, enforces the action's policy and
        dispatches to the plugin method of the same name.  Any other name
        raises AttributeError as usual.
        """
        if name in self._member_actions:
            def _handle_action(request, id, **kwargs):
                arg_list = [request.context, id]
                # Ensure policy engine is initialized
                policy.init()
                # Fetch the resource and verify if the user can access it
                try:
                    resource = self._item(request, id, True)
                except common_policy.PolicyNotAuthorized:
                    msg = _('The resource could not be found.')
                    raise webob.exc.HTTPNotFound(msg)
                body = kwargs.pop('body', None)
                # Explicit comparison with None to distinguish from {}
                if body is not None:
                    arg_list.append(body)
                # It is ok to raise a 403 because accessibility to the
                # object was checked earlier in this method
                policy.enforce(request.context,
                               name,
                               resource,
                               pluralized=self._collection)
                return getattr(self._plugin, name)(*arg_list, **kwargs)
            return _handle_action
        else:
            raise AttributeError()
    def _get_pagination_helper(self, request):
        # Pick native, emulated or no-op pagination depending on what the
        # controller allows and the plugin supports.
        if self._allow_pagination and self._native_pagination:
            return api_common.PaginationNativeHelper(request,
                                                     self._primary_key)
        elif self._allow_pagination:
            return api_common.PaginationEmulatedHelper(request,
                                                       self._primary_key)
        return api_common.NoPaginationHelper(request, self._primary_key)
    def _get_sorting_helper(self, request):
        # Pick native, emulated or no-op sorting depending on what the
        # controller allows and the plugin supports.
        if self._allow_sorting and self._native_sorting:
            return api_common.SortingNativeHelper(request, self._attr_info)
        elif self._allow_sorting:
            return api_common.SortingEmulatedHelper(request, self._attr_info)
        return api_common.NoSortingHelper(request, self._attr_info)
    def _items(self, request, do_authz=False, parent_id=None):
        """Retrieves and formats a list of elements of the requested entity.

        Applies request filters, sorting and pagination (native or
        emulated), optionally filters out objects the caller may not see,
        strips policy-only fields and returns the collection dict,
        including pagination links when applicable.
        """
        # NOTE(salvatore-orlando): The following ensures that fields which
        # are needed for authZ policy validation are not stripped away by the
        # plugin before returning.
        original_fields, fields_to_add = self._do_field_list(
            api_common.list_args(request, 'fields'))
        filters = api_common.get_filters(request, self._attr_info,
                                         ['fields', 'sort_key', 'sort_dir',
                                          'limit', 'marker', 'page_reverse'])
        kwargs = {'filters': filters,
                  'fields': original_fields}
        sorting_helper = self._get_sorting_helper(request)
        pagination_helper = self._get_pagination_helper(request)
        sorting_helper.update_args(kwargs)
        sorting_helper.update_fields(original_fields, fields_to_add)
        pagination_helper.update_args(kwargs)
        pagination_helper.update_fields(original_fields, fields_to_add)
        if parent_id:
            kwargs[self._parent_id_name] = parent_id
        obj_getter = getattr(self._plugin, self._plugin_handlers[self.LIST])
        obj_list = obj_getter(request.context, **kwargs)
        obj_list = sorting_helper.sort(obj_list)
        obj_list = pagination_helper.paginate(obj_list)
        # Check authz
        if do_authz:
            # FIXME(salvatore-orlando): obj_getter might return references to
            # other resources. Must check authZ on them too.
            # Omit items from list that should not be visible
            obj_list = [obj for obj in obj_list
                        if policy.check(request.context,
                                        self._plugin_handlers[self.SHOW],
                                        obj,
                                        plugin=self._plugin,
                                        pluralized=self._collection)]
        # Use the first element in the list for discriminating which attributes
        # should be filtered out because of authZ policies
        # fields_to_add contains a list of attributes added for request policy
        # checks but that were not required by the user. They should be
        # therefore stripped
        fields_to_strip = fields_to_add or []
        if obj_list:
            fields_to_strip += self._exclude_attributes_by_policy(
                request.context, obj_list[0])
        collection = {self._collection:
                      [self._filter_attributes(
                          request.context, obj,
                          fields_to_strip=fields_to_strip)
                       for obj in obj_list]}
        pagination_links = pagination_helper.get_links(obj_list)
        if pagination_links:
            collection[self._collection + "_links"] = pagination_links
        return collection
    def _item(self, request, id, do_authz=False, field_list=None,
              parent_id=None):
        """Retrieves and formats a single element of the requested entity.

        When do_authz is True, the SHOW policy is enforced on the fetched
        object (raising PolicyNotAuthorized on failure).
        """
        kwargs = {'fields': field_list}
        action = self._plugin_handlers[self.SHOW]
        if parent_id:
            kwargs[self._parent_id_name] = parent_id
        obj_getter = getattr(self._plugin, action)
        obj = obj_getter(request.context, id, **kwargs)
        # Check authz
        # FIXME(salvatore-orlando): obj_getter might return references to
        # other resources. Must check authZ on them too.
        if do_authz:
            policy.enforce(request.context,
                           action,
                           obj,
                           pluralized=self._collection)
        return obj
    def _send_dhcp_notification(self, context, data, methodname):
        # Forward a resource change to DHCP agents when enabled; bulk
        # payloads (keyed by collection) are fanned out item by item.
        if cfg.CONF.dhcp_agent_notification:
            if self._collection in data:
                for body in data[self._collection]:
                    item = {self._resource: body}
                    self._dhcp_agent_notifier.notify(context, item, methodname)
            else:
                self._dhcp_agent_notifier.notify(context, data, methodname)
    def _send_nova_notification(self, action, orig, returned):
        # No-op unless the nova notifier was created in __init__ (i.e.
        # notify_nova_on_port_data_changes is enabled).
        if hasattr(self, '_nova_notifier'):
            self._nova_notifier.send_network_change(action, orig, returned)
    def index(self, request, **kwargs):
        """Returns a list of the requested entity (GET on the collection)."""
        parent_id = kwargs.get(self._parent_id_name)
        # Ensure policy engine is initialized
        policy.init()
        # do_authz=True: hide items the caller may not see
        return self._items(request, True, parent_id)
    def show(self, request, id, **kwargs):
        """Returns detailed information about the requested entity.

        Policy failures are converted to 404 so unauthorized callers
        cannot probe for resource existence.
        """
        try:
            # NOTE(salvatore-orlando): The following ensures that fields
            # which are needed for authZ policy validation are not stripped
            # away by the plugin before returning.
            field_list, added_fields = self._do_field_list(
                api_common.list_args(request, "fields"))
            parent_id = kwargs.get(self._parent_id_name)
            # Ensure policy engine is initialized
            policy.init()
            return {self._resource:
                    self._view(request.context,
                               self._item(request,
                                          id,
                                          do_authz=True,
                                          field_list=field_list,
                                          parent_id=parent_id),
                               fields_to_strip=added_fields)}
        except common_policy.PolicyNotAuthorized:
            # To avoid giving away information, pretend that it
            # doesn't exist
            msg = _('The resource could not be found.')
            raise webob.exc.HTTPNotFound(msg)
    def _emulate_bulk_create(self, obj_creator, request, body, parent_id=None):
        """Create items one by one, rolling back on the first failure.

        Used when the plugin lacks native bulk support.  On any error the
        already-created objects are deleted (best effort) before the
        original exception is re-raised, approximating atomicity.
        """
        objs = []
        try:
            for item in body[self._collection]:
                kwargs = {self._resource: item}
                if parent_id:
                    kwargs[self._parent_id_name] = parent_id
                fields_to_strip = self._exclude_attributes_by_policy(
                    request.context, item)
                objs.append(self._filter_attributes(
                    request.context,
                    obj_creator(request.context, **kwargs),
                    fields_to_strip=fields_to_strip))
            return objs
        # Note(salvatore-orlando): broad catch as in theory a plugin
        # could raise any kind of exception
        except Exception:
            with excutils.save_and_reraise_exception():
                for obj in objs:
                    obj_deleter = getattr(self._plugin,
                                          self._plugin_handlers[self.DELETE])
                    try:
                        kwargs = ({self._parent_id_name: parent_id}
                                  if parent_id else {})
                        obj_deleter(request.context, obj['id'], **kwargs)
                    except Exception:
                        # broad catch as our only purpose is to log the
                        # exception
                        LOG.exception(_LE("Unable to undo add for "
                                          "%(resource)s %(id)s"),
                                      {'resource': self._resource,
                                       'id': obj['id']})
                # TODO(salvatore-orlando): The object being processed when the
                # plugin raised might have been created or not in the db.
                # We need a way for ensuring that if it has been created,
                # it is then deleted
    def create(self, request, body=None, **kwargs):
        """Creates a new instance of the requested entity.

        Handles both single and bulk creation: validates the body,
        enforces per-item create policy and quota, dispatches to the
        plugin (native bulk, emulated bulk, or single create) and emits
        the create.start/create.end notifications plus DHCP/nova
        notifications as appropriate.
        """
        parent_id = kwargs.get(self._parent_id_name)
        self._notifier.info(request.context,
                            self._resource + '.create.start',
                            body)
        body = Controller.prepare_request_body(request.context, body, True,
                                               self._resource, self._attr_info,
                                               allow_bulk=self._allow_bulk)
        action = self._plugin_handlers[self.CREATE]
        # Check authz
        if self._collection in body:
            # Have to account for bulk create
            items = body[self._collection]
            deltas = {}
            bulk = True
        else:
            items = [body]
            bulk = False
        # Ensure policy engine is initialized
        policy.init()
        for item in items:
            self._validate_network_tenant_ownership(request,
                                                    item[self._resource])
            policy.enforce(request.context,
                           action,
                           item[self._resource],
                           pluralized=self._collection)
            try:
                tenant_id = item[self._resource]['tenant_id']
                count = quota.QUOTAS.count(request.context, self._resource,
                                           self._plugin, self._collection,
                                           tenant_id)
                # for bulk requests the quota delta accumulates per tenant
                if bulk:
                    delta = deltas.get(tenant_id, 0) + 1
                    deltas[tenant_id] = delta
                else:
                    delta = 1
                kwargs = {self._resource: count + delta}
            except exceptions.QuotaResourceUnknown as e:
                # We don't want to quota this resource
                LOG.debug(e)
            else:
                quota.QUOTAS.limit_check(request.context,
                                         item[self._resource]['tenant_id'],
                                         **kwargs)

        # emits create.end (and DHCP notification) and passes the result
        # through, so it can wrap each return statement below
        def notify(create_result):
            notifier_method = self._resource + '.create.end'
            self._notifier.info(request.context,
                                notifier_method,
                                create_result)
            self._send_dhcp_notification(request.context,
                                         create_result,
                                         notifier_method)
            return create_result

        kwargs = {self._parent_id_name: parent_id} if parent_id else {}
        if self._collection in body and self._native_bulk:
            # plugin does atomic bulk create operations
            obj_creator = getattr(self._plugin, "%s_bulk" % action)
            objs = obj_creator(request.context, body, **kwargs)
            # Use first element of list to discriminate attributes which
            # should be removed because of authZ policies
            fields_to_strip = self._exclude_attributes_by_policy(
                request.context, objs[0])
            return notify({self._collection: [self._filter_attributes(
                request.context, obj, fields_to_strip=fields_to_strip)
                for obj in objs]})
        else:
            obj_creator = getattr(self._plugin, action)
            if self._collection in body:
                # Emulate atomic bulk behavior
                objs = self._emulate_bulk_create(obj_creator, request,
                                                 body, parent_id)
                return notify({self._collection: objs})
            else:
                kwargs.update({self._resource: body})
                obj = obj_creator(request.context, **kwargs)
                self._send_nova_notification(action, {},
                                             {self._resource: obj})
                return notify({self._resource: self._view(request.context,
                                                          obj)})
    def delete(self, request, id, **kwargs):
        """Deletes the specified entity.

        Fetches the object first (so its final state can be reported in
        notifications), enforces the delete policy -- converting failures
        to 404 to avoid leaking existence -- then dispatches the delete
        and emits delete.start/delete.end plus DHCP/nova notifications.
        """
        self._notifier.info(request.context,
                            self._resource + '.delete.start',
                            {self._resource + '_id': id})
        action = self._plugin_handlers[self.DELETE]
        # Check authz
        policy.init()
        parent_id = kwargs.get(self._parent_id_name)
        obj = self._item(request, id, parent_id=parent_id)
        try:
            policy.enforce(request.context,
                           action,
                           obj,
                           pluralized=self._collection)
        except common_policy.PolicyNotAuthorized:
            # To avoid giving away information, pretend that it
            # doesn't exist
            msg = _('The resource could not be found.')
            raise webob.exc.HTTPNotFound(msg)
        obj_deleter = getattr(self._plugin, action)
        obj_deleter(request.context, id, **kwargs)
        notifier_method = self._resource + '.delete.end'
        self._notifier.info(request.context,
                            notifier_method,
                            {self._resource + '_id': id})
        result = {self._resource: self._view(request.context, obj)}
        self._send_nova_notification(action, {}, result)
        self._send_dhcp_notification(request.context,
                                     result,
                                     notifier_method)
    def update(self, request, id, body=None, **kwargs):
        """Updates the specified entity's attributes."""
        parent_id = kwargs.get(self._parent_id_name)
        try:
            payload = body.copy()
        except AttributeError:
            # body was not a dict (e.g. None or malformed input).
            msg = _("Invalid format: %s") % request.body
            raise exceptions.BadRequest(resource='body', msg=msg)
        payload['id'] = id
        self._notifier.info(request.context,
                            self._resource + '.update.start',
                            payload)
        body = Controller.prepare_request_body(request.context, body, False,
                                               self._resource, self._attr_info,
                                               allow_bulk=self._allow_bulk)
        action = self._plugin_handlers[self.UPDATE]
        # Load object to check authz
        # but pass only attributes in the original body and required
        # by the policy engine to the policy 'brain'
        field_list = [name for (name, value) in self._attr_info.iteritems()
                      if (value.get('required_by_policy') or
                          value.get('primary_key') or
                          'default' not in value)]
        # Ensure policy engine is initialized
        policy.init()
        orig_obj = self._item(request, id, field_list=field_list,
                              parent_id=parent_id)
        # Keep an unmodified snapshot for the nova notification diff below.
        orig_object_copy = copy.copy(orig_obj)
        orig_obj.update(body[self._resource])
        # Make a list of attributes to be updated to inform the policy engine
        # which attributes are set explicitly so that it can distinguish them
        # from the ones that are set to their default values.
        orig_obj[const.ATTRIBUTES_TO_UPDATE] = body[self._resource].keys()
        try:
            policy.enforce(request.context,
                           action,
                           orig_obj,
                           pluralized=self._collection)
        except common_policy.PolicyNotAuthorized:
            with excutils.save_and_reraise_exception() as ctxt:
                # If a tenant is modifying it's own object, it's safe to return
                # a 403. Otherwise, pretend that it doesn't exist to avoid
                # giving away information.
                if request.context.tenant_id != orig_obj['tenant_id']:
                    ctxt.reraise = False
            msg = _('The resource could not be found.')
            raise webob.exc.HTTPNotFound(msg)

        obj_updater = getattr(self._plugin, action)
        kwargs = {self._resource: body}
        if parent_id:
            kwargs[self._parent_id_name] = parent_id
        obj = obj_updater(request.context, id, **kwargs)
        result = {self._resource: self._view(request.context, obj)}
        notifier_method = self._resource + '.update.end'
        self._notifier.info(request.context, notifier_method, result)
        self._send_dhcp_notification(request.context,
                                     result,
                                     notifier_method)
        self._send_nova_notification(action, orig_object_copy, result)
        return result
@staticmethod
def _populate_tenant_id(context, res_dict, is_create):
if (('tenant_id' in res_dict and
res_dict['tenant_id'] != context.tenant_id and
not context.is_admin)):
msg = _("Specifying 'tenant_id' other than authenticated "
"tenant in request requires admin privileges")
raise webob.exc.HTTPBadRequest(msg)
if is_create and 'tenant_id' not in res_dict:
if context.tenant_id:
res_dict['tenant_id'] = context.tenant_id
else:
msg = _("Running without keystone AuthN requires "
" that tenant_id is specified")
raise webob.exc.HTTPBadRequest(msg)
    @staticmethod
    def prepare_request_body(context, body, is_create, resource, attr_info,
                             allow_bulk=False):
        """Verifies required attributes are in request body.

        Also checking that an attribute is only specified if it is allowed
        for the given operation (create/update).

        Attribute with default values are considered to be optional.

        body argument must be the deserialized body.
        """
        collection = resource + "s"
        if not body:
            raise webob.exc.HTTPBadRequest(_("Resource body required"))

        LOG.debug("Request body: %(body)s", {'body': body})
        if collection in body:
            # Bulk request: validate every item recursively, wrapping bare
            # items in {resource: item} so each recursion sees one resource.
            if not allow_bulk:
                raise webob.exc.HTTPBadRequest(_("Bulk operation "
                                                 "not supported"))
            if not body[collection]:
                raise webob.exc.HTTPBadRequest(_("Resources required"))
            bulk_body = [
                Controller.prepare_request_body(
                    context, item if resource in item else {resource: item},
                    is_create, resource, attr_info, allow_bulk
                ) for item in body[collection]
            ]
            return {collection: bulk_body}

        res_dict = body.get(resource)
        if res_dict is None:
            msg = _("Unable to find '%s' in request body") % resource
            raise webob.exc.HTTPBadRequest(msg)

        Controller._populate_tenant_id(context, res_dict, is_create)
        Controller._verify_attributes(res_dict, attr_info)

        if is_create:  # POST
            # POST: every allow_post attribute must be present or have a
            # default; attributes not allowed on POST must be absent.
            for attr, attr_vals in attr_info.iteritems():
                if attr_vals['allow_post']:
                    if ('default' not in attr_vals and
                            attr not in res_dict):
                        msg = _("Failed to parse request. Required "
                                "attribute '%s' not specified") % attr
                        raise webob.exc.HTTPBadRequest(msg)
                    res_dict[attr] = res_dict.get(attr,
                                                  attr_vals.get('default'))
                else:
                    if attr in res_dict:
                        msg = _("Attribute '%s' not allowed in POST") % attr
                        raise webob.exc.HTTPBadRequest(msg)
        else:  # PUT
            # PUT: only reject attempts to change read-only attributes.
            for attr, attr_vals in attr_info.iteritems():
                if attr in res_dict and not attr_vals['allow_put']:
                    msg = _("Cannot update read-only attribute %s") % attr
                    raise webob.exc.HTTPBadRequest(msg)

        for attr, attr_vals in attr_info.iteritems():
            if (attr not in res_dict or
                    res_dict[attr] is attributes.ATTR_NOT_SPECIFIED):
                continue
            # Convert values if necessary
            if 'convert_to' in attr_vals:
                res_dict[attr] = attr_vals['convert_to'](res_dict[attr])
            # Check that configured values are correct
            if 'validate' not in attr_vals:
                continue
            for rule in attr_vals['validate']:
                res = attributes.validators[rule](res_dict[attr],
                                                  attr_vals['validate'][rule])
                if res:
                    msg_dict = dict(attr=attr, reason=res)
                    msg = _("Invalid input for %(attr)s. "
                            "Reason: %(reason)s.") % msg_dict
                    raise webob.exc.HTTPBadRequest(msg)
        return body
@staticmethod
def _verify_attributes(res_dict, attr_info):
extra_keys = set(res_dict.keys()) - set(attr_info.keys())
if extra_keys:
msg = _("Unrecognized attribute(s) '%s'") % ', '.join(extra_keys)
raise webob.exc.HTTPBadRequest(msg)
    def _validate_network_tenant_ownership(self, request, resource_item):
        """Forbid creating a port/subnet on another tenant's private network."""
        # TODO(salvatore-orlando): consider whether this check can be folded
        # in the policy engine
        # Admin and advanced-service contexts bypass the check; resources
        # other than ports and subnets are not subject to it.
        if (request.context.is_admin or request.context.is_advsvc or
                self._resource not in ('port', 'subnet')):
            return
        network = self._plugin.get_network(
            request.context,
            resource_item['network_id'])
        # do not perform the check on shared networks
        if network.get('shared'):
            return

        network_owner = network['tenant_id']

        if network_owner != resource_item['tenant_id']:
            msg = _("Tenant %(tenant_id)s not allowed to "
                    "create %(resource)s on this network")
            raise webob.exc.HTTPForbidden(msg % {
                "tenant_id": resource_item['tenant_id'],
                "resource": self._resource,
            })
def create_resource(collection, resource, plugin, params, allow_bulk=False,
                    member_actions=None, parent=None, allow_pagination=False,
                    allow_sorting=False):
    """Build a WSGI Resource serving the given API collection.

    Wraps a Controller for (collection, resource) backed by ``plugin`` and
    returns it as a wsgi Resource using the module-level fault map to
    translate exceptions into HTTP errors.
    """
    controller = Controller(plugin, collection, resource, params, allow_bulk,
                            member_actions=member_actions, parent=parent,
                            allow_pagination=allow_pagination,
                            allow_sorting=allow_sorting)

    return wsgi_resource.Resource(controller, FAULT_MAP)
| yuewko/neutron | neutron/api/v2/base.py | Python | apache-2.0 | 32,611 |
from openmdao.main.api import Component, Assembly
from openmdao.main.datatypes.api import Float, Int, Array
from openmdao.lib.drivers.api import NewtonSolver
import numpy as np
class PolyScalableProblem(Assembly):
    """
    Multivariable polynomial test problem

    m : number of variables (also the number of components)
    X : vector of variables. will be set by the solver
    roots : vector of solutions for each variable.
        The output of each component will be zero when `X` takes the
        value of roots.
    """

    def __init__(self, m):
        # Stored before super().__init__ so configure() can size the arrays.
        self.m = m
        super(PolyScalableProblem, self).__init__()

    def configure(self):
        # Newton solver drives all component residuals F to zero.
        self.add("driver", NewtonSolver())
        self.add("X", Array(np.zeros(self.m), iotype="in"))
        self.add("roots", Array(np.zeros(self.m), iotype="in"))
        self.driver.add_parameter("X", low=-1e30, high=1e30)

        # One PolyComp per variable, all sharing X and roots, each
        # contributing one F = 0 constraint.
        for i in xrange(self.m):
            compname = "p%s" % str(i)
            self.add(compname, PolyComp(self.m))
            self.driver.workflow.add(compname)

            self.connect("X", compname + ".X")
            self.connect("roots", compname + ".roots")

            self.driver.add_constraint("p%s.F = 0.0" % str(i))
class PolyComp(Component):
    """
    Single polynomial component.

    Constructs and evaluates a polynomial constructed from variables sampled
    from the input array, `X`

    The variables sampled from `X` can be set on instancing (by setting the
    indices into `X` in the list `idx`). Otherwise, they will be selected
    randomly.

    For example, if idx = [0,1], than the output F of this component is:

    F = (X[0]-roots[0]) + (X[1]-roots[1]) + (X[0]-roots[0])*(X[1]-roots[1])

    and will take the value of zero if the values of `X` match the corresponding
    `root` values.
    """

    F = Float(0., iotype="out")

    def __init__(self, m, idx=None):
        self.m = m
        # Avoid a mutable default argument; callers may still pass a list.
        self.idx = [] if idx is None else idx
        self.p = 1.
        super(PolyComp, self).__init__()

    def configure(self):
        self.add("X", Array(np.zeros(self.m), iotype="in"))
        self.add("roots", Array(np.zeros(self.m), iotype="in"))

        if not len(self.idx):
            # Pick a random subset of at least two of the m variables.
            num_vars = np.random.randint(2, self.m)
            # BUG FIX: the original line ended with a stray trailing comma,
            # storing a 1-tuple wrapping the index array. That made
            # provideJ's `self.idx[0]` work only for the random case and
            # break for an explicitly supplied index list. Store the index
            # array directly so both cases behave identically.
            self.idx = np.random.choice(range(self.m), num_vars,
                                        replace=False)

    def execute(self):
        # Sum of deviations plus product of deviations over the sampled set.
        self.F = np.sum(self.X[self.idx] - self.roots[self.idx])
        self.F += np.prod(self.X[self.idx] - self.roots[self.idx])

    def provideJ(self):
        self.J = np.zeros((1, self.m))
        # BUG FIX: iterate over self.m instead of the global `m`, which only
        # exists when the module is run as a script (NameError otherwise).
        for i in xrange(self.m):
            if i in self.idx:
                # dF/dX[i] = 1 + prod over the other sampled deviations.
                term = 1. + np.prod([self.X[k] - self.roots[k]
                                     for k in self.idx if k != i])
                self.J[0, i] = term
        return self.J

    def list_deriv_vars(self):
        input_keys = ('X',)
        output_keys = ('F',)
        return input_keys, output_keys
if __name__ == "__main__":

    m = 10  # number of polynomial variables & components

    A = PolyScalableProblem(m)
    # With roots 0..m-1 the solver should converge X towards arange(m).
    A.roots = np.arange(m)
    # A.X = np.arange(m) # actual solution
    A.run()
    print A.X # computed solution
    print max([A.get("p%s" % str(i)).F for i in xrange(m)]) # max error
import re
# Anchors used to build prefix-style search patterns.
REGEX_START = '^'
REGEX_END = '.*'


def create_regex(search_text):
    """Return a regex that matches any string starting with search_text."""
    return ''.join((REGEX_START, re.escape(search_text), REGEX_END))
def add_matches_to_list(matches, list_):
    """Append each match to list_ in order, skipping entries already present."""
    for candidate in matches:
        if candidate in list_:
            continue
        list_.append(candidate)
# def calculate_symmetric_difference_between_two_cpe_lists(cpe_list_a, cpe_list_b):
# if len(cpe_list_a) > len(cpe_list_b):
# return calculate_symmetric_difference(cpe_list_a, cpe_list_b)
# return calculate_symmetric_difference(cpe_list_b, cpe_list_a)
#
#
# def calculate_symmetric_difference(cpe_list_a, cpe_list_b):
# difference_set = []
# for a in cpe_list_a:
# is_common_element = False
# for b in cpe_list_b:
# if dict(a).get('uri_binding') == dict(b).get('uri_binding'):
# is_common_element = True
# break
# if not is_common_element:
# difference_set.append(a)
# return difference_set
#
#
# def calculate_intersection_between_two_cpe_lists(cpe_list_a, cpe_list_b):
# if len(cpe_list_a) > len(cpe_list_b):
# return calculate_intersection(cpe_list_a, cpe_list_b)
# return calculate_intersection(cpe_list_b, cpe_list_a)
#
#
# def calculate_intersection(cpe_list_a, cpe_list_b):
# intersection_set = []
# for a in cpe_list_a:
# for b in cpe_list_b:
# if dict(a).get('uri_binding') == dict(b).get('uri_binding'):
# intersection_set.append(a)
# return intersection_set
| fkie-cad/iva | matching/cpe_matcher_utils.py | Python | lgpl-3.0 | 1,514 |
# Copyright 2010 Steven Robertson
# 2012 Christoph Reiter
# 2017 Nick Boultbee
# 2018 Olli Helin
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
from gi.repository import Gtk, Gdk
from quodlibet import _
from quodlibet import app
from quodlibet import config
from quodlibet.qltk import Button, Icons
from quodlibet.plugins.events import EventPlugin
from quodlibet.util import print_e, print_w
import ast
# Presets (roughly) taken from Pulseaudio equalizer
# Presets (roughly) taken from Pulseaudio equalizer
# Center frequencies (Hz) the preset gain tables below are defined over;
# interp_bands() maps them onto whatever bands the backend exposes.
PRESET_BANDS = [50, 100, 156, 220, 311, 440, 622, 880, 1250, 1750, 2500,
                3500, 5000, 10000, 20000]
# Maps preset id -> (translated display name, per-band gains in dB).
PRESETS = {
    "flat": (_("Flat"), [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]),
    "live": (_("Live"), [-9.0, -5.5, 0.0, 1.5, 2.0, 3.5, 3.5, 3.5, 3.5,
                         3.5, 3.5, 3.5, 3.0, 1.5, 2.0]),
    "full_bass_treble": (_("Full Bass & Treble"),
                         [5.0, 5.0, 3.5, 2.5, 0.0, -7.0, -14.0, -10.0, -10.0,
                          -8.0, 1.0, 1.0, 5.0, 7.5, 9.5]),
    "club": (_("Club"), [0.0, 0.0, 0.0, 0.0, 3.5, 3.5, 3.5, 3.5, 3.5,
                         3.5, 3.5, 2.5, 2.5, 0.0, 0.0]),
    "large_hall": (_("Large Hall"), [7.0, 7.0, 7.0, 3.5, 3.0, 3.0, 3.0, 1.5,
                                     0.0, -2.0, -3.5, -6.0, -9.0, -1.0, 0.0]),
    "party": (_("Party"), [5.0, 5.0, 5.0, 3.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
                           0.0, 0.0, 0.0, 2.5, 5.0]),
    "rock": (_("Rock"), [5.5, 2.5, 2.5, -8.5, -10.5, -11.0, -16.0, -14.5,
                         -6.5, -5.5, -3.0, 3.0, 6.5, 7.0, 7.0]),
    "soft": (_("Soft"), [3.0, 3.0, 1.0, 1.0, 0.0, -2.5, -5.0, 1.5, 0.0, 1.0,
                         3.0, 3.0, 6.0, 8.0, 8.0]),
    "full_bass": (_("Full Bass"),
                  [-16.0, -16.0, 6.5, 6.5, 6.0, 5.5, 4.5, 1.0, 1.0, 1.0, -8.0,
                   -10.0, -16.0, -16.0, -20.5]),
    "classical": (_("Classical"),
                  [0, 0, 0, 0, 0, 0, 0, 0, 0,
                   0, 0, 0, -21.0, -21.0, -27.0]),
    "reggae": (_("Reggae"), [0.0, 0.0, 0.0, 0.0, 0.0, -4.5, -10.0, -6.0, 0.5,
                             1.0, 2.0, 4.0, 4.0, 0.0, 0.0]),
    "headphones": (_("Headphones"),
                   [3.0, 3.0, 7.0, 7.0, 3.0, -1.0, -6.5, -6.0, -4.5, -4.0,
                    1.0, 1.0, 6.0, 8.0, 9.0]),
    "soft_rock": (_("Soft Rock"), [3.0, 3.0, 3.0, 1.5, 1.5, 1.5, 0.0, -3.5,
                                   -8.0, -7.0, -10, -9.0, -6.5, 1.5, 6.0]),
    "full_treble": (_("Full Treble"),
                    [5.0, -18.5, -18.5, -18.5, -18.5, -10.0, -8.0, -6.5, 1.5,
                     1.5, 1.5, 8.5, 10.5, 10.5, 10.5]),
    "dance": (_("Dance"), [6.0, 4.0, 4.0, 1.5, 1.5, 1.5, 0.0, 0.0, 0.0,
                           1.0, -10.5, -14.0, -15.0, -7.0, 0.0]),
    "pop": (_("Pop"), [-3.5, 1.0, 2.0, 3.0, 5.0, 5.5, 6.5, 5.0, 3.0, 1.5, 0.0,
                       -2.5, -5.0, -5.0, -3.0]),
    "techno": (_("Techno"), [5.0, 4.0, 4.0, 3.0, 0.0, -4.5, -10.0, -9.0, -8.0,
                             -5.5, -1.5, 3.0, 6.0, 6.0, 6.0]),
    "ska": (_("Ska"), [-4.5, -8.0, -9.0, -8.5, -8.0, -6.0, 0.0, 1.5, 2.5, 2.5,
                       3.0, 3.0, 6.0, 6.0, 6.0]),
    "laptop": (_("Laptop"), [-1, -1, -1, -1, -5, -10, -18, -15, -10, -5, -5,
                             -5, -5, 0, 0]),
}
def interp_bands(src_band, target_band, src_gain):
    """Linear interp from one band to another. All must be sorted.

    Maps the per-band gains `src_gain` (defined over frequencies
    `src_band`) onto the frequencies in `target_band`. Bands present in
    both lists copy their gain directly; all others are linearly
    interpolated (extrapolated from the nearest segment at the edges)
    and capped at +12 dB.
    """
    gain = []
    for b in target_band:
        if b in src_band:
            # BUG FIX: look the gain up by the band's position in src_band.
            # The old code used the *target* band's index into src_gain,
            # returning the wrong entry (or raising IndexError) whenever
            # the two band lists were not index-aligned.
            gain.append(src_gain[src_band.index(b)])
            continue
        # Position of b within the sorted source bands, clamped so that
        # b below/above the source range extrapolates from the first/last
        # segment.
        idx = sorted(src_band + [b]).index(b)
        idx = min(max(idx, 1), len(src_band) - 1)
        x1, x2 = src_band[idx - 1:idx + 1]
        y1, y2 = src_gain[idx - 1:idx + 1]
        g = y1 + ((y2 - y1) * (b - x1)) / float(x2 - x1)
        gain.append(min(12.0, g))
    return gain
def get_config():
    """Load the saved equalizer presets as a dict.

    The dict always carries a "Current" entry (list of per-band dB
    levels). Legacy configs that stored a bare list are converted on the
    fly; anything unparseable or malformed falls back to no levels.
    """
    try:
        raw = config.get("plugins", "equalizer_levels", "[]")
        presets = ast.literal_eval(raw)

        if isinstance(presets, list):
            print_w("Converting old EQ config to new format.")
            presets = {"Current": presets}
        if not isinstance(presets, dict):
            raise ValueError("Saved config is of wrong type.")
        if "Current" not in presets:
            raise ValueError("Saved config was malformed.")

        # Run through the values to check everything is of correct type.
        for levels in presets.values():
            [float(level) for level in levels]

        return presets
    except (config.Error, ValueError) as e:
        print_e(str(e))
        return {"Current": []}
class Equalizer(EventPlugin):
    """Event plugin exposing the backend's equalizer with preset support."""

    PLUGIN_ID = "Equalizer"
    PLUGIN_NAME = _("Equalizer")
    PLUGIN_DESC = _("Controls the tone of your music with an equalizer.\n"
                    "Click or use keys to customise levels "
                    "(right-click resets the band).")
    PLUGIN_ICON = Icons.AUDIO_CARD

    @property
    def player_has_eq(self):
        # True when the active playback backend exposes equalizer bands.
        return hasattr(app.player, 'eq_bands') and app.player.eq_bands

    def __init__(self):
        super().__init__()
        # Whether the plugin is active; gates apply().
        self._enabled = False
        # Preset dict as returned by get_config() (always has "Current").
        self._config = {}

    def apply(self):
        """Push the saved "Current" band levels to the player backend."""
        if not self.player_has_eq:
            return
        levels = self._enabled and get_config()["Current"] or []
        lbands = len(app.player.eq_bands)
        # Fall back to a flat EQ if the saved levels don't match the
        # backend's band count (e.g. after switching backends).
        if len(levels) != lbands:
            print_w("Number of bands didn't match current. Using flat EQ.")
            levels = [0.] * lbands
        app.player.eq_values = levels

    def enabled(self):
        self._enabled = True
        self.apply()

    def disabled(self):
        self._enabled = False
        self.apply()

    def PluginPreferences(self, win):
        """Build the preferences UI: band sliders plus preset handling."""
        main_vbox = Gtk.VBox(spacing=12)

        if not self.player_has_eq:
            l = Gtk.Label()
            l.set_markup(
                _('The current backend does not support equalization.'))
            main_vbox.pack_start(l, False, True, 0)
            return main_vbox

        def format_hertz(band):
            # Human-readable band label, e.g. "440 Hz" or "1.3 kHz".
            if band >= 1000:
                return _('%.1f kHz') % (band / 1000.)
            return _('%d Hz') % band

        bands = [format_hertz(band) for band in app.player.eq_bands]
        self._config = get_config()
        levels = self._config["Current"]
        # This fixes possible old corrupt config files with extra level values.
        if len(levels) != len(bands):
            print_w("Number of bands didn't match current. Using flat EQ.")
            levels = [0.] * len(bands)

        table = Gtk.Table(rows=len(bands), columns=3)
        table.set_col_spacings(6)

        def set_band(adj, idx):
            # Snap slider values to 0.5 dB steps, persist and apply.
            rounded = int(adj.get_value() * 2) / 2.0
            adj.set_value(rounded)
            levels[idx] = rounded
            self._config["Current"] = levels
            config.set('plugins', 'equalizer_levels', str(self._config))
            self.apply()

        adjustments = []

        for i, band in enumerate(bands):
            # align numbers and suffixes in separate rows for great justice
            lbl = Gtk.Label(label=band.split()[0])
            lbl.set_alignment(1, 0.5)
            lbl.set_padding(0, 4)
            table.attach(lbl, 0, 1, i, i + 1, xoptions=Gtk.AttachOptions.FILL)
            lbl = Gtk.Label(label=band.split()[1])
            lbl.set_alignment(1, 0.5)
            table.attach(lbl, 1, 2, i, i + 1, xoptions=Gtk.AttachOptions.FILL)
            adj = Gtk.Adjustment.new(levels[i], -24., 12., 0.5, 3, 0)
            adj.connect('value-changed', set_band, i)
            adjustments.append(adj)
            hs = Gtk.HScale(adjustment=adj)
            hs.connect('button-press-event', self.__rightclick)
            hs.set_draw_value(True)
            hs.set_value_pos(Gtk.PositionType.RIGHT)
            hs.connect('format-value', lambda s, v: _('%.1f dB') % v)
            table.attach(hs, 2, 3, i, i + 1)
        main_vbox.pack_start(table, True, True, 0)

        # Reset EQ button
        def clicked_rb(button):
            [adj.set_value(0) for adj in adjustments]
            self._combo_default.set_active(0)
            self._combo_custom.set_active(0)

        # Delete custom preset button
        def clicked_db(button):
            selected_index = self._combo_custom.get_active()
            if selected_index < 1:
                return  # Select…
            selected = self._combo_custom.get_active_text()
            self._combo_custom.set_active(0)
            self._combo_custom.remove(selected_index)
            del self._config[selected]
            config.set('plugins', 'equalizer_levels', str(self._config))

        # Save custom preset button
        def clicked_sb(button):
            name = self._preset_name_entry.get_text()
            is_new = not name in self._config.keys()
            levels = [adj.get_value() for adj in adjustments]
            self._config[name] = levels
            config.set('plugins', 'equalizer_levels', str(self._config))
            self._preset_name_entry.set_text("")
            if is_new:
                self._combo_custom.append_text(name)

            def find_iter(list_store, text):
                # Locate the tree iter whose first column equals text.
                i = list_store.get_iter_first()
                while (i is not None):
                    if list_store.get_value(i, 0) == text:
                        return i
                    i = list_store.iter_next(i)
                return None

            itr = find_iter(self._combo_custom.get_model(), name)
            self._combo_custom.set_active_iter(itr)

        sorted_presets = sorted(PRESETS.items())

        def default_combo_changed(combo):
            if combo.get_active() < 1:
                return  # Select…
            self._combo_custom.set_active(0)
            # Map the preset's reference bands onto the backend's bands.
            gain = sorted_presets[combo.get_active() - 1][1][1]
            gain = interp_bands(PRESET_BANDS, app.player.eq_bands, gain)
            for (g, a) in zip(gain, adjustments):
                a.set_value(g)

        def custom_combo_changed(combo):
            if combo.get_active() < 1:
                # Case: Select…
                self._delete_button.set_sensitive(False)
                return
            self._combo_default.set_active(0)
            self._delete_button.set_sensitive(True)
            gain = self._config[combo.get_active_text()]
            for (g, a) in zip(gain, adjustments):
                a.set_value(g)

        def save_name_changed(entry):
            # "Current" is reserved for the live levels and not saveable.
            name = entry.get_text()
            if not name or name == "Current" or name.isspace():
                self._save_button.set_sensitive(False)
            else:
                self._save_button.set_sensitive(True)

        frame = Gtk.Frame(label=_("Default presets"), label_xalign=0.5)
        main_middle_hbox = Gtk.HBox(spacing=6)

        # Default presets
        combo = Gtk.ComboBoxText()
        self._combo_default = combo
        combo.append_text(_("Select…"))
        combo.set_active(0)
        for key, (name, gain) in sorted_presets:
            combo.append_text(name)
        combo.connect("changed", default_combo_changed)

        # This block is just for padding.
        padboxv = Gtk.VBox()
        padboxv.pack_start(combo, True, True, 6)
        padboxh = Gtk.HBox()
        padboxh.pack_start(padboxv, True, True, 6)
        frame.add(padboxh)
        main_middle_hbox.pack_start(frame, True, True, 0)

        reset = Button(_("_Reset EQ"), Icons.EDIT_UNDO)
        reset.connect('clicked', clicked_rb)
        main_middle_hbox.pack_start(reset, False, False, 0)
        main_vbox.pack_start(main_middle_hbox, False, False, 0)

        frame = Gtk.Frame(label=_("Custom presets"), label_xalign=0.5)
        main_bottom_vbox = Gtk.VBox()

        # Custom presets
        combo = Gtk.ComboBoxText()
        self._combo_custom = combo
        combo.append_text(_("Select…"))
        combo.set_active(0)
        custom_presets = self._config.keys() - {"Current"}
        for key in custom_presets:
            combo.append_text(key)
        combo.connect("changed", custom_combo_changed)

        hb = Gtk.HBox(spacing=6)
        hb.pack_start(combo, True, True, 0)
        delete = Button(_("_Delete selected"), Icons.EDIT_DELETE)
        delete.connect('clicked', clicked_db)
        delete.set_sensitive(False)
        self._delete_button = delete
        hb.pack_start(delete, False, False, 0)
        main_bottom_vbox.pack_start(hb, True, True, 6)

        hs = Gtk.HSeparator()
        main_bottom_vbox.pack_start(hs, True, True, 6)

        hb = Gtk.HBox()
        l = Gtk.Label(label=_("Preset name for saving:"))
        hb.pack_start(l, False, False, 0)
        main_bottom_vbox.pack_start(hb, False, False, 0)

        e = Gtk.Entry()
        e.connect("changed", save_name_changed)
        self._preset_name_entry = e
        hb = Gtk.HBox(spacing=6)
        hb.pack_start(e, True, True, 0)
        save = Button(_("_Save"), Icons.DOCUMENT_SAVE)
        save.connect('clicked', clicked_sb)
        save.set_sensitive(False)
        self._save_button = save
        hb.pack_start(save, False, False, 0)
        main_bottom_vbox.pack_start(hb, True, True, 6)

        # This block is just for padding.
        padboxh = Gtk.HBox()
        padboxh.pack_start(main_bottom_vbox, True, True, 6)
        frame.add(padboxh)
        main_vbox.pack_start(frame, True, True, 0)

        return main_vbox

    def __rightclick(self, hs, event):
        # Right-clicking a band slider resets that band to 0 dB.
        if event.button == Gdk.BUTTON_SECONDARY:
            hs.set_value(0)
| Mellthas/quodlibet | quodlibet/ext/events/equalizer.py | Python | gpl-2.0 | 13,707 |
# Importing standard libraries
import sys
'''
Function to print the array with numbers in a space seperated format
'''
def printArray(a, delimiter):
    """Print the elements of `a` on one line, separated by `delimiter`.

    BUG FIX: the original built the line with `+=` and then called
    `arrayStr.rstrip()` without using the result (strings are immutable),
    so a trailing delimiter was always printed. Joining the stringified
    elements produces the intended output with no trailing separator.
    (The parenthesized print form is valid in both Python 2 and 3.)
    """
    print(delimiter.join(str(element) for element in a))
'''
Space Efficient Counting sort version. Sorts using regular counting sort
algorithm except for the fact there is wrapping and unwrapping of vlaues
in the array to make space efficient frequency Table in the counting sort
Time Complexity = O(N)
Space Complexity = O(Max - Min)
'''
def unWrapIndex(i, minVal):
    """Map a frequency-table slot back to the value it represents."""
    return minVal + i


def wrapIndex(i, minVal):
    """Map a value onto its slot in the frequency table."""
    return i - minVal


def boundCountingSort(a):
    """Counting sort over the closed range [min(a), max(a)].

    Returns a tuple (frequency_table, sorted_copy_of_a). The frequency
    table has one slot per value in the range, indexed via wrapIndex.
    """
    lo = min(a)
    hi = max(a)
    freq = [0] * (hi - lo + 1)
    for value in a:
        freq[wrapIndex(value, lo)] += 1
    ordered = []
    for slot, count in enumerate(freq):
        ordered.extend([unWrapIndex(slot, lo)] * count)
    return freq, ordered
'''
Main function to run the program. We simply calculate the
frequency diff based on logic used in counting sort which
enables us to run this algo in O(N) time
'''
if __name__ == "__main__":
    # Input format: size of a, elements of a, size of b, elements of b.
    # The sizes are read but unused; lengths come from the element lists.
    aSize = sys.stdin.readline().rstrip()
    a = [int(x) for x in sys.stdin.readline().rstrip().split()]
    bSize = sys.stdin.readline().rstrip()
    b = [int(x) for x in sys.stdin.readline().rstrip().split()]
    [freqArray,sortedArray] = boundCountingSort(b)
    # Computing the missing elements. Logic used is decrease
    # frequency of elements existing in a in the frequency table of
    # the array b . Frequency table was returned above
    bMin = min(b)
    bMax = max(b)
    outputArr = []
    for i in a:
        index = wrapIndex(i,bMin)
        freqArray[index] -= 1
    # Any slot with remaining count > 0 is a value of b missing from a.
    for i in range(len(freqArray)):
        element = unWrapIndex(i,bMin)
        for j in range(freqArray[i]):
            outputArr.append(element)
    # Need to print missing numbers only once, hence set used
    outputArr = list(set(outputArr))
    # Need to print in ascending order, hence sorting used
    outputArr.sort()
    # Printing the result to the console
    printArray(outputArr," ")
#!/usr/bin/env python2
#
# vocab.py - allows the user to enter word and meaning
#
# Copyright (c) 2015 Harsimran Singh <me@harsimransingh.in>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import sys, os, time
def main(argv):
    """Dispatch on the command-line option: add a word or display words."""
    # Only zero or one option is accepted.
    if len(sys.argv) != 1 and len(sys.argv) != 2:
        print "Usage: python vocab.py --option"
        print "Option: -a or --add to add a word (default if no args given)"
        print "Option: -d or --disp to display words"
        sys.exit(10)
    # Adding is the default action when no option is given.
    if len(sys.argv) == 1 or sys.argv[1] == '-a' or sys.argv[1] == '--add':
        word, meaning = get_word()
        add_word(word, meaning)
    elif sys.argv[1] == '-d' or sys.argv[1] == '--disp':
        display_words()
def get_word():
    """Prompt the user for a word and its meaning; return (word, meaning)."""
    print "Enter the word",
    word = raw_input()
    #print word
    print "enter the meaning",
    meaning = raw_input()
    #print meaning
    return word, meaning
def add_word(word, meaning):
    """Append a "word, meaning" line to today's word-list file.

    Entries are stored under ~/wordlist/ in one file per calendar day
    (named by file_to_store()); the directory is created on first use.

    Fixes: the original computed the storage path twice with two
    different mechanisms (os.path.join, then string concatenation),
    re-checked directory existence redundantly, and leaked a leftover
    debug print of the file name to stdout.
    """
    main_directory = os.path.join(os.path.expanduser("~"), "wordlist")
    # Create the storage directory on first use.
    if not os.path.exists(main_directory):
        os.makedirs(main_directory)
    filename = os.path.join(main_directory, file_to_store())
    f = open(filename, 'a')
    f.write(word + ', ' + meaning + '\n')
    f.close()
def display_words():
    """Prompt for a date and print that day's stored word/meaning pairs."""
    print "Enter date(mmddyy): ",
    date = raw_input()
    # Word lists are stored as ~/wordlist/wl<mmddyy>.
    filename = 'wl' + date
    filepath = os.path.expanduser("~") + "/wordlist/" + filename
    if not os.path.exists(filepath):
        print "sorry file not found!"
        exit()
    # display the contents of the file
    f = open(filepath, 'r')
    for line in f:
        line.rstrip('\n')
        word, meaning = line.split(',')
        print word + ": " + meaning,
def file_to_store():
    """Return the word-list file name for today, e.g. 'wl123124'.

    One file per calendar day keeps each day's vocabulary separate.
    """
    return "wl" + time.strftime("%m%d%y")
# Script entry point: forward the raw argv to main() for dispatching.
if __name__ == '__main__':
    main(sys.argv)
| harsimrans/vocab-builder | vocab.py | Python | gpl-3.0 | 2,800 |
"""
WSGI config for inflation project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Default to the project's settings module; a deployment can override this
# by exporting DJANGO_SETTINGS_MODULE before the process starts.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "inflation.settings")
# Module-level WSGI callable that servers (gunicorn, mod_wsgi, ...) import.
application = get_wsgi_application()
| lexieheinle/inflation-vs-unemployment | inflation/inflation/wsgi.py | Python | mit | 395 |
from django.contrib.comments.models import Comment
from . import CommentTestCase
from ..models import Author, Article
class CommentModelTests(CommentTestCase):
    """Basic field and property checks on the Comment model."""

    def testSave(self):
        # submit_date must be populated automatically on save.
        for c in self.createSomeComments():
            self.assertNotEqual(c.submit_date, None)

    def testUserProperties(self):
        # name/email/url reflect the fixture's stored values; c1 has no
        # associated user while c3 and c4 share one.
        c1, c2, c3, c4 = self.createSomeComments()
        self.assertEqual(c1.name, "Joe Somebody")
        self.assertEqual(c2.email, "jsomebody@example.com")
        self.assertEqual(c3.name, "Frank Nobody")
        self.assertEqual(c3.url, "http://example.com/~frank/")
        self.assertEqual(c1.user, None)
        self.assertEqual(c3.user, c4.user)
class CommentManagerTests(CommentTestCase):
    """Tests for the custom manager methods on Comment.objects."""

    def testInModeration(self):
        """Comments that aren't public are considered in moderation"""
        c1, c2, c3, c4 = self.createSomeComments()
        c1.is_public = False
        c2.is_public = False
        c1.save()
        c2.save()
        moderated_comments = list(Comment.objects.in_moderation().order_by("id"))
        self.assertEqual(moderated_comments, [c1, c2])

    def testRemovedCommentsNotInModeration(self):
        """Removed comments are not considered in moderation"""
        c1, c2, c3, c4 = self.createSomeComments()
        c1.is_public = False
        c2.is_public = False
        # Marking a comment removed excludes it even though it is non-public.
        c2.is_removed = True
        c1.save()
        c2.save()
        moderated_comments = list(Comment.objects.in_moderation())
        self.assertEqual(moderated_comments, [c1])

    def testForModel(self):
        # for_model accepts both a model class and a model instance.
        c1, c2, c3, c4 = self.createSomeComments()
        article_comments = list(Comment.objects.for_model(Article).order_by("id"))
        author_comments = list(Comment.objects.for_model(Author.objects.get(pk=1)))
        self.assertEqual(article_comments, [c1, c3])
        self.assertEqual(author_comments, [c2])

    def testPrefetchRelated(self):
        c1, c2, c3, c4 = self.createSomeComments()
        # one for comments, one for Articles, one for Author
        with self.assertNumQueries(3):
            qs = Comment.objects.prefetch_related('content_object')
            [c.content_object for c in qs]
| denisenkom/django | tests/comment_tests/tests/test_models.py | Python | bsd-3-clause | 2,160 |
import copy
import itertools
from typing import (
Any,
Callable,
Dict,
Iterable,
Iterator,
List,
MutableSequence,
Optional,
Tuple,
Type,
Union,
)
from ._utils import (
ValueKind,
_is_missing_literal,
_is_none,
_resolve_optional,
format_and_raise,
get_value_kind,
is_int,
is_primitive_list,
is_structured_config,
type_str,
)
from .base import Container, ContainerMetadata, Node
from .basecontainer import BaseContainer
from .errors import (
ConfigAttributeError,
ConfigTypeError,
ConfigValueError,
KeyValidationError,
MissingMandatoryValue,
ReadonlyConfigError,
ValidationError,
)
class ListConfig(BaseContainer, MutableSequence[Any]):
_content: Union[List[Node], None, str]
    def __init__(
        self,
        content: Union[List[Any], Tuple[Any, ...], str, None],
        key: Any = None,
        parent: Optional[Container] = None,
        element_type: Union[Type[Any], Any] = Any,
        is_optional: bool = True,
        ref_type: Union[Type[Any], Any] = Any,
        flags: Optional[Dict[str, bool]] = None,
    ) -> None:
        """Create a ListConfig node from a list/tuple, another ListConfig,
        a special string (e.g. interpolation) or None.

        Any failure is rethrown through format_and_raise so the error
        carries the node's key context.
        """
        try:
            if isinstance(content, ListConfig):
                # Inherit the source ListConfig's flags unless overridden.
                if flags is None:
                    flags = content._metadata.flags
            super().__init__(
                parent=parent,
                metadata=ContainerMetadata(
                    ref_type=ref_type,
                    object_type=list,
                    key=key,
                    optional=is_optional,
                    element_type=element_type,
                    key_type=int,
                    flags=flags,
                ),
            )
            # Write through __dict__ because __setattr__ is disabled
            # on ListConfig.
            self.__dict__["_content"] = None
            self._set_value(value=content, flags=flags)
        except Exception as ex:
            format_and_raise(node=None, key=key, value=None, cause=ex, msg=str(ex))
    def _validate_get(self, key: Any, value: Any = None) -> None:
        """Raise KeyValidationError unless key is a valid list index type."""
        if not isinstance(key, (int, slice)):
            # $KEY_TYPE is substituted with the actual type by the error
            # formatting machinery.
            raise KeyValidationError(
                "ListConfig indices must be integers or slices, not $KEY_TYPE"
            )
    def _validate_set(self, key: Any, value: Any) -> None:
        """Validate that `value` may be assigned at index `key`.

        Checks the readonly flag, optionality of the target node and that
        structured-config values are subclasses of the declared element
        type. MISSING ('???') is always accepted.
        """
        from omegaconf import OmegaConf

        self._validate_get(key, value)

        if self._get_flag("readonly"):
            raise ReadonlyConfigError("ListConfig is read-only")

        if 0 <= key < self.__len__():
            # Overwriting an existing element: honor its optionality.
            target = self._get_node(key)
            if target is not None:
                assert isinstance(target, Node)
                if value is None and not target._is_optional():
                    raise ValidationError(
                        "$FULL_KEY is not optional and cannot be assigned None"
                    )

        vk = get_value_kind(value)
        if vk == ValueKind.MANDATORY_MISSING:
            return
        else:
            is_optional, target_type = _resolve_optional(self._metadata.element_type)
            value_type = OmegaConf.get_type(value)
            if (value_type is None and not is_optional) or (
                is_structured_config(target_type)
                and value_type is not None
                and not issubclass(value_type, target_type)
            ):
                msg = (
                    f"Invalid type assigned: {type_str(value_type)} is not a "
                    f"subclass of {type_str(target_type)}. value: {value}"
                )
                raise ValidationError(msg)
    def __deepcopy__(self, memo: Dict[int, Any]) -> "ListConfig":
        """Deep-copy the list while re-parenting elements to the copy.

        Each element's _parent is temporarily detached so the deepcopy
        does not recurse up through the parent chain, then restored.
        """
        res = ListConfig(None)
        res.__dict__["_metadata"] = copy.deepcopy(self.__dict__["_metadata"], memo=memo)
        res.__dict__["_flags_cache"] = copy.deepcopy(
            self.__dict__["_flags_cache"], memo=memo
        )

        src_content = self.__dict__["_content"]
        if isinstance(src_content, list):
            content_copy: List[Optional[Node]] = []
            for v in src_content:
                old_parent = v.__dict__["_parent"]
                try:
                    v.__dict__["_parent"] = None
                    vc = copy.deepcopy(v, memo=memo)
                    vc.__dict__["_parent"] = res
                    content_copy.append(vc)
                finally:
                    # Always restore the source element's parent link.
                    v.__dict__["_parent"] = old_parent
        else:
            # None and strings can be assigned as is
            content_copy = src_content

        res.__dict__["_content"] = content_copy
        # The copy shares the original's parent (it is not re-parented).
        res.__dict__["_parent"] = self.__dict__["_parent"]
        return res
def copy(self) -> "ListConfig":
return copy.copy(self)
# hide content while inspecting in debugger
def __dir__(self) -> Iterable[str]:
if self._is_missing() or self._is_none():
return []
return [str(x) for x in range(0, len(self))]
    def __setattr__(self, key: str, value: Any) -> None:
        """Attribute assignment is not supported on lists; always raises."""
        self._format_and_raise(
            key=key,
            value=value,
            cause=ConfigAttributeError("ListConfig does not support attribute access"),
        )
        assert False
    def __getattr__(self, key: str) -> Any:
        # PyCharm is sometimes inspecting __members__, be sure to tell it we don't have that.
        if key == "__members__":
            raise AttributeError()
        if key == "__name__":
            raise AttributeError()
        # getattr(cfg, "0")-style access: integer-looking names index the list.
        if is_int(key):
            return self.__getitem__(int(key))
        else:
            self._format_and_raise(
                key=key,
                value=None,
                cause=ConfigAttributeError(
                    "ListConfig does not support attribute access"
                ),
            )
    def __getitem__(self, index: Union[int, slice]) -> Any:
        """Return the resolved value at `index`; a slice yields a plain
        Python list of resolved values."""
        try:
            if self._is_missing():
                raise MissingMandatoryValue("ListConfig is missing")
            self._validate_get(index, None)
            if self._is_none():
                raise TypeError(
                    "ListConfig object representing None is not subscriptable"
                )
            assert isinstance(self.__dict__["_content"], list)
            if isinstance(index, slice):
                result = []
                # islice requires non-negative parameters; see
                # _correct_index_params for the normalization rules.
                start, stop, step = self._correct_index_params(index)
                for slice_idx in itertools.islice(
                    range(0, len(self)), start, stop, step
                ):
                    val = self._resolve_with_default(
                        key=slice_idx, value=self.__dict__["_content"][slice_idx]
                    )
                    result.append(val)
                if index.step and index.step < 0:
                    # Negative-step slices were walked forward; restore order.
                    result.reverse()
                return result
            else:
                return self._resolve_with_default(
                    key=index, value=self.__dict__["_content"][index]
                )
        except Exception as e:
            self._format_and_raise(key=index, value=None, cause=e)
    def _correct_index_params(self, index: slice) -> Tuple[int, int, int]:
        """Normalize a slice into (start, stop, step) usable with
        itertools.islice, which only accepts non-negative values.

        Negative start/stop are rebased against the list length; a negative
        step is mapped to a positive walk over the equivalent forward
        window (the caller reverses the collected items, see __getitem__).
        """
        start = index.start
        stop = index.stop
        step = index.step
        if index.start and index.start < 0:
            start = self.__len__() + index.start
        if index.stop and index.stop < 0:
            stop = self.__len__() + index.stop
        if index.step and index.step < 0:
            # islice cannot step backwards: derive the forward-direction
            # window that visits the same elements.
            step = abs(step)
            if start and stop:
                if start > stop:
                    start, stop = stop + 1, start + 1
                else:
                    start = stop = 0
            elif not start and stop:
                # Only stop given: first forward index reachable from the end.
                start = list(range(self.__len__() - 1, stop, -step))[0]
                stop = None
            elif start and not stop:
                stop = start + 1
                start = (stop - 1) % step
            else:
                # Neither bound given: align start with the last element.
                start = (self.__len__() - 1) % step
        return start, stop, step
    def _set_at_index(self, index: Union[int, slice], value: Any) -> None:
        # Thin wrapper over the shared container item-assignment logic.
        self._set_item_impl(index, value)
    def __setitem__(self, index: Union[int, slice], value: Any) -> None:
        """Assign at an integer index, or splice an iterable into a slice
        (mimicking built-in list slice-assignment semantics)."""
        try:
            if isinstance(index, slice):
                _ = iter(value)  # check iterable
                self_indices = index.indices(len(self))
                indexes = range(*self_indices)
                # Ensure lengths match for extended slice assignment
                if index.step not in (None, 1):
                    if len(indexes) != len(value):
                        raise ValueError(
                            f"attempt to assign sequence of size {len(value)}"
                            f" to extended slice of size {len(indexes)}"
                        )
                # Initialize insertion offsets for empty slices
                if len(indexes) == 0:
                    curr_index = self_indices[0] - 1
                    val_i = -1
                # Delete and optionally replace non empty slices
                only_removed = 0
                for val_i, i in enumerate(indexes):
                    curr_index = i - only_removed
                    del self[curr_index]
                    if val_i < len(value):
                        self.insert(curr_index, value[val_i])
                    else:
                        # More targets than inputs: the net deletion shifts
                        # subsequent target indices left.
                        only_removed += 1
                # Insert any remaining input items
                for val_i in range(val_i + 1, len(value)):
                    curr_index += 1
                    self.insert(curr_index, value[val_i])
            else:
                self._set_at_index(index, value)
        except Exception as e:
            self._format_and_raise(key=index, value=value, cause=e)
    def append(self, item: Any) -> None:
        """Append `item` (validated/wrapped); rolls the list back on failure."""
        content = self.__dict__["_content"]
        index = len(content)
        # Reserve a placeholder slot, then assign through the shared
        # item-assignment logic so validation/wrapping applies.
        content.append(None)
        try:
            self._set_item_impl(index, item)
        except Exception as e:
            # Remove the placeholder so a failed append leaves no hole.
            del content[index]
            self._format_and_raise(key=index, value=item, cause=e)
            assert False
    def _update_keys(self) -> None:
        # Re-sync each child's metadata key with its list position after
        # inserts/deletes so error messages report the right index.
        for i in range(len(self)):
            node = self._get_node(i)
            if node is not None:
                assert isinstance(node, Node)
                node._metadata.key = i
    def insert(self, index: int, item: Any) -> None:
        """Insert `item` at `index`, wrapping it in a node of the declared
        element type; rolls back the placeholder slot on failure."""
        from omegaconf.omegaconf import _maybe_wrap
        try:
            if self._get_flag("readonly"):
                raise ReadonlyConfigError("Cannot insert into a read-only ListConfig")
            if self._is_none():
                raise TypeError(
                    "Cannot insert into ListConfig object representing None"
                )
            if self._is_missing():
                raise MissingMandatoryValue("Cannot insert into missing ListConfig")
            try:
                assert isinstance(self.__dict__["_content"], list)
                # insert place holder
                self.__dict__["_content"].insert(index, None)
                is_optional, ref_type = _resolve_optional(self._metadata.element_type)
                node = _maybe_wrap(
                    ref_type=ref_type,
                    key=index,
                    value=item,
                    is_optional=is_optional,
                    parent=self,
                )
                self._validate_set(key=index, value=node)
                self._set_at_index(index, node)
                self._update_keys()
            except Exception:
                # Remove the placeholder before surfacing the error.
                del self.__dict__["_content"][index]
                self._update_keys()
                raise
        except Exception as e:
            self._format_and_raise(key=index, value=item, cause=e)
            assert False
    def extend(self, lst: Iterable[Any]) -> None:
        """Append every element of `lst` (tuple, list or ListConfig)."""
        assert isinstance(lst, (tuple, list, ListConfig))
        for x in lst:
            self.append(x)
    def remove(self, x: Any) -> None:
        """Remove the first element equal to `x` (index() raises if absent)."""
        del self[self.index(x)]
    def __delitem__(self, key: Union[int, slice]) -> None:
        """Delete an element or slice, honoring the read-only flag."""
        if self._get_flag("readonly"):
            self._format_and_raise(
                key=key,
                value=None,
                cause=ReadonlyConfigError(
                    "Cannot delete item from read-only ListConfig"
                ),
            )
        del self.__dict__["_content"][key]
        # Child metadata keys must track the new positions.
        self._update_keys()
    def clear(self) -> None:
        """Remove all elements (delegates to slice deletion)."""
        del self[:]
def index(
self, x: Any, start: Optional[int] = None, end: Optional[int] = None
) -> int:
if start is None:
start = 0
if end is None:
end = len(self)
assert start >= 0
assert end <= len(self)
found_idx = -1
for idx in range(start, end):
item = self[idx]
if x == item:
found_idx = idx
break
if found_idx != -1:
return found_idx
else:
self._format_and_raise(
key=None,
value=None,
cause=ConfigValueError("Item not found in ListConfig"),
)
assert False
def count(self, x: Any) -> int:
c = 0
for item in self:
if item == x:
c = c + 1
return c
    def _get_node(
        self,
        key: Union[int, slice],
        validate_access: bool = True,
        validate_key: bool = True,
        throw_on_missing_value: bool = False,
        throw_on_missing_key: bool = False,
    ) -> Union[Optional[Node], List[Optional[Node]]]:
        """Return the underlying Node (or list of Nodes for a slice) at
        `key`, without resolving interpolations.

        With validate_access=False, access errors yield None instead of
        raising. throw_on_missing_value raises for '???' nodes.
        NOTE(review): validate_key and throw_on_missing_key are not used in
        this body -- presumably kept for signature parity with DictConfig.
        """
        try:
            if self._is_none():
                raise TypeError(
                    "Cannot get_node from a ListConfig object representing None"
                )
            if self._is_missing():
                raise MissingMandatoryValue("Cannot get_node from a missing ListConfig")
            assert isinstance(self.__dict__["_content"], list)
            if validate_access:
                self._validate_get(key)
            value = self.__dict__["_content"][key]
            if value is not None:
                if isinstance(key, slice):
                    assert isinstance(value, list)
                    for v in value:
                        if throw_on_missing_value and v._is_missing():
                            raise MissingMandatoryValue("Missing mandatory value")
                else:
                    assert isinstance(value, Node)
                    if throw_on_missing_value and value._is_missing():
                        raise MissingMandatoryValue("Missing mandatory value: $KEY")
            return value
        except (IndexError, TypeError, MissingMandatoryValue, KeyValidationError) as e:
            # A requested missing-value error always propagates as-is.
            if isinstance(e, MissingMandatoryValue) and throw_on_missing_value:
                raise
            if validate_access:
                self._format_and_raise(key=key, value=None, cause=e)
                assert False
            else:
                return None
    def get(self, index: int, default_value: Any = None) -> Any:
        """Return the resolved value at `index`; `default_value` is passed
        through to _resolve_with_default (its fallback semantics are
        defined there)."""
        try:
            if self._is_none():
                raise TypeError("Cannot get from a ListConfig object representing None")
            if self._is_missing():
                raise MissingMandatoryValue("Cannot get from a missing ListConfig")
            self._validate_get(index, None)
            assert isinstance(self.__dict__["_content"], list)
            return self._resolve_with_default(
                key=index,
                value=self.__dict__["_content"][index],
                default_value=default_value,
            )
        except Exception as e:
            self._format_and_raise(key=index, value=None, cause=e)
            assert False
    def pop(self, index: int = -1) -> Any:
        """Remove and return the resolved value at `index` (last by default)."""
        try:
            if self._get_flag("readonly"):
                raise ReadonlyConfigError("Cannot pop from read-only ListConfig")
            if self._is_none():
                raise TypeError("Cannot pop from a ListConfig object representing None")
            if self._is_missing():
                raise MissingMandatoryValue("Cannot pop from a missing ListConfig")
            assert isinstance(self.__dict__["_content"], list)
            # Resolve before deletion so interpolations still see the node.
            node = self._get_node(index)
            assert isinstance(node, Node)
            ret = self._resolve_with_default(key=index, value=node, default_value=None)
            del self.__dict__["_content"][index]
            self._update_keys()
            return ret
        except KeyValidationError as e:
            # An invalid key type is reported as a ConfigTypeError for pop.
            self._format_and_raise(
                key=index, value=None, cause=e, type_override=ConfigTypeError
            )
            assert False
        except Exception as e:
            self._format_and_raise(key=index, value=None, cause=e)
            assert False
    def sort(
        self, key: Optional[Callable[[Any], Any]] = None, reverse: bool = False
    ) -> None:
        """Sort in place. The user `key` receives the element's primitive
        value: underlying nodes are unwrapped via _value() before the key
        function is applied."""
        try:
            if self._get_flag("readonly"):
                raise ReadonlyConfigError("Cannot sort a read-only ListConfig")
            if self._is_none():
                raise TypeError("Cannot sort a ListConfig object representing None")
            if self._is_missing():
                raise MissingMandatoryValue("Cannot sort a missing ListConfig")
            if key is None:
                def key1(x: Any) -> Any:
                    return x._value()
            else:
                def key1(x: Any) -> Any:
                    return key(x._value())  # type: ignore
            assert isinstance(self.__dict__["_content"], list)
            # Sort the node list directly; nodes keep their wrapping.
            self.__dict__["_content"].sort(key=key1, reverse=reverse)
        except Exception as e:
            self._format_and_raise(key=None, value=None, cause=e)
            assert False
def __eq__(self, other: Any) -> bool:
if isinstance(other, (list, tuple)) or other is None:
other = ListConfig(other, flags={"allow_objects": True})
return ListConfig._list_eq(self, other)
if other is None or isinstance(other, ListConfig):
return ListConfig._list_eq(self, other)
if self._is_missing():
return _is_missing_literal(other)
return NotImplemented
def __ne__(self, other: Any) -> bool:
x = self.__eq__(other)
if x is not NotImplemented:
return not x
return NotImplemented
def __hash__(self) -> int:
return hash(str(self))
    def __iter__(self) -> Iterator[Any]:
        """Iterate over resolved element values."""
        return self._iter_ex(resolve=True)
    class ListIterator(Iterator[Any]):
        """Iterator over a ListConfig's content.

        With resolve=True every node is dereferenced (interpolations
        followed) and ValueNodes are unwrapped to their primitive value;
        container nodes are returned as-is (or None when they hold None).
        """
        def __init__(self, lst: Any, resolve: bool) -> None:
            self.resolve = resolve
            self.iterator = iter(lst.__dict__["_content"])
            self.index = 0
            # Imported here to avoid a module-level import cycle.
            from .nodes import ValueNode
            self.ValueNode = ValueNode
        def __next__(self) -> Any:
            x = next(self.iterator)
            if self.resolve:
                x = x._dereference_node()
                if x._is_missing():
                    raise MissingMandatoryValue(f"Missing value at index {self.index}")
            self.index = self.index + 1
            if isinstance(x, self.ValueNode):
                return x._value()
            else:
                # Must be omegaconf.Container. not checking for perf reasons.
                if x._is_none():
                    return None
                return x
        def __repr__(self) -> str:  # pragma: no cover
            return f"ListConfig.ListIterator(resolve={self.resolve})"
    def _iter_ex(self, resolve: bool) -> Iterator[Any]:
        """Return a ListIterator; `resolve` controls interpolation resolution."""
        try:
            if self._is_none():
                raise TypeError("Cannot iterate a ListConfig object representing None")
            if self._is_missing():
                raise MissingMandatoryValue("Cannot iterate a missing ListConfig")
            return ListConfig.ListIterator(self, resolve)
        except (TypeError, MissingMandatoryValue) as e:
            self._format_and_raise(key=None, value=None, cause=e)
            assert False
    def __add__(self, other: Union[List[Any], "ListConfig"]) -> "ListConfig":
        # res is sharing this list's parent to allow interpolation to work as expected
        res = ListConfig(parent=self._get_parent(), content=[])
        res.extend(self)
        res.extend(other)
        return res
    def __radd__(self, other: Union[List[Any], "ListConfig"]) -> "ListConfig":
        # res is sharing this list's parent to allow interpolation to work as expected
        res = ListConfig(parent=self._get_parent(), content=[])
        res.extend(other)
        res.extend(self)
        return res
    def __iadd__(self, other: Iterable[Any]) -> "ListConfig":
        # In-place add: append other's elements and return self.
        self.extend(other)
        return self
    def __contains__(self, item: Any) -> bool:
        """Membership test; each node is dereferenced (interpolations
        resolved) before being compared to `item`."""
        if self._is_none():
            raise TypeError(
                "Cannot check if an item is in a ListConfig object representing None"
            )
        if self._is_missing():
            raise MissingMandatoryValue(
                "Cannot check if an item is in missing ListConfig"
            )
        lst = self.__dict__["_content"]
        for x in lst:
            x = x._dereference_node()
            if x == item:
                return True
        return False
    def _set_value(self, value: Any, flags: Optional[Dict[str, bool]] = None) -> None:
        """Replace the whole content; restores the previous content when the
        assignment fails, then re-raises."""
        try:
            previous_content = self.__dict__["_content"]
            self._set_value_impl(value, flags)
        except Exception as e:
            self.__dict__["_content"] = previous_content
            raise e
    def _set_value_impl(
        self, value: Any, flags: Optional[Dict[str, bool]] = None
    ) -> None:
        """Assign `value` as the new content.

        Accepts None (optional lists only), the '???' mandatory-missing
        marker, an interpolation string, or a list/tuple/ListConfig whose
        elements are appended one by one.
        """
        from omegaconf import MISSING, flag_override
        if flags is None:
            flags = {}
        vk = get_value_kind(value, strict_interpolation_validation=True)
        if _is_none(value, resolve=True):
            if not self._is_optional():
                raise ValidationError(
                    "Non optional ListConfig cannot be constructed from None"
                )
            self.__dict__["_content"] = None
        elif vk is ValueKind.MANDATORY_MISSING:
            self.__dict__["_content"] = MISSING
        elif vk == ValueKind.INTERPOLATION:
            # Interpolations are stored raw and resolved lazily.
            self.__dict__["_content"] = value
        else:
            if not (is_primitive_list(value) or isinstance(value, ListConfig)):
                type_ = type(value)
                msg = f"Invalid value assigned: {type_.__name__} is not a ListConfig, list or tuple."
                raise ValidationError(msg)
            self.__dict__["_content"] = []
            if isinstance(value, ListConfig):
                self.__dict__["_metadata"] = copy.deepcopy(value._metadata)
                self._metadata.flags = copy.deepcopy(flags)
                # disable struct and readonly for the construction phase
                # retaining other flags like allow_objects. The real flags are restored at the end of this function
                with flag_override(self, ["struct", "readonly"], False):
                    for item in value._iter_ex(resolve=False):
                        self.append(item)
            elif is_primitive_list(value):
                with flag_override(self, ["struct", "readonly"], False):
                    for item in value:
                        self.append(item)
    @staticmethod
    def _list_eq(l1: Optional["ListConfig"], l2: Optional["ListConfig"]) -> bool:
        """Element-wise equality of two ListConfigs; two None contents are
        equal, a None and a non-None content are not.

        NOTE(review): although the parameters are typed Optional, passing
        None would fail on the first __dict__ access below -- the Optional
        refers to the wrapped *content*, not the arguments.
        """
        l1_none = l1.__dict__["_content"] is None
        l2_none = l2.__dict__["_content"] is None
        if l1_none and l2_none:
            return True
        if l1_none != l2_none:
            return False
        assert isinstance(l1, ListConfig)
        assert isinstance(l2, ListConfig)
        if len(l1) != len(l2):
            return False
        for i in range(len(l1)):
            if not BaseContainer._item_eq(l1, i, l2, i):
                return False
        return True
| omry/omegaconf | omegaconf/listconfig.py | Python | bsd-3-clause | 23,947 |
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "PolyTrend", cycle_length = 5, transform = "None", sigma = 0.0, exog_count = 100, ar_order = 0); | antoinecarme/pyaf | tests/artificial/transf_None/trend_PolyTrend/cycle_5/ar_/test_artificial_32_None_PolyTrend_5__100.py | Python | bsd-3-clause | 259 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class CheckGroupMembershipResult(Model):
    """Server response for IsMemberOf API call.

    :param additional_properties: Unmatched properties from the message are
     deserialized this collection
    :type additional_properties: dict[str, object]
    :param value: True if the specified user, group, contact, or service
     principal has either direct or transitive membership in the specified
     group; otherwise, false.
    :type value: bool
    """
    # Wire-format mapping consumed by the msrest (de)serializer; the empty
    # key collects any response properties that match no declared field.
    # NOTE: this file is AutoRest-generated -- manual edits are lost on regen.
    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'value': {'key': 'value', 'type': 'bool'},
    }
    def __init__(self, *, additional_properties=None, value: bool=None, **kwargs) -> None:
        super(CheckGroupMembershipResult, self).__init__(**kwargs)
        self.additional_properties = additional_properties
        self.value = value
| Azure/azure-sdk-for-python | sdk/graphrbac/azure-graphrbac/azure/graphrbac/models/check_group_membership_result_py3.py | Python | mit | 1,366 |
# -*- coding: utf-8 -*-
# Copyright (C) 2009 Axel Tillequin (bdcht3@gmail.com)
# This code is part of Amoco
# published under GPLv2 license
import gtk
import math
from goocanvas import *
# connectors CX are embedded inside node views. These objects are drawn on
# the node's surface and exists only as sub-objects of their node. CX are used
# as 'ports' for edges connected with a node. A node can have several such CX
# and can register or unregister its edges on such CX.
class CX(Rect):
    """Connector ('port') rendered on a node's surface.

    Edges attach to nodes through CX objects: each node owns its CX
    children and edges register/unregister themselves on the CX they use.
    """
    def __init__(self, e=None):
        Rect.__init__(self, width=3, height=3)
        self.set_properties(line_width=0, fill_color="red")
        # self.props.visibility=False
        # list of edges connected to this CX:
        self.registered = []
        if e != None:
            self.register(e)
        # self.connect('event',CX.eventhandler)
    def set_wh(self, wh):
        # (width, height) setter backing the `wh` property below.
        self.set_properties(width=wh[0], height=wh[1])
    def get_wh(self):
        return self.get_properties("width", "height")
    wh = property(get_wh, set_wh)
    def getpos(self):
        # Top-left corner of the connector in canvas coordinates.
        bb = self.get_bounds()
        return (bb.x1, bb.y1)
    def getcenter(self):
        # Center point in canvas coordinates; edge endpoints anchor here.
        xy = self.getpos()
        wh = self.get_wh()
        return (xy[0] + wh[0] / 2.0, xy[1] + wh[1] / 2.0)
    # manage Edge_basic that are using this CX:
    def register(self, item):
        self.registered.append(item)
    def unregister(self, item):
        self.registered.remove(item)
    def eventhandler(*args):
        print("CX eventhandler on", args)
# ------------------------------------------------------------------------------
# decorators for eventhandlers: this sets the 'clicked' field to the mouse
# button id, and moves the object along with mouse-1 movements.
def mouse1moves(h):
    """Decorator for canvas event handlers.

    Records which mouse button is held in `self.clicked`, translates the
    decorated item along with pointer motion while button 1 is down, and
    finally delegates to the wrapped handler `h`.
    """
    def wrapper(self, item, e):
        # self.last_msec = [0]
        if e.type is gtk.gdk.BUTTON_PRESS:
            self.clicked = e.button
            self.oldx, self.oldy = e.get_coords()
            # self.last_msec[0] = 0.
        elif e.type is gtk.gdk.BUTTON_RELEASE:
            self.clicked = 0
        elif e.type is gtk.gdk.MOTION_NOTIFY:
            # if abs(e.time - self.last_msec[0])<10: return False
            # self.last_msec[0]=e.time
            if self.clicked == 1:
                newx, newy = e.get_coords()
                tx, ty = newx - self.oldx, newy - self.oldy
                self.translate(tx, ty)
                # Broadcast the move so attached edges can follow.
                self.notify("transform")
        return h(self, item, e)
    return wrapper
# ------------------------------------------------------------------------------
# This is a 'circle' shaped view for nodes.
class Node_basic(Group):
    """Circle-shaped node view: an Ellipse plus a centered Text label.

    Exposes edge connectors (CX) in self.cx; dragging comes from the
    @mouse1moves-decorated eventhandler and notifyhandler keeps attached
    edges glued to the node when it moves.
    """
    def __init__(self, name="?", r=10):
        Group.__init__(self)
        self.el = Ellipse(
            parent=self, fill_color="gray88", stroke_color="black", line_width=2
        )
        # extra:
        self.alpha = 1.0
        self.r = r
        self.label = Text(
            parent=self,
            text="[%s]" % name,
            font="monospace, bold, 8",
            fill_color="blue",
            anchor=gtk.ANCHOR_CENTER,
        )
        # self.label.props.visibility = False
        # edges connectors:
        self.cx = []
        # events:
        self.connect("enter-notify-event", Node_basic.eventhandler)
        self.connect("leave-notify-event", Node_basic.eventhandler)
        self.connect("button-press-event", Node_basic.eventhandler)
        self.connect("button-release-event", Node_basic.eventhandler)
        self.connect("motion-notify-event", Node_basic.eventhandler)
        # clicked: 1=mouse1, 2=mouse2, 3=mouse3
        self.clicked = 0
        self.connect("notify::transform", Node_basic.notifyhandler)
        self.raise_(None)
        self.w, self.h = self.wh
    # prop:
    def set_r(self, r):
        # Radius setter: resizes the underlying ellipse as well.
        self._r = r
        self.el.set_properties(radius_x=r, radius_y=r)
    def get_r(self):
        return self._r
    r = property(get_r, set_r)
    def set_wh(self, wh):
        # Size is derived from the bounds; direct assignment is a no-op.
        pass
    def get_wh(self):
        bb = self.get_bounds()
        self.w, self.h = (bb.x2 - bb.x1, bb.y2 - bb.y1)
        return (self.w, self.h)
    wh = property(get_wh, set_wh)
    def set_xy(self, xy):
        self.props.x, self.props.y = xy
    def get_xy(self):
        return (self.props.x, self.props.y)
    xy = property(get_xy, set_xy)
    # put the cx pt at the intersection between the circle shape of Node_basic
    # and the radius from centre to pt 'topt'.
    def intersect(self, topt, cx):
        assert self.find_child(cx) != -1
        # get cx pos on canvas:
        bb = self.get_bounds()
        w, h = self.get_wh()
        x1, y1 = (w / 2.0, h / 2.0)
        # intersect with target pt:
        x2, y2 = topt[0] - bb.x1, topt[1] - bb.y1
        theta = math.atan2(y2 - y1, x2 - x1)
        newx = math.cos(theta) * self._r
        newy = math.sin(theta) * self._r
        cx.set_properties(x=newx, y=newy)
        self._angle = theta
    def set_alpha(self, a):
        # Replace the low byte (alpha) of the RGBA color.
        # NOTE(review): reads fill_color_rgba on the Group itself rather
        # than on self.el -- confirm goocanvas exposes it here as intended.
        color = self.props.fill_color_rgba & 0xFFFFFF00
        self.props.fill_color_rgba = color + (int(a * 255.0) & 0xFF)
    def get_alpha(self):
        return (self.props.fill_color_rgba & 0xFF) / 255.0
    alpha = property(get_alpha, set_alpha)
    @mouse1moves
    def eventhandler(self, item, e):
        # Hover feedback: thicken the outline while the pointer is inside.
        # NOTE(review): line_width is set on the Group here while __init__
        # styles self.el -- verify this actually reaches the ellipse.
        if e.type is gtk.gdk.ENTER_NOTIFY:
            self.props.line_width = 2.0
        elif e.type is gtk.gdk.LEAVE_NOTIFY:
            self.props.line_width = 1.0
        return False
    def notifyhandler(self, prop):
        # print("notify %s on "%(prop.name,self))
        # Node moved: let every edge registered on our connectors re-anchor.
        for cx in self.cx:
            for e in cx.registered:
                e.update_points()
# ------------------------------------------------------------------------------
class Edge_basic(Polyline):
    """Straight polyline edge between two node views.

    Creates one CX connector inside each endpoint node and keeps its own
    first/last points glued to those connectors when the nodes move.
    """
    def __init__(self, n0, n1, head=False):
        self.n = [n0, n1]
        x0, y0 = n0.xy
        x1, y1 = n1.xy
        self.cx = [CX(self), CX(self)]
        n0.cx.append(self.cx[0])
        n0.add_child(self.cx[0])
        n1.cx.append(self.cx[1])
        n1.add_child(self.cx[1])
        Polyline.__init__(self, points=Points([(x0, y0), (x1, y1)]))
        # NOTE(review): end_arrow is unconditionally True here, which makes
        # the `if head:` below redundant -- confirm whether head=False was
        # ever meant to draw an arrow-less edge.
        self.set_properties(
            close_path=False,
            stroke_color="black",
            end_arrow=True,
            fill_pattern=None,
            line_width=2,
        )
        if head:
            self.set_properties(end_arrow=True)
        self.update_points()
        self.lower(None)
        self.clicked = 0
    def setpath(self, l):
        # Replace the full point list, then re-anchor both ends.
        self.props.points = Points(l)
        self.update_points()
    def update_points(self):
        # Move each CX onto its node's border (towards the adjacent point)
        # and snap the polyline's end points onto the CX centers.
        pts = self.props.points.coords
        self.n[0].intersect(topt=pts[1], cx=self.cx[0])
        self.n[1].intersect(topt=pts[-2], cx=self.cx[-1])
        self.cx[-1].set_properties(fill_color="blue")
        cx = self.cx[0].getcenter()
        pts[0] = cx
        cx = self.cx[-1].getcenter()
        pts[-1] = cx
        self.props.points = Points(pts)
# ------------------------------------------------------------------------------
class Edge_curve(Path):
    """Curved (Bezier spline) edge between two node views.

    self.splines is a list of segments: 2 points for a straight span, 4
    points for a cubic Bezier. make_data() serializes it into SVG-like
    path data consumed by the goocanvas Path item.
    """
    def __init__(self, n0, n1, head=True):
        self.n = [n0, n1]
        self.has_head = head
        self.cx = [CX(self), CX(self)]
        n0.cx.append(self.cx[0])
        n0.add_child(self.cx[0])
        n1.cx.append(self.cx[1])
        n1.add_child(self.cx[1])
        Path.__init__(self)
        self.set_properties(
            stroke_color="black",
            fill_pattern=None,
            line_width=2,
            line_cap=1,
            line_join=1,
        )
        # Start with a single straight segment between the two centers.
        self.splines = [[n0.xy, n1.xy]]
        self.update_points()
        self.lower(None)
        self.clicked = 0
    def make_data(self):
        """Serialize self.splines into SVG path data, appending a manual
        two-stroke arrow head when has_head is set."""
        p0 = self.splines[0][0]
        data = "M %d %d" % p0
        for s in self.splines:
            if len(s) == 2:
                data += " L %d %d" % s[1]
            else:
                data += " C %d %d" % s[1]
                data += " %d %d" % s[2] + " %d %d" % s[3]
        if self.has_head:
            # Arrow head: two 6-unit strokes slanted off the direction of
            # the last segment.
            s = self.splines[-1]
            p = s[-1]
            dx = p[0] - s[-2][0]
            dy = p[1] - s[-2][1]
            l = math.sqrt(dx * dx + dy * dy)
            dx, dy = dx / l * 6, dy / l * 6
            data += " l %d %d" % (-dx - dy, -dy + dx)
            data += " M %d %d" % p
            data += " l %d %d" % (-dx + dy, -dy - dx)
            data += " M %d %d" % p
        return data
    def setpath(self, l):
        try:
            # NOTE(review): setcurve() is not defined in this module
            # (presumably provided by a router/mixin); the bare except
            # silently keeps the previous splines on any failure.
            self.splines = self.setcurve(l)
        except:
            pass
    def update_points(self):
        try:
            spl0 = self.splines[0]
            spl1 = self.splines[-1]
            # move CX to intersection between edge and Node border:
            self.n[0].intersect(topt=spl0[-1], cx=self.cx[0])
            self.n[1].intersect(topt=spl1[0], cx=self.cx[1])
            cx = self.cx[0].getcenter()
            spl0[0] = cx
            cx = self.cx[1].getcenter()
            spl1[-1] = cx
            self.set_properties(data=self.make_data())
        except:
            # NOTE(review): bare except hides real errors (e.g. a
            # zero-length arrow direction in make_data) -- consider narrowing.
            pass
# ------------------------------------------------------------------------------
class Node_codeblock(Group):
    """Rectangular node view showing a block of disassembled source code.

    A monospace Text item over a white Rect with a grey drop shadow.
    highlight_on/highlight_off add and strip pango markup coloring.
    Fix: highlight_off now indexes get_properties("text")[0] like
    highlight_on does -- get_properties returns a sequence of values, so
    calling splitlines() on its raw return raised AttributeError.
    """
    def __init__(self, code):
        Group.__init__(self, can_focus=True)
        self.codebox = Rect(parent=self, can_focus=True)
        self.code = Text(
            parent=self,
            text=code,
            font="monospace, 10",
            use_markup=True,
            can_focus=True,
            fill_color="black",
        )
        self.padding = 4
        # Size the box from the text's natural (logical) extents, reported
        # in pango units (1/1000 of a device unit at this scale).
        bbink, bblogic = self.code.get_natural_extents()
        w = (bblogic[2] * 0.001) + 2 * self.padding
        h = (bblogic[3] * 0.001) + 2 * self.padding
        self.codebox.set_properties(width=w, height=h)
        self.codebox.set_properties(
            fill_color="white", stroke_color="black", line_width=1
        )
        self.code.raise_(self.codebox)
        # shadow :
        self.shadow = s = 4
        self.code.set_properties(x=self.padding, y=self.padding)
        self.shadbox = Rect(
            x=s, y=s, width=w, height=h, fill_color="grey44", line_width=0
        )
        self._wh = (w + s, h + s)
        self.w, self.h = self._wh
        self.cx = []
        self.add_child(self.shadbox, 0)
        self.shadbox.lower(self.codebox)
        # events:
        self.clicked = 0
        self.connect("enter-notify-event", Node_codeblock.eventhandler)
        self.connect("leave-notify-event", Node_codeblock.eventhandler)
        self.connect("button-press-event", Node_codeblock.eventhandler)
        self.connect("button-release-event", Node_codeblock.eventhandler)
        self.connect("key-press-event", Node_codeblock.eventhandler)
        self.connect("key-release-event", Node_codeblock.eventhandler)
        self.connect("motion-notify-event", Node_codeblock.eventhandler)
        self.connect("notify::transform", Node_codeblock.notifyhandler)
    def set_wh(self, wh):
        # Size is derived from the text; direct assignment is a no-op.
        pass
    def get_wh(self):
        return self._wh
    wh = property(get_wh, set_wh)
    # xy property is bound to center of object
    def get_xy(self):
        w, h = self.codebox.get_properties("width", "height")
        return (self.props.x + w / 2.0, self.props.y + h / 2.0)
    def set_xy(self, xy):
        w, h = self.codebox.get_properties("width", "height")
        self.props.x, self.props.y = xy[0] - w / 2.0, xy[1] - h / 2.0
    xy = property(get_xy, set_xy)
    def intersect(self, topt, cx):
        """Place connector `cx` on the border of the rectangle, on the
        segment from the box center towards canvas point `topt`."""
        assert self.find_child(cx) != -1
        bb = self.get_bounds()
        w, h = self.codebox.get_properties("width", "height")
        x1, y1 = (w / 2.0, h / 2.0)
        x2, y2 = topt[0] - bb.x1, topt[1] - bb.y1
        # now try all 4 segments of self rectangle:
        S = [
            ((x1, y1), (x2, y2), (0, 0), (w, 0)),
            ((x1, y1), (x2, y2), (w, 0), (w, h)),
            ((x1, y1), (x2, y2), (0, h), (w, h)),
            ((x1, y1), (x2, y2), (0, h), (0, 0)),
        ]
        for segs in S:
            xy = intersect2lines(*segs)
            if xy != None:
                cx.set_properties(x=xy[0], y=xy[1])
                break
    def highlight_on(self, style=None):
        """Colorize the code with pango markup (address/opcode/mnemonic)."""
        import re
        if style is None:
            style = {
                "addr": '<span foreground="blue">%s</span>',
                "code": '<span foreground="black">%s</span>',
                "mnem": '<span foreground="black" weight="bold">%s</span>',
                "strg": '<span foreground="DarkRed">%s</span>',
                "cons": '<span foreground="red">%s</span>',
                "comm": '<span foreground="DarkGreen">%s</span>',
            }
        lre = re.compile("(0x[0-9a-f]+ )('[0-9a-f]+' +)(.*)$")
        hcode = []
        for l in self.code.get_properties("text")[0].splitlines():
            if l.startswith("#"):
                hcode.append(style["comm"] % l)
            else:
                m = lre.match(l)
                if m is None:
                    # Unrecognized line: abort without partial markup.
                    return
                g = m.groups()
                s = [style["addr"] % g[0]]
                s += [style["strg"] % g[1]]
                s += [style["code"] % g[2]]
                hcode.append("".join(s))
        self.code.set_properties(text="\n".join(hcode))
        self.code.set_properties(use_markup=True)
    def highlight_off(self):
        """Strip the pango markup previously added by highlight_on."""
        import re
        lre = re.compile("<span [^>]+>(.*?)</span>")
        code = []
        # BUGFIX: get_properties returns a sequence of values -- take the
        # first one, as highlight_on already does; the bare return has no
        # splitlines() and raised AttributeError here.
        for l in self.code.get_properties("text")[0].splitlines():
            g = lre.findall(l)
            if len(g) > 0:
                code.append("".join(g))
        self.code.set_properties(text="\n".join(code))
        self.code.set_properties(use_markup=False)
    @mouse1moves
    def eventhandler(self, item, e):
        # print("*** CODEBLOCK EVENT =",e)
        # Hover feedback: thicken the code box outline.
        if e.type is gtk.gdk.ENTER_NOTIFY:
            self.codebox.set_properties(line_width=2.0)
        elif e.type is gtk.gdk.LEAVE_NOTIFY:
            self.codebox.set_properties(line_width=1.0)
        return False
    def notifyhandler(self, prop):
        # Node moved: let every edge registered on our connectors re-anchor.
        for cx in self.cx:
            for e in cx.registered:
                e.update_points()
    def resrc(self, code):
        """Replace the displayed code and resize box and shadow to fit."""
        self.code.props.text = code
        bb = self.code.get_bounds()
        w = (bb.x2 - bb.x1) + self.padding
        h = (bb.y2 - bb.y1) + self.padding
        self.codebox.set_properties(width=w, height=h)
        self.shadbox.set_properties(width=w, height=h)
def intersect2lines(X1, X2, X3, X4):
    """Return the intersection point of segments (X1,X2) and (X3,X4), or None.

    Points are (x, y) pairs. Parallel/degenerate segments yield None, as
    does a crossing that falls outside either segment's extent.
    """
    (x1, y1), (x2, y2) = X1, X2
    (x3, y3), (x4, y4) = X3, X4
    # Direction vectors of both segments.
    r = (x2 - x1, y2 - y1)
    s = (x4 - x3, y4 - y3)
    # 2D cross product of the directions; zero means no unique crossing.
    det = r[0] * s[1] - r[1] * s[0]
    if det == 0:
        return None
    # Vector from the first segment's origin to the second's origin.
    c = (x3 - x1, y3 - y1)
    # Parameter along the second segment; must lie within [0, 1].
    u = float(c[0] * r[1] - c[1] * r[0]) / det
    if not (0.0 <= u <= 1.0):
        return None
    # Parameter along the first segment; must lie within [0, 1].
    t = float(c[0] * s[1] - c[1] * s[0]) / det
    if not (0.0 <= t <= 1.0):
        return None
    return (x1 + t * r[0], y1 + t * r[1])
| bdcht/amoco | amoco/ui/graphics/gtk_/items.py | Python | gpl-2.0 | 14,955 |
#!/usr/bin/env python
"""Initialize the 'google' database on a sharded MongoDB cluster.

Drops and recreates the workflow's output collections and builds the
indexes the analysis queries rely on. The commented-out statements
document sharding/index variants that were tried and disabled.
"""
from pymongo import MongoClient
import pymongo
# mongos routers of the 3-shard cluster.
HOST = "mongos-3sh-ex4q:27017,mongos-3sh-lenv:27017,mongos-3sh-ql7j:27017"
c = MongoClient('mongodb://'+HOST)
# Database and collection names used by the workflow.
dbname = "google"
task = "task_events"
avg_cpu = "average_cpu"
mm_cpu = "maxmin_cpu"
med_cpu = "median_cpu"
ratio = "ratio"
avg = "average_ratio"
analysis = "analysis_ratio"
# dbname = "config"
# db_c = c[dbname]
# coll = "collections"
# coll_col = db_c[coll]
#
# coll_col.remove({"_id":"google.ratio"})
db = c[dbname]
task_col = db[task]
# Recreate every output collection from scratch. The 'ratio' collection
# is deliberately left untouched (see the commented drop below).
c[dbname].drop_collection(avg_cpu)
c[dbname].create_collection(avg_cpu)
c[dbname].drop_collection(mm_cpu)
c[dbname].create_collection(mm_cpu)
c[dbname].drop_collection(med_cpu)
c[dbname].create_collection(med_cpu)
#c[dbname].drop_collection(ratio)
#c[dbname].create_collection(ratio)
c[dbname].drop_collection(avg)
c[dbname].create_collection(avg)
c[dbname].drop_collection(analysis)
c[dbname].create_collection(analysis)
# NOTE: db and task_col are re-assigned with the same values as above.
db = c[dbname]
task_col = db[task]
ratio_col = db[ratio]
avg_col = db[avg]
analysis_col = db[analysis]
#task_col.create_index([("CPU request", pymongo.ASCENDING)])
#task_col.create_index([("_id.filepath", pymongo.ASCENDING),("_id.numline", pymongo.ASCENDING)])
#task_col.create_index([("_id", pymongo.HASHED)])
# Compound (filepath, numline) indexes back the per-file/per-line lookups.
ratio_col.create_index([("_id.filepath", pymongo.ASCENDING),("_id.numline", pymongo.ASCENDING)])
#c.admin.command('shardCollection', dbname+'.'+ratio, key={'_id': "hashed"})
#ratio_col.create_index([("_id", pymongo.HASHED)])
avg_col.create_index([("_id.filepath", pymongo.ASCENDING),("_id.numline", pymongo.ASCENDING)])
#avg_col.create_index([("_id", pymongo.HASHED)])
analysis_col.create_index([("_id.filepath", pymongo.ASCENDING),("_id.numline", pymongo.ASCENDING)])
#analysis_col.create_index([("_id", pymongo.HASHED)])
avg_cpu_col = db[avg_cpu]
med_cpu_col = db[med_cpu]
mm_cpu_col = db[mm_cpu]
avg_cpu_col.create_index([("_id.filepath", pymongo.ASCENDING),("_id.numline", pymongo.ASCENDING)])
med_cpu_col.create_index([("_id.filepath", pymongo.ASCENDING),("_id.numline", pymongo.ASCENDING)])
mm_cpu_col.create_index([("_id.filepath", pymongo.ASCENDING),("_id.numline", pymongo.ASCENDING)])
| elainenaomi/sciwonc-dataflow-examples | sbbd2016/experiments/4-mongodb-rp-3sh/10_workflow_full_10files_primary_3sh_annot_with_proj_3s_hs/init_0/DataStoreInit.py | Python | gpl-3.0 | 2,180 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mark McClain, DreamHost
import os
from oslo_config import cfg
from tacker.agent.linux import ip_lib
from tacker.agent.linux import utils
from tacker.openstack.common import log as logging
LOG = logging.getLogger(__name__)
# Configuration option: directory holding the pid files of spawned child
# processes; registered on the global CONF at import time.
OPTS = [
    cfg.StrOpt('external_pids',
               default='$state_path/external/pids',
               help=_('Location to store child pid files')),
]
cfg.CONF.register_opts(OPTS)
class ProcessManager(object):
    """An external process manager for Tacker spawned processes.

    Note: The manager expects uuid to be in cmdline.
    """
    def __init__(self, conf, uuid, root_helper='sudo', namespace=None):
        # `conf` must expose `external_pids` (directory for pid files).
        self.conf = conf
        self.uuid = uuid
        self.root_helper = root_helper
        self.namespace = namespace
    def enable(self, cmd_callback):
        """Spawn the external process unless it is already active.

        cmd_callback(pid_file) must return the command line to execute;
        it is run inside the configured network namespace.
        """
        if self.active:
            return
        cmd = cmd_callback(self.get_pid_file_name(ensure_pids_dir=True))
        ip_lib.IPWrapper(self.root_helper, self.namespace).netns.execute(cmd)
    def disable(self):
        """Kill the managed process if alive; otherwise only log."""
        pid = self.pid
        if self.active:
            utils.execute(['kill', '-9', pid], self.root_helper)
        elif pid:
            LOG.debug(_('Process for %(uuid)s pid %(pid)d is stale, ignoring '
                        'command'), {'uuid': self.uuid, 'pid': pid})
        else:
            LOG.debug(_('No process started for %s'), self.uuid)
    def get_pid_file_name(self, ensure_pids_dir=False):
        """Returns the file name for a given kind of config file."""
        pids_dir = os.path.abspath(os.path.normpath(self.conf.external_pids))
        if ensure_pids_dir and not os.path.isdir(pids_dir):
            os.makedirs(pids_dir, 0o755)
        return os.path.join(pids_dir, self.uuid + '.pid')
    @property
    def pid(self):
        """Last known pid for this external process spawned for this uuid."""
        pid_path = self.get_pid_file_name()
        msg = _('Error while reading %s')
        try:
            with open(pid_path, 'r') as f:
                return int(f.read())
        except IOError:
            msg = _('Unable to access %s')
        except ValueError:
            msg = _('Unable to convert value in %s')
        # Only reached on failure; `msg` was set by the matching handler.
        LOG.debug(msg, pid_path)
        return None
    @property
    def active(self):
        """True when /proc/<pid>/cmdline exists and contains our uuid."""
        pid = self.pid
        if pid is None:
            return False
        try:
            with open('/proc/%s/cmdline' % pid, "r") as f:
                return self.uuid in f.readline()
        except IOError:
            return False
| SripriyaSeetharam/tacker | tacker/agent/linux/external_process.py | Python | apache-2.0 | 3,284 |
"""This example demonstrates how to subscribe to topics with WAMP."""
import logging
import sys
from asphalt.core import ContainerComponent, Context, run_application
from asphalt.wamp.context import EventContext
logger = logging.getLogger(__name__)
def subscriber(ctx: EventContext, message: str):
    # Event handler: log every message published on the subscribed topic.
    logger.info('Received message from %s: %s', ctx.topic, message)
class SubscriberComponent(ContainerComponent):
    """Root component that connects to WAMP and subscribes to one topic."""
    async def start(self, ctx: Context):
        self.add_component('wamp')
        await super().start(ctx)
        # The topic name is taken from the first command line argument
        # (presence is validated by the module-level guard below).
        topic = sys.argv[1]
        await ctx.wamp.subscribe(subscriber, topic)
        logger.info('Subscribed to topic: %s', topic)
# Require the topic as a command line argument before starting the app.
if len(sys.argv) < 2:
    print('Usage: {} <topic>'.format(sys.argv[0]), file=sys.stderr)
    sys.exit(1)
run_application(SubscriberComponent(), logging=logging.INFO)
| asphalt-framework/asphalt-wamp | examples/pubsub/subscriber.py | Python | apache-2.0 | 834 |
# -*- coding: utf-8 -*-
#
# botocore documentation build configuration file, created by
# sphinx-quickstart on Sun Dec 2 07:26:23 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# One import per line per PEP 8; ``sys`` is kept (never delete existing
# imports) -- the commented-out sys.path hook below may rely on it.
import os
import sys

from botocore.session import get_session
from botocore.docs import generate_docs

# Generate the service API reference pages next to this conf file before
# Sphinx builds, so the docs always match the installed botocore.
generate_docs(os.path.dirname(os.path.abspath(__file__)), get_session())
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'botocore'
copyright = u'2013, Mitch Garnaat'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# NOTE(review): Sphinx convention is the short X.Y form here (e.g. '1.20');
# confirm the extra '.1' component is intentional.
version = '1.20.1'
# The full version, including alpha/beta/rc tags.
release = '1.20.106'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# (Overridden by the guzzle_sphinx_theme setup further below.)
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_show_sourcelink = False
html_sidebars = {
    '**': ['logo-text.html',
           'globaltoc.html',
           'localtoc.html',
           'searchbox.html']
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'botocoredoc'
# Switch to the Guzzle theme (overrides html_theme set above).
import guzzle_sphinx_theme
extensions.append("guzzle_sphinx_theme")
# NOTE(review): html_translator_class was dropped in newer Sphinx releases;
# confirm the Sphinx version pinned for this build still supports it.
html_translator_class = 'guzzle_sphinx_theme.HTMLTranslator'
html_theme_path = guzzle_sphinx_theme.html_theme_path()
html_theme = 'guzzle_sphinx_theme'
# Guzzle theme options (see theme.conf for more information)
html_theme_options = {
    # hack to add tracking
    "google_analytics_account": os.getenv('TRACKING', False),
    # NOTE(review): this base_url points at the PHP SDK guide; verify it is
    # the intended canonical URL for botocore's documentation.
    "base_url": "http://docs.aws.amazon.com/aws-sdk-php/guide/latest/"
}
# -- Options for LaTeX output --------------------------------------------------
# NOTE: the LaTeX/man/Texinfo settings below are sphinx-quickstart boilerplate.
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ('index', 'botocore.tex', u'botocore Documentation',
     u'Mitch Garnaat', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'botocore', u'botocore Documentation',
     [u'Mitch Garnaat'], 3)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'botocore', u'botocore Documentation',
     u'Mitch Garnaat', 'botocore', 'One line description of project.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| pplu/botocore | docs/source/conf.py | Python | apache-2.0 | 8,523 |
#!/usr/bin/env python2.7
############################################################################
##
## Copyright (c) 2000-2015 BalaBit IT Ltd, Budapest, Hungary
## Copyright (c) 2015-2018 BalaSys IT Ltd, Budapest, Hungary
##
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License along
## with this program; if not, write to the Free Software Foundation, Inc.,
## 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
##
############################################################################
import unittest
from HandlerMock import HandlerMock
from zorpctl.szig import SZIG
class TestSzig(unittest.TestCase):
    """Tests for the SZIG protocol wrapper, backed by HandlerMock instead
    of a live Zorp instance."""
    def setUp(self):
        self.szig = SZIG("", HandlerMock)
    def test_get_value(self):
        # assertEqual is used throughout: assertEquals is a deprecated alias.
        self.assertEqual(self.szig.get_value(""), None)
        self.assertEqual(self.szig.get_value("service"), None)
        self.assertEqual(self.szig.get_value("info.policy.file"), "/etc/zorp/policy.py")
        self.assertEqual(self.szig.get_value("stats.thread_number"), 5)
        self.assertEqual(self.szig.get_value("service.service_http_transparent.sessions_running"), 0)
    def test_get_sibling(self):
        self.assertEqual(self.szig.get_sibling("conns"), "info")
        self.assertEqual(self.szig.get_sibling("stats.threads_running"), "stats.thread_rate_max")
        self.assertEqual(self.szig.get_sibling("stats.thread_rate_max"), "stats.audit_number")
        self.assertEqual(self.szig.get_sibling("stats.thread_number"), None)
    def test_get_child(self):
        self.assertEqual(self.szig.get_child(""), "conns")
        self.assertEqual(self.szig.get_child("info"), "info.policy")
        self.assertEqual(self.szig.get_child("info.policy"), "info.policy.reload_stamp")
        self.assertEqual(self.szig.get_child("info.policy.reload_stamp"), None)
    def test_get_set_loglevel(self):
        loglevel = 6
        self.szig.loglevel = loglevel
        self.assertEqual(self.szig.loglevel, loglevel)
    def test_get_set_logspec(self):
        logspec = "this is a logspec"
        self.szig.logspec = logspec
        self.assertEqual(self.szig.logspec, logspec)
    def test_get_set_deadlockcheck(self):
        deadlockcheck = False
        self.szig.deadlockcheck = deadlockcheck
        self.assertEqual(self.szig.deadlockcheck, deadlockcheck)
    def test_reload_and_reload_result(self):
        self.szig.reload()
        self.assertEqual(self.szig.reload_result(), True)
    def test_coredump(self):
        # coredump is known to be broken and must keep raising until fixed.
        # The previous bare ``try/except`` also swallowed the failure
        # assertion, so the test could never fail; assertRaises cannot.
        with self.assertRaises(Exception):
            self.szig.coredump()
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| mochrul/zorp | tests/zorpctl/test_szig.py | Python | gpl-2.0 | 3,271 |
from __future__ import absolute_import
import warnings
from django import forms
from django.core.urlresolvers import reverse
from django.core import exceptions
from django.db.models import Q
from django.utils.translation import pgettext, ugettext_lazy as _, ugettext
from django.utils.http import int_to_base36
from django.utils.importlib import import_module
from django.contrib.auth import authenticate
from django.contrib.auth.tokens import default_token_generator
from django.contrib.sites.models import Site
from ..utils import (email_address_exists, get_user_model,
set_form_field_order)
from .models import EmailAddress
from .utils import perform_login, setup_user_email
from .app_settings import AuthenticationMethod
from . import app_settings
from .adapter import get_adapter
User = get_user_model()
class PasswordField(forms.CharField):
    """CharField rendered with a password input widget.

    The ``render_value`` kwarg (defaulting to the app setting) controls
    whether a previously submitted value is echoed back into the widget.
    """
    def __init__(self, *args, **kwargs):
        echo_value = kwargs.pop('render_value',
                                app_settings.PASSWORD_INPUT_RENDER_VALUE)
        widget = forms.PasswordInput(
            render_value=echo_value,
            attrs={'placeholder': _('Password')})
        kwargs['widget'] = widget
        super(PasswordField, self).__init__(*args, **kwargs)
class SetPasswordField(PasswordField):
    """Password field that enforces the configured minimum length."""
    def clean(self, value):
        value = super(SetPasswordField, self).clean(value)
        required = app_settings.PASSWORD_MIN_LENGTH
        if len(value) >= required:
            return value
        raise forms.ValidationError(_("Password must be a minimum of {0} "
                                      "characters.").format(required))
class LoginForm(forms.Form):
    """Authentication form whose "login" field adapts to the configured
    AUTHENTICATION_METHOD (e-mail, username, or either)."""
    password = PasswordField(label=_("Password"))
    remember = forms.BooleanField(label=_("Remember Me"),
                                  required=False)
    # Set by clean() on successful authentication.
    user = None
    def __init__(self, *args, **kwargs):
        super(LoginForm, self).__init__(*args, **kwargs)
        # Build the "login" field dynamically so its type, label and
        # placeholder match the configured authentication method.
        if app_settings.AUTHENTICATION_METHOD == AuthenticationMethod.EMAIL:
            login_widget = forms.TextInput(attrs={'placeholder':
                                                  _('E-mail address'),
                                                  'autofocus': 'autofocus'})
            login_field = forms.EmailField(label=_("E-mail"),
                                           widget=login_widget)
        elif app_settings.AUTHENTICATION_METHOD \
                == AuthenticationMethod.USERNAME:
            login_widget = forms.TextInput(attrs={'placeholder':
                                                  _('Username'),
                                                  'autofocus': 'autofocus'})
            login_field = forms.CharField(label=_("Username"),
                                          widget=login_widget,
                                          max_length=30)
        else:
            assert app_settings.AUTHENTICATION_METHOD \
                == AuthenticationMethod.USERNAME_EMAIL
            login_widget = forms.TextInput(attrs={'placeholder':
                                                  _('Username or e-mail'),
                                                  'autofocus': 'autofocus'})
            login_field = forms.CharField(label=pgettext("field label",
                                                         "Login"),
                                          widget=login_widget)
        self.fields["login"] = login_field
        set_form_field_order(self, ["login", "password", "remember"])
    def user_credentials(self):
        """
        Provides the credentials required to authenticate the user for
        login.
        """
        credentials = {}
        login = self.cleaned_data["login"]
        if app_settings.AUTHENTICATION_METHOD == AuthenticationMethod.EMAIL:
            credentials["email"] = login
        elif (app_settings.AUTHENTICATION_METHOD
              == AuthenticationMethod.USERNAME):
            credentials["username"] = login
        else:
            # Either method allowed: a value that looks like an e-mail
            # address is passed as both email and username.
            if "@" in login and "." in login:
                credentials["email"] = login
            credentials["username"] = login
        credentials["password"] = self.cleaned_data["password"]
        return credentials
    def clean_login(self):
        login = self.cleaned_data['login']
        return login.strip()
    def clean(self):
        # Skip authentication if field-level validation already failed.
        if self._errors:
            return
        user = authenticate(**self.user_credentials())
        if user:
            if user.is_active:
                self.user = user
            else:
                raise forms.ValidationError(_("This account is currently"
                                              " inactive."))
        else:
            # Tailor the error message to the authentication method in use.
            if app_settings.AUTHENTICATION_METHOD \
                    == AuthenticationMethod.EMAIL:
                error = _("The e-mail address and/or password you specified"
                          " are not correct.")
            elif app_settings.AUTHENTICATION_METHOD \
                    == AuthenticationMethod.USERNAME:
                error = _("The username and/or password you specified are"
                          " not correct.")
            else:
                error = _("The login and/or password you specified are not"
                          " correct.")
            raise forms.ValidationError(error)
        return self.cleaned_data
    def login(self, request, redirect_url=None):
        ret = perform_login(request, self.user,
                            email_verification=app_settings.EMAIL_VERIFICATION,
                            redirect_url=redirect_url)
        # Remembered sessions last three weeks; otherwise the session
        # expires when the browser is closed (expiry 0).
        if self.cleaned_data["remember"]:
            request.session.set_expiry(60 * 60 * 24 * 7 * 3)
        else:
            request.session.set_expiry(0)
        return ret
class _DummyCustomSignupForm(forms.Form):
    # Stand-in base used when no custom SIGNUP_FORM_CLASS is configured.
    def signup(self, request, user):
        """
        Invoked at signup time to complete the signup of the user.
        """
        pass
def _base_signup_form_class():
    """
    Currently, we inherit from the custom form, if any. This is all
    not very elegant, though it serves a purpose:
    - There are two signup forms: one for local accounts, and one for
    social accounts
    - Both share a common base (BaseSignupForm)
    - Given the above, how to put in a custom signup form? Which form
    would your custom form derive from, the local or the social one?
    """
    if not app_settings.SIGNUP_FORM_CLASS:
        return _DummyCustomSignupForm
    # SIGNUP_FORM_CLASS is a dotted path: "package.module.ClassName".
    try:
        fc_module, fc_classname = app_settings.SIGNUP_FORM_CLASS.rsplit('.', 1)
    except ValueError:
        raise exceptions.ImproperlyConfigured('%s does not point to a form'
                                              ' class'
                                              % app_settings.SIGNUP_FORM_CLASS)
    try:
        mod = import_module(fc_module)
    except ImportError as e:
        raise exceptions.ImproperlyConfigured('Error importing form class %s:'
                                              ' "%s"' % (fc_module, e))
    try:
        fc_class = getattr(mod, fc_classname)
    except AttributeError:
        raise exceptions.ImproperlyConfigured('Module "%s" does not define a'
                                              ' "%s" class' % (fc_module,
                                                               fc_classname))
    # The contract for custom signup forms requires a save() method.
    if not hasattr(fc_class, 'save'):
        raise exceptions.ImproperlyConfigured('The custom signup form must'
                                              ' implement a "save" method')
    return fc_class
class BaseSignupForm(_base_signup_form_class()):
    """Common base for local and social signup forms, deriving from the
    user-configured SIGNUP_FORM_CLASS (if any) so custom fields mix in."""
    username = forms.CharField(label=_("Username"),
                               max_length=30,
                               min_length=app_settings.USERNAME_MIN_LENGTH,
                               widget=forms.TextInput(
                                   attrs={'placeholder':
                                          _('Username'),
                                          }))
    email = forms.EmailField(widget=forms.TextInput(attrs=
                                                    {'placeholder':
                                                     _('E-mail address')}))
    def __init__(self, *args, **kwargs):
        email_required = kwargs.pop('email_required',
                                    app_settings.EMAIL_REQUIRED)
        self.username_required = kwargs.pop('username_required',
                                            app_settings.USERNAME_REQUIRED)
        super(BaseSignupForm, self).__init__(*args, **kwargs)
        # field order may contain additional fields from our base class,
        # so take proper care when reordering...
        field_order = ['email', 'username']
        merged_field_order = list(self.fields.keys())
        if email_required:
            self.fields["email"].label = ugettext("E-mail")
            self.fields["email"].required = True
        else:
            self.fields["email"].label = ugettext("E-mail (optional)")
            self.fields["email"].required = False
        if self.username_required:
            field_order = ['email', 'username']
        # Drop email/username from their current positions, then pull the
        # password fields to the front of the remaining order.
        for field in field_order:
            merged_field_order = [s for s in merged_field_order if s != field]
        for field in ['password2', 'password1']:
            if field in merged_field_order:
                merged_field_order = [s for s in merged_field_order if s != field]
                merged_field_order.insert(0, field)
        # Merge our email and username fields in if they are not
        # currently in the order. This is to allow others to
        # re-arrange email and username if they desire. Go in reverse
        # so that we make sure the inserted items are always
        # prepended.
        for field in reversed(field_order):
            if not field in merged_field_order:
                merged_field_order.insert(0, field)
        set_form_field_order(self, merged_field_order)
        if not self.username_required:
            del self.fields["username"]
    def clean_username(self):
        # Delegate username validation/normalization to the adapter.
        value = self.cleaned_data["username"]
        value = get_adapter().clean_username(value)
        return value
    def clean_email(self):
        value = self.cleaned_data["email"]
        value = get_adapter().clean_email(value)
        # Enforce uniqueness across accounts when UNIQUE_EMAIL is set.
        if app_settings.UNIQUE_EMAIL:
            if value and email_address_exists(value):
                self.raise_duplicate_email_error()
        return value
    def raise_duplicate_email_error(self):
        raise forms.ValidationError(_("A user is already registered"
                                      " with this e-mail address."))
    def custom_signup(self, request, user):
        # Prefer the modern signup(request, user) hook on the custom form,
        # falling back to the legacy save(user) with a deprecation warning.
        custom_form = super(BaseSignupForm, self)
        if hasattr(custom_form, 'signup') and callable(custom_form.signup):
            custom_form.signup(request, user)
        else:
            warnings.warn("The custom signup form must offer"
                          " a `def signup(self, request, user)` method",
                          DeprecationWarning)
            # Historically, it was called .save, but this is confusing
            # in case of ModelForm
            custom_form.save(user)
class SignupForm(BaseSignupForm):
    """Local-account signup form: password plus optional verification."""
    password1 = SetPasswordField(label=_("Password"))
    password2 = PasswordField(label=_("Password (again)"))
    confirmation_key = forms.CharField(max_length=40,
                                       required=False,
                                       widget=forms.HiddenInput())
    def __init__(self, *args, **kwargs):
        super(SignupForm, self).__init__(*args, **kwargs)
        # Ask for the password only once when verification is disabled.
        if not app_settings.SIGNUP_PASSWORD_VERIFICATION:
            del self.fields["password2"]
    def clean(self):
        super(SignupForm, self).clean()
        # Both password fields, when present, must match.
        if app_settings.SIGNUP_PASSWORD_VERIFICATION \
                and "password1" in self.cleaned_data \
                and "password2" in self.cleaned_data:
            if self.cleaned_data["password1"] \
                    != self.cleaned_data["password2"]:
                raise forms.ValidationError(_("You must type the same password"
                                              " each time."))
        return self.cleaned_data
    def save(self, request):
        # The adapter owns user creation and persistence.
        adapter = get_adapter()
        user = adapter.new_user(request)
        adapter.save_user(request, user, self)
        self.custom_signup(request, user)
        # TODO: Move into adapter `save_user` ?
        setup_user_email(request, user, [])
        return user
class UserForm(forms.Form):
    """Base form bound to a specific user instance via ``self.user``."""
    def __init__(self, user=None, *args, **kwargs):
        super(UserForm, self).__init__(*args, **kwargs)
        self.user = user
class AddEmailForm(UserForm):
    """Form for attaching an additional e-mail address to ``self.user``."""
    email = forms.EmailField(label=_("E-mail"),
                             required=True,
                             widget=forms.TextInput(attrs={"size": "30"}))
    def clean_email(self):
        value = self.cleaned_data["email"]
        value = get_adapter().clean_email(value)
        errors = {
            "this_account": _("This e-mail address is already associated"
                              " with this account."),
            "different_account": _("This e-mail address is already associated"
                                   " with another account."),
        }
        # Case-insensitive lookup of existing addresses.
        emails = EmailAddress.objects.filter(email__iexact=value)
        if emails.filter(user=self.user).exists():
            raise forms.ValidationError(errors["this_account"])
        if app_settings.UNIQUE_EMAIL:
            if emails.exclude(user=self.user).exists():
                raise forms.ValidationError(errors["different_account"])
        return value
    def save(self, request):
        # confirm=True: the manager initiates confirmation of the address.
        return EmailAddress.objects.add_email(request,
                                              self.user,
                                              self.cleaned_data["email"],
                                              confirm=True)
class ChangePasswordForm(UserForm):
    """Password change form requiring the user's current password."""
    oldpassword = PasswordField(label=_("Current Password"))
    password1 = SetPasswordField(label=_("New Password"))
    password2 = PasswordField(label=_("New Password (again)"))
    def clean_oldpassword(self):
        if not self.user.check_password(self.cleaned_data.get("oldpassword")):
            raise forms.ValidationError(_("Please type your current"
                                          " password."))
        return self.cleaned_data["oldpassword"]
    def clean_password2(self):
        # Both fields must be present and identical (mirrors SetPasswordForm).
        if ("password1" in self.cleaned_data
                and "password2" in self.cleaned_data):
            if (self.cleaned_data["password1"]
                    != self.cleaned_data["password2"]):
                raise forms.ValidationError(_("You must type the same password"
                                              " each time."))
        return self.cleaned_data["password2"]
    def save(self):
        self.user.set_password(self.cleaned_data["password1"])
        self.user.save()
class SetPasswordForm(UserForm):
    """Sets a password for users that have none (no current password asked)."""
    password1 = SetPasswordField(label=_("Password"))
    password2 = PasswordField(label=_("Password (again)"))
    def clean_password2(self):
        # Both fields must be present and identical.
        if ("password1" in self.cleaned_data
                and "password2" in self.cleaned_data):
            if (self.cleaned_data["password1"]
                    != self.cleaned_data["password2"]):
                raise forms.ValidationError(_("You must type the same password"
                                              " each time."))
        return self.cleaned_data["password2"]
    def save(self):
        self.user.set_password(self.cleaned_data["password1"])
        self.user.save()
class ResetPasswordForm(forms.Form):
    """Requests a password reset: looks up user(s) by e-mail address and
    mails a tokenized reset link to each match."""
    email = forms.EmailField(label=_("E-mail"),
                             required=True,
                             widget=forms.TextInput(attrs={"size": "30"}))
    def clean_email(self):
        email = self.cleaned_data["email"]
        email = get_adapter().clean_email(email)
        # Match either the user's primary e-mail or any linked address.
        self.users = User.objects \
            .filter(Q(email__iexact=email)
                    | Q(emailaddress__email__iexact=email)).distinct()
        if not self.users.exists():
            raise forms.ValidationError(_("The e-mail address is not assigned"
                                          " to any user account"))
        return self.cleaned_data["email"]
    def save(self, **kwargs):
        email = self.cleaned_data["email"]
        token_generator = kwargs.get("token_generator",
                                     default_token_generator)
        # One reset mail per matching user account.
        for user in self.users:
            temp_key = token_generator.make_token(user)
            # save it to the password reset model
            # password_reset = PasswordReset(user=user, temp_key=temp_key)
            # password_reset.save()
            current_site = Site.objects.get_current()
            # send the password reset email
            path = reverse("account_reset_password_from_key",
                           kwargs=dict(uidb36=int_to_base36(user.id),
                                       key=temp_key))
            url = '%s://%s%s' % (app_settings.DEFAULT_HTTP_PROTOCOL,
                                 current_site.domain,
                                 path)
            context = {"site": current_site,
                       "user": user,
                       "password_reset_url": url}
            get_adapter().send_mail('account/email/password_reset_key',
                                    email,
                                    context)
        return self.cleaned_data["email"]
class ResetPasswordKeyForm(forms.Form):
    """Sets a new password for ``user``, reached via a reset-key link."""
    password1 = SetPasswordField(label=_("New Password"))
    password2 = PasswordField(label=_("New Password (again)"))
    def __init__(self, *args, **kwargs):
        # The view supplies the user and the temporary reset key.
        self.user = kwargs.pop("user", None)
        self.temp_key = kwargs.pop("temp_key", None)
        super(ResetPasswordKeyForm, self).__init__(*args, **kwargs)
    # FIXME: Inspecting other fields -> should be put in def clean(self) ?
    def clean_password2(self):
        if ("password1" in self.cleaned_data
                and "password2" in self.cleaned_data):
            if (self.cleaned_data["password1"]
                    != self.cleaned_data["password2"]):
                raise forms.ValidationError(_("You must type the same"
                                              " password each time."))
        return self.cleaned_data["password2"]
    def save(self):
        # set the new user password
        user = self.user
        user.set_password(self.cleaned_data["password1"])
        user.save()
| joebos/django-allauth | allauth/account/forms.py | Python | mit | 18,730 |
# Time: O(nlogn)
# Space: O(n)
import collections
class Solution:
    # @param {string[]} strings
    # @return {string[][]}
    def groupStrings(self, strings):
        """Group strings belonging to the same cyclic shifting sequence.

        Two strings share a group when one is obtained from the other by
        shifting every character the same distance around the alphabet
        (e.g. "abc" -> "bcd" -> ... -> "xyz"). Each group is sorted.

        Uses constructs valid in both Python 2 and 3 (the original relied
        on ``iteritems``/``xrange``/``unichr``).
        """
        groups = collections.defaultdict(list)
        for s in strings:  # bucket by shift-invariant signature
            groups[self.hashStr(s)].append(s)
        return [sorted(group) for group in groups.values()]

    def hashStr(self, s):
        """Return the canonical form of ``s``: every character replaced by
        its alphabet distance from the first character (mapped onto
        'a'..'z'), so equal results mean "same shift group".
        """
        base = ord(s[0])
        # (ord(c) - base) % 26 handles wrap-around past 'z' in one step,
        # replacing the original's explicit negative-offset branch.
        return ''.join(chr(ord('a') + (ord(c) - base) % 26) for c in s)
| kamyu104/LeetCode | Python/group-shifted-strings.py | Python | mit | 745 |
from typing import Tuple
from dash.utils import chunks, is_dict_equal
from dash.utils.sync import BaseSyncer, SyncOutcome, sync_local_to_changes, sync_local_to_set
from django.utils.timezone import now
from casepro.contacts.models import Contact, Field, Group
from casepro.msgs.models import Label, Message, Outgoing
from casepro.orgs_ext.models import Flow
from casepro.utils.email import send_raw_email
from . import BaseBackend
# There is no concept of flagging in RapidPro, so it is modelled with a
# reserved label name on the remote side (see MessageSyncer/LabelSyncer).
SYSTEM_LABEL_FLAGGED = "Flagged"
# Maximum age (in days) a message may be and still be handled; older
# messages are synced as already handled (see MessageSyncer.local_kwargs).
MAXIMUM_HANDLE_MESSAGE_AGE = 30
def remote_message_is_flagged(msg):
    """Return True when the remote message carries the pseudo-label used
    to represent flagging (RapidPro has no native flagged state)."""
    # any() short-circuits and avoids materializing a list of label names.
    return any(l.name == SYSTEM_LABEL_FLAGGED for l in msg.labels)
def remote_message_is_archived(msg):
    # Visibility is a string state; "deleted" is handled separately in
    # MessageSyncer.local_kwargs.
    return msg.visibility == "archived"
class ContactSyncer(BaseSyncer):
    """
    Syncer for contacts
    """
    model = Contact
    prefetch_related = ("groups",)
    def local_kwargs(self, org, remote):
        # groups and fields are updated via a post save signal handler
        groups = [(g.uuid, g.name) for g in remote.groups]
        fields = {k: v for k, v in remote.fields.items() if v is not None}  # don't include none values
        return {
            "org": org,
            "uuid": remote.uuid,
            "name": remote.name,
            "language": remote.language,
            "urns": remote.urns,
            "is_blocked": remote.blocked,
            "is_stopped": remote.stopped,
            "is_stub": False,
            "fields": fields,
            Contact.SAVE_GROUPS_ATTR: groups,
        }
    def update_required(self, local, remote, remote_as_kwargs):
        # Compare cheap scalar attributes first, then group membership,
        # then the field dicts (None values ignored on both sides).
        if (
            local.is_stub
            or local.name != remote.name
            or local.language != remote.language
            or local.is_blocked != remote.blocked
            or local.is_stopped != remote.stopped
            or local.urns != remote.urns
        ):
            return True
        if {g.uuid for g in local.groups.all()} != {g.uuid for g in remote.groups}:
            return True
        return not is_dict_equal(local.get_fields(), remote.fields, ignore_none_values=True)
    def delete_local(self, local):
        # Delegate to the model's own release() rather than deleting rows.
        local.release()
class FieldSyncer(BaseSyncer):
    """
    Syncer for contact fields
    """
    model = Field
    local_id_attr = "key"
    remote_id_attr = "key"
    def local_kwargs(self, org, remote):
        # Unknown remote value types fall back to plain text.
        return {
            "org": org,
            "key": remote.key,
            "label": remote.label,
            "value_type": self.model.TEMBA_TYPES.get(remote.value_type, self.model.TYPE_TEXT),
        }
    def update_required(self, local, remote, remote_as_kwargs):
        # Use the same TYPE_TEXT fallback as local_kwargs: without it an
        # unrecognized remote value type would compare the stored TYPE_TEXT
        # against None and report an update on every sync pass.
        return (
            local.label != remote.label
            or local.value_type != self.model.TEMBA_TYPES.get(remote.value_type, self.model.TYPE_TEXT)
        )
class GroupSyncer(BaseSyncer):
    """Syncer for contact groups."""
    model = Group

    def local_kwargs(self, org, remote):
        # Groups defined by a query are flagged as dynamic.
        dynamic = remote.query is not None
        return dict(
            org=org,
            uuid=remote.uuid,
            name=remote.name,
            count=remote.count,
            is_dynamic=dynamic,
        )

    def update_required(self, local, remote, remote_as_kwargs):
        if local.name != remote.name:
            return True
        if local.count != remote.count:
            return True
        return local.is_dynamic != remote_as_kwargs["is_dynamic"]
class LabelSyncer(BaseSyncer):
    """
    Syncer for message labels
    """
    model = Label
    def local_kwargs(self, org, remote):
        # don't create locally if this is just the pseudo-label for flagging
        if remote.name == SYSTEM_LABEL_FLAGGED:
            return None
        # don't create locally if there's an non-synced label with same name or UUID
        for l in org.labels.all():
            if not l.is_synced and (l.name == remote.name or l.uuid == remote.uuid):
                return None
        return {"org": org, "uuid": remote.uuid, "name": remote.name}
    def update_required(self, local, remote, remote_as_kwargs):
        # Only the name is mirrored, so only a name change needs an update.
        return local.name != remote.name
    def fetch_all(self, org):
        # Only labels marked as synced participate in syncing.
        return super(LabelSyncer, self).fetch_all(org).filter(is_synced=True)
class MessageSyncer(BaseSyncer):
    """
    Syncer for messages
    """
    model = Message
    local_id_attr = "backend_id"
    remote_id_attr = "id"
    select_related = ("contact",)
    prefetch_related = ("labels",)
    def __init__(self, backend=None, as_handled=False):
        super(MessageSyncer, self).__init__(backend)
        # When True, every synced message is marked handled immediately.
        self.as_handled = as_handled
    def local_kwargs(self, org, remote):
        # Never mirror remotely-deleted messages locally.
        if remote.visibility == "deleted":
            return None
        # labels are updated via a post save signal handler
        labels = [(l.uuid, l.name) for l in remote.labels if l.name != SYSTEM_LABEL_FLAGGED]
        kwargs = {
            "org": org,
            "backend_id": remote.id,
            "type": "I" if remote.type == "inbox" else "F",
            "text": remote.text,
            "is_flagged": remote_message_is_flagged(remote),
            "is_archived": remote_message_is_archived(remote),
            "created_on": remote.created_on,
            Message.SAVE_CONTACT_ATTR: (remote.contact.uuid, remote.contact.name),
            Message.SAVE_LABELS_ATTR: labels,
        }
        # if syncer is set explicitly or message is too old, save as handled already
        if self.as_handled or (now() - remote.created_on).days > MAXIMUM_HANDLE_MESSAGE_AGE:
            kwargs["is_handled"] = True
        return kwargs
    def update_required(self, local, remote, remote_as_kwargs):
        if local.is_flagged != remote_message_is_flagged(remote):
            return True
        if local.is_archived != remote_message_is_archived(remote):
            return True
        # Compare label sets, excluding the pseudo-label used for flagging.
        local_label_uuids = {l.uuid for l in local.labels.all()}
        incoming_label_uuids = {l.uuid for l in remote.labels if l.name != SYSTEM_LABEL_FLAGGED}
        return local_label_uuids != incoming_label_uuids
    def delete_local(self, local):
        # Delegate to the model's own release() rather than deleting rows.
        local.release()
class RapidProBackend(BaseBackend):
    """
    RapidPro instance as a backend
    """

    # TODO reset to 100 when limit is fixed on RapidPro side
    BATCH_SIZE = 99

    FETCH_TIME_LIMIT = 30 * 60  # 30 minutes

    @staticmethod
    def _get_client(org):
        """Returns a v2 API client for the org's RapidPro instance."""
        return org.get_temba_client(api_version=2)

    @staticmethod
    def _counts(d: dict) -> Tuple[int, int, int, int]:
        """Converts a sync outcome counts dict into a (created, updated, deleted, ignored) tuple."""
        return d[SyncOutcome.created], d[SyncOutcome.updated], d[SyncOutcome.deleted], d[SyncOutcome.ignored]

    def pull_contacts(self, org, modified_after, modified_before, progress_callback=None, resume_cursor: str = None):
        """
        Pulls contacts created/modified or deleted in the given time window.

        Returns (num_created, num_updated, num_deleted, num_ignored, resume_cursor) where
        resume_cursor is non-None if the fetch hit the time limit and should be resumed.
        """
        client = self._get_client(org)

        # all contacts created or modified in RapidPro in the time window
        active_query = client.get_contacts(after=modified_after, before=modified_before)
        fetches = active_query.iterfetches(retry_on_rate_exceed=True, resume_cursor=resume_cursor)

        # all contacts deleted in RapidPro in the same time window
        deleted_query = client.get_contacts(deleted=True, after=modified_after, before=modified_before)
        deleted_fetches = deleted_query.iterfetches(retry_on_rate_exceed=True)

        counts, resume_cursor = sync_local_to_changes(
            org,
            ContactSyncer(backend=self.backend),
            fetches,
            deleted_fetches,
            progress_callback,
            time_limit=self.FETCH_TIME_LIMIT,
        )

        return self._counts(counts) + (resume_cursor,)

    def pull_fields(self, org):
        """Pulls all contact fields, returning (created, updated, deleted, ignored) counts."""
        client = self._get_client(org)

        incoming_objects = client.get_fields().all(retry_on_rate_exceed=True)

        return self._counts(sync_local_to_set(org, FieldSyncer(backend=self.backend), incoming_objects))

    def pull_groups(self, org):
        """Pulls all contact groups, returning (created, updated, deleted, ignored) counts."""
        client = self._get_client(org)

        incoming_objects = client.get_groups().all(retry_on_rate_exceed=True)

        return self._counts(sync_local_to_set(org, GroupSyncer(backend=self.backend), incoming_objects))

    def pull_labels(self, org):
        """Pulls all message labels, returning (created, updated, deleted, ignored) counts."""
        client = self._get_client(org)

        incoming_objects = client.get_labels().all(retry_on_rate_exceed=True)

        return self._counts(sync_local_to_set(org, LabelSyncer(backend=self.backend), incoming_objects))

    def pull_messages(
        self, org, modified_after, modified_before, as_handled=False, progress_callback=None, resume_cursor: str = None
    ):
        """
        Pulls incoming messages created/modified in the given time window.

        Returns (num_created, num_updated, num_deleted, num_ignored, resume_cursor) where
        resume_cursor is non-None if the fetch hit the time limit and should be resumed.
        """
        client = self._get_client(org)

        # all incoming messages created or modified in RapidPro in the time window
        query = client.get_messages(folder="incoming", after=modified_after, before=modified_before)
        fetches = query.iterfetches(retry_on_rate_exceed=True, resume_cursor=resume_cursor)

        counts, resume_cursor = sync_local_to_changes(
            org,
            MessageSyncer(backend=self.backend, as_handled=as_handled),
            fetches,
            [],  # messages are never deleted remotely, so there is no deleted-fetch stream
            progress_callback,
            time_limit=self.FETCH_TIME_LIMIT,
        )

        return self._counts(counts) + (resume_cursor,)

    def push_label(self, org, label):
        """Pushes a local label to RapidPro, reusing any remote label with the same name."""
        client = self._get_client(org)
        temba_label = client.get_labels(name=label.name).first()

        if temba_label:
            remote = temba_label
        else:
            remote = client.create_label(name=label.name)

        # record the remote UUID so future syncs can match this label
        label.uuid = remote.uuid
        label.save(update_fields=("uuid",))

    def push_outgoing(self, org, outgoing, as_broadcast=False):
        """
        Sends the given outgoing messages. Messages to mailto: URNs are sent via CasePro's
        own email system; everything else is sent through RapidPro, either batched into
        broadcasts (as_broadcast=True) or as one broadcast per message.
        """
        client = self._get_client(org)

        # RapidPro currently doesn't send emails so we use the CasePro email system to send those instead
        for_backend = []
        for msg in outgoing:
            if msg.urn and msg.urn.startswith("mailto:"):
                to_address = msg.urn.split(":", 1)[1]
                send_raw_email([to_address], "New message", msg.text, None)
            else:
                for_backend.append(msg)

        if not for_backend:
            return

        if as_broadcast:
            # we might not be able to send all as a single broadcast, so we batch
            for batch in chunks(for_backend, self.BATCH_SIZE):
                contact_uuids = []
                urns = []
                for msg in batch:
                    if msg.contact:
                        contact_uuids.append(msg.contact.uuid)
                    if msg.urn:
                        urns.append(msg.urn)
                # all messages in a broadcast share the text of the first outgoing message
                text = outgoing[0].text
                broadcast = client.create_broadcast(text=text, contacts=contact_uuids, urns=urns)

                for msg in batch:
                    msg.backend_broadcast_id = broadcast.id

                Outgoing.objects.filter(pk__in=[o.id for o in batch]).update(backend_broadcast_id=broadcast.id)
        else:
            for msg in for_backend:
                contact_uuids = [msg.contact.uuid] if msg.contact else []
                urns = [msg.urn] if msg.urn else []
                broadcast = client.create_broadcast(text=msg.text, contacts=contact_uuids, urns=urns)

                msg.backend_broadcast_id = broadcast.id
                msg.save(update_fields=("backend_broadcast_id",))

    def resolve_urn(self, org, normalized_urn):
        """Returns the UUID of the contact with the given URN, creating the contact if necessary."""
        client = self._get_client(org)
        contact = client.get_contacts(urn=normalized_urn)
        if not contact.first():
            contact = client.create_contact(urns=[normalized_urn])
        else:
            contact = contact.first()
        return contact.uuid

    def add_to_group(self, org, contact, group):
        """Adds the contact to the given group on the backend."""
        client = self._get_client(org)
        client.bulk_add_contacts([contact.uuid], group=group.uuid)

    def remove_from_group(self, org, contact, group):
        """Removes the contact from the given group on the backend."""
        client = self._get_client(org)
        client.bulk_remove_contacts([contact.uuid], group=group.uuid)

    def stop_runs(self, org, contact):
        """Interrupts any active flow runs for the contact on the backend."""
        client = self._get_client(org)
        client.bulk_interrupt_contacts(contacts=[contact.uuid])

    def label_messages(self, org, messages, label):
        """Applies the given label to the messages on the backend, in batches."""
        client = self._get_client(org)
        for batch in chunks(messages, self.BATCH_SIZE):
            client.bulk_label_messages(messages=[m.backend_id for m in batch], label=label.uuid)

    def unlabel_messages(self, org, messages, label):
        """Removes the given label from the messages on the backend, in batches."""
        client = self._get_client(org)
        for batch in chunks(messages, self.BATCH_SIZE):
            client.bulk_unlabel_messages(messages=[m.backend_id for m in batch], label=label.uuid)

    def archive_messages(self, org, messages):
        """Archives the given messages on the backend, in batches."""
        client = self._get_client(org)
        for batch in chunks(messages, self.BATCH_SIZE):
            client.bulk_archive_messages(messages=[m.backend_id for m in batch])

    def archive_contact_messages(self, org, contact):
        """Archives all of a contact's messages on the backend."""
        client = self._get_client(org)
        client.bulk_archive_contact_messages(contacts=[contact.uuid])

    def restore_messages(self, org, messages):
        """Restores (un-archives) the given messages on the backend, in batches."""
        client = self._get_client(org)
        for batch in chunks(messages, self.BATCH_SIZE):
            client.bulk_restore_messages(messages=[m.backend_id for m in batch])

    def flag_messages(self, org, messages):
        """Flags the given messages by applying the pseudo-label used for flagging."""
        client = self._get_client(org)
        for batch in chunks(messages, self.BATCH_SIZE):
            client.bulk_label_messages(messages=[m.backend_id for m in batch], label_name=SYSTEM_LABEL_FLAGGED)

    def unflag_messages(self, org, messages):
        """Un-flags the given messages by removing the pseudo-label used for flagging."""
        client = self._get_client(org)
        for batch in chunks(messages, self.BATCH_SIZE):
            client.bulk_unlabel_messages(messages=[m.backend_id for m in batch], label_name=SYSTEM_LABEL_FLAGGED)

    def fetch_contact_messages(self, org, contact, created_after, created_before):
        """
        Used to grab messages sent to the contact from RapidPro that we won't have in CasePro
        """
        # fetch remote messages for contact
        client = self._get_client(org)
        remote_messages = client.get_messages(contact=contact.uuid, after=created_after, before=created_before).all()

        def remote_as_outgoing(msg):
            # wrap as an unsaved Outgoing so callers can render it alongside local messages
            return Outgoing(
                backend_broadcast_id=msg.broadcast, contact=contact, text=msg.text, created_on=msg.created_on
            )

        # NOTE: removed a stray docstring here that was copy-pasted from fetch_flows()
        return [remote_as_outgoing(m) for m in remote_messages if m.direction == "out"]

    def fetch_flows(self, org):
        """
        Fetches flows which can be used as a follow-up flow
        """
        flows = self._get_client(org).get_flows().all()
        flows = [Flow(flow.uuid, flow.name) for flow in flows if not flow.archived]
        return sorted(flows, key=lambda f: f.name)

    def start_flow(self, org, flow, contact, extra):
        """
        Starts the given contact in the given flow
        """
        client = self._get_client(org)
        client.create_flow_start(flow=flow.uuid, contacts=[str(contact.uuid)], restart_participants=True, params=extra)

    def get_url_patterns(self):
        """
        No urls to register as everything is pulled from RapidPro
        """
        return []
| rapidpro/casepro | casepro/backend/rapidpro.py | Python | bsd-3-clause | 15,169 |
### resource API
from django.core.exceptions import ObjectDoesNotExist
from django.core.files import File
from django.core.files.uploadedfile import UploadedFile
import django.dispatch
from django.contrib.auth.models import User
from mezzanine.generic.models import Keyword, AssignedKeyword
from dublincore.models import QualifiedDublinCoreElement
from hs_core.hydroshare import hs_bagit
from hs_core.hydroshare.utils import get_resource_types
from hs_core.models import ResourceFile
from . import utils
import os
# Fired before a resource is created so receivers can validate/inspect the metadata and files
pre_create_resource = django.dispatch.Signal(providing_args=['dublin_metadata', 'files'])
# Fired after a resource (and its initial bag) has been created
post_create_resource = django.dispatch.Signal(providing_args=['resource'])
def get_resource(pk):
    """
    Retrieve the resource identified by pk from HydroShare, as a zipped BagIt archive
    containing both resource content and science metadata.

    REST URL: GET /resource/{pid}

    :param pk: unique HydroShare identifier (pid) for the resource to be retrieved
    :return: bytes of the specified resource (OctetStream)

    Raises:
        Exceptions.NotAuthorized - The user is not authorized
        Exceptions.NotFound - The resource identified by pid does not exist
        Exception.ServiceFailure - The service is unable to process the request

    Notes:
    Every resource and resource version has a unique internal HydroShare identifier (pid).
    A DOI is only assigned to formally published versions; resolve a DOI to a pid first via
    HydroShare.resolveDOI(). The obsoletion chain lives in the system metadata and can be
    traversed via HydroShare.getSystemMetadata().
    """
    # Look the resource up by its short key and return its most recent bagit archive.
    resource = utils.get_resource_by_shortkey(pk)
    return resource.bags.first()
def get_science_metadata(pk):
    """
    Describes the resource identified by pk by returning its science metadata object.

    REST URL: GET /scimeta/{pid}

    :param pk: unique HydroShare identifier for the resource whose science metadata is wanted
    :return: science metadata document describing the resource (ScienceMetadata)

    Raises:
        Exceptions.NotAuthorized - The user is not authorized
        Exceptions.NotFound - The resource identified by pid does not exist
        Exception.ServiceFailure - The service is unable to process the request
    """
    # Science metadata is currently served from the same object as system metadata.
    science_metadata = get_system_metadata(pk)
    return science_metadata
def get_system_metadata(pk):
    """
    Describes the resource identified by pk by returning its system metadata object.

    REST URL: GET /sysmeta/{pid}

    :param pk: unique HydroShare identifier for the resource whose system metadata is wanted
    :return: system metadata document describing the resource (SystemMetadata)

    Raises:
        Exceptions.NotAuthorized - The user is not authorized
        Exceptions.NotFound - The resource identified by pid does not exist
        Exception.ServiceFailure - The service is unable to process the request
    """
    # The resource object itself currently serves as its system metadata document.
    resource = utils.get_resource_by_shortkey(pk)
    return resource
def get_resource_map(pk):
    """
    Describes the resource identified by pk by returning its resource map document.

    REST URL: GET /resourcemap/{pid}

    :param pk: unique HydroShare identifier for the resource whose resource map is wanted
    :return: resource map document describing the resource (ResourceMap)

    Raises:
        Exceptions.NotAuthorized - The user is not authorized
        Exceptions.NotFound - The resource identified by pid does not exist
        Exception.ServiceFailure - The service is unable to process the request
    """
    # The resource object itself currently serves as its resource map document.
    resource = utils.get_resource_by_shortkey(pk)
    return resource
def get_capabilities(pk):
    """
    Describes extra API services exposed for a resource, over and above the standard
    HydroShare API, as provided by the resource type's optional extra_capabilities hook.

    REST URL: GET /capabilites/{pid}

    :param pk: unique HydroShare identifier for the resource whose capabilities are wanted
    :return: the resource type's extra capabilities, or None if it defines none

    Raises:
        Exceptions.NotAuthorized - The user is not authorized
        Exceptions.NotFound - The resource identified by pid does not exist
        Exception.ServiceFailure - The service is unable to process the request
    """
    resource = utils.get_resource_by_shortkey(pk)
    # Resource types without an extra_capabilities hook yield None.
    capability_hook = getattr(resource, 'extra_capabilities', lambda: None)
    return capability_hook()
def get_resource_file(pk, filename):
    """
    Get an individual file from within a HydroShare resource.

    REST URL: GET /resource/{pid}/files/{filename}

    :param pk: unique HydroShare identifier for the resource containing the file
    :param filename: base name of the file to extract from the resource
    :return: the file object for the first file whose base name matches filename

    Raises:
        Exceptions.NotAuthorized - The user is not authorized
        Exceptions.NotFound - The resource does not exist or has no file named filename
        Exception.ServiceFailure - The service is unable to process the request
    """
    resource = utils.get_resource_by_shortkey(pk)
    # Compare by base name only, since stored paths include upload directories.
    matches = (
        candidate.resource_file
        for candidate in ResourceFile.objects.filter(object_id=resource.id)
        if os.path.basename(candidate.resource_file.name) == filename
    )
    for resource_file in matches:
        return resource_file
    raise ObjectDoesNotExist(filename)
def update_resource_file(pk, filename, f):
    """
    Replace the contents of an individual file within a HydroShare resource.

    REST URL: PUT /resource/{pid}/files/{filename}

    :param pk: unique HydroShare identifier for the resource containing the file
    :param filename: base name of the file to be replaced
    :param f: file object (or UploadedFile) holding the new contents
    :return: the updated ResourceFile instance

    Raises:
        Exceptions.NotAuthorized - The user is not authorized
        Exceptions.NotFound - The resource does not exist or has no file named filename
        Exception.ServiceFailure - The service is unable to process the request
    """
    resource = utils.get_resource_by_shortkey(pk)
    for rf in ResourceFile.objects.filter(object_id=resource.id):
        # Compare by base name only, since stored paths include upload directories.
        if os.path.basename(rf.resource_file.name) != filename:
            continue
        # Delete the old file contents before attaching the replacement.
        rf.resource_file.delete()
        rf.resource_file = f if isinstance(f, UploadedFile) else File(f)
        rf.save()
        return rf
    raise ObjectDoesNotExist(filename)
def get_revisions(pk):
    """
    Return the revisions of the resource identified by pk.

    REST URL: GET /revisions/{pid}

    :param pk: unique HydroShare identifier for the resource whose revisions are wanted
    :return: queryset of the resource's bags, one per revision

    Raises:
        Exceptions.NotAuthorized - The user is not authorized
        Exceptions.NotFound - The resource identified by pid does not exist
        Exception.ServiceFailure - The service is unable to process the request
    """
    # Each bag of the resource represents one revision.
    resource = utils.get_resource_by_shortkey(pk)
    return resource.bags.all()
def get_related(pk):
    """
    Returns a list of pids for resources that are related to the resource identified by the specified pid.

    REST URL: GET /related/{pid}

    :param pk: unique HydroShare identifier for the resource whose related resources are to be retrieved
    :return: list of pids for resources that are related to the specified resource

    Raises:
        Exceptions.NotAuthorized - The user is not authorized
        Exceptions.NotFound - The resource identified by pid does not exist
        Exception.ServiceFailure - The service is unable to process the request
    """
    # Fix: `raise NotImplemented()` raised a TypeError (the NotImplemented constant is
    # not callable); NotImplementedError is the correct exception for an unimplemented API.
    raise NotImplementedError()
def get_checksum(pk):
    """
    Returns a checksum for the specified resource using the MD5 algorithm. The result is used to determine if two
    instances referenced by a pid are identical.

    REST URL: GET /checksum/{pid}

    :param pk: unique HydroShare identifier for the resource for which the checksum is to be returned
    :return: checksum of the resource identified by pid

    Raises:
        Exceptions.NotAuthorized - The user is not authorized
        Exceptions.NotFound - The resource specified by pid does not exist
        Exception.ServiceFailure - The service is unable to process the request
    """
    # Fix: `raise NotImplemented()` raised a TypeError (the NotImplemented constant is
    # not callable); NotImplementedError is the correct exception for an unimplemented API.
    raise NotImplementedError()
def create_resource(
        resource_type, owner, title,
        edit_users=None, view_users=None, edit_groups=None, view_groups=None,
        keywords=None, dublin_metadata=None, metadata=None,
        files=(), **kwargs):
    """
    Called by a client to add a new resource to HydroShare. The caller must have authorization to write content to
    HydroShare. The pid for the resource is assigned by HydroShare upon inserting the resource.  The create method
    returns the newly-assigned pid.

    REST URL:  POST /resource

    Parameters:
    resource - The data bytes of the resource to be added to HydroShare

    Returns:    The pid assigned to the newly created resource

    Return Type:    pid

    Raises:
    Exceptions.NotAuthorized - The user is not authorized to write to HydroShare
    Exceptions.InvalidContent - The content of the resource is incomplete
    Exception.ServiceFailure - The service is unable to process the request

    Note:  The calling user will automatically be set as the owner of the created resource.

    Implementation notes:

    1. pid is called short_id.  This is because pid is a UNIX term for Process ID and could be confusing.

    2. return type is an instance of a subclass of hs_core.models.AbstractResource.  This is for efficiency in the
    native API.  The native API should return actual instance rather than IDs wherever possible to avoid repeated
    lookups in the database when they are unnecessary.

    3. resource_type is a string: see parameter list

    :param resource_type: string. the classname of the resource type, such as GenericResource
    :param owner: email address, username, or User instance. The owner of the resource
    :param title: string. the title of the resource
    :param edit_users: list of email addresses, usernames, or User instances who will be given edit permissions
    :param view_users: list of email addresses, usernames, or User instances who will be given view permissions
    :param edit_groups: list of group names or Group instances who will be given edit permissions
    :param view_groups: list of group names or Group instances who will be given view permissions
    :param keywords: string list. list of keywords to add to the resource
    :param dublin_metadata: list of dicts containing keys { 'term', 'content' } respecting dublin core std.
    :param metadata: list of dicts containing keys (element names) and corresponding values as dicts { 'creator': {'name':'John Smith'}}.
    :param files: list of Django File or UploadedFile objects to be attached to the resource
    :param kwargs: extra arguments to fill in required values in AbstractResource subclasses
    :return: a new resource which is an instance of resource_type.
    """
    # resolve the resource type name to its registered model class
    for tp in get_resource_types():
        if resource_type == tp.__name__:
            cls = tp
            break
    else:
        raise NotImplementedError("Type {resource_type} does not exist".format(resource_type=resource_type))

    # Send pre-create resource signal
    pre_create_resource.send(sender=cls, dublin_metadata=dublin_metadata, files=files, **kwargs)

    owner = utils.user_from_id(owner)

    # create the resource
    resource = cls.objects.create(
        user=owner,
        creator=owner,
        title=title,
        last_changed_by=owner,
        in_menus=[],
        **kwargs
    )

    # attach the uploaded files to the new resource
    for file in files:
        ResourceFile.objects.create(content_object=resource, resource_file=file)

    # the owner always receives view/edit/ownership permissions
    resource.view_users.add(owner)
    resource.edit_users.add(owner)
    resource.owners.add(owner)

    if edit_users:
        for user in edit_users:
            user = utils.user_from_id(user)
            resource.edit_users.add(user)
            resource.view_users.add(user)  # edit permission implies view permission

    if view_users:
        for user in view_users:
            user = utils.user_from_id(user)
            resource.view_users.add(user)

    if edit_groups:
        for group in edit_groups:
            group = utils.group_from_id(group)
            resource.edit_groups.add(group)
            resource.view_groups.add(group)  # edit permission implies view permission

    if view_groups:
        for group in view_groups:
            group = utils.group_from_id(group)
            resource.view_groups.add(group)

    if keywords:
        ks = [Keyword.objects.get_or_create(title=k) for k in keywords]
        ks = zip(*ks)[0]  # ignore whether something was created or not.  zip is its own inverse

        for k in ks:
            AssignedKeyword.objects.create(content_object=resource, keyword=k)

    # for creating metadata elements based on the old metadata implementation
    if dublin_metadata:
        for d in dublin_metadata:
            QualifiedDublinCoreElement.objects.create(
                term=d['term'],
                content=d['content'],
                content_object=resource
            )

    # for creating metadata elements based on the new metadata implementation
    if metadata:
        for element in metadata:
            # here k is the name of the element
            # v is a dict of all element attributes/field names and field values
            k, v = element.items()[0]
            resource.metadata.create_element(k, **v)

    # add the subject elements from the AssignedKeywords (new metadata implementation)
    for akw in AssignedKeyword.objects.filter(object_pk=resource.id).all():
        resource.metadata.create_element('subject', value=akw.keyword.title)

    # create the initial bagit archive for the resource
    hs_bagit.create_bag(resource)

    # Send post-create resource signal
    post_create_resource.send(sender=cls, resource=resource)

    return resource
def update_resource(
        pk,
        edit_users=None, view_users=None, edit_groups=None, view_groups=None,
        keywords=None, dublin_metadata=None, metadata=None,
        *files, **kwargs):
    """
    Called by clients to update a resource in HydroShare.

    REST URL:  PUT /resource/{pid}

    :param pk: unique HydroShare identifier for the resource that is to be updated
    :param edit_users: if given, replaces the set of users with edit permission
    :param view_users: if given, replaces the set of users with view permission
    :param edit_groups: if given, replaces the set of groups with edit permission
    :param view_groups: if given, replaces the set of groups with view permission
    :param keywords: if given, replaces the resource's keywords
    :param dublin_metadata: if given, replaces the old-style dublin core metadata
    :param metadata: if given, updates the new-style metadata elements
    :param files: if given, replaces all of the resource's files
    :param kwargs: may include 'owner' to add an additional owner
    :return: the updated resource

    Raises:
    Exceptions.NotAuthorized - The user is not authorized
    Exceptions.InvalidContent - The content of the resource is incomplete
    Exception.ServiceFailure - The service is unable to process the request

    Notes:
    For mutable resources (resources that have not been formally published), the update overwrites existing data and
    metadata using the resource that is passed to this method. If a user wants to create a copy or modified version of
    a mutable resource this should be done using HydroShare.createResource().

    For immutable resources (formally published resources), this method creates a new resource that is a new version
    of the formally published resource. HydroShare will record the update by storing the SystemMetadata.obsoletes and
    SystemMetadata.obsoletedBy fields for the respective resources in their system metadata. A formally published
    resource can only be obsoleted by one newer version. Once a resource is obsoleted, no other resources can
    obsolete it.
    """
    resource = utils.get_resource_by_shortkey(pk)

    # replace the resource's files wholesale if any new files were supplied
    if files:
        ResourceFile.objects.filter(object_id=resource.id).delete()
        for file in files:
            ResourceFile.objects.create(
                content_object=resource,
                resource_file=File(file) if not isinstance(file, UploadedFile) else file
            )

    if 'owner' in kwargs:
        owner = utils.user_from_id(kwargs['owner'])
        resource.view_users.add(owner)
        resource.edit_users.add(owner)
        resource.owners.add(owner)

    if edit_users:
        resource.edit_users.clear()
        for user in edit_users:
            user = utils.user_from_id(user)
            resource.edit_users.add(user)
            resource.view_users.add(user)  # edit permission implies view permission

    if view_users:
        resource.view_users.clear()
        for user in view_users:
            user = utils.user_from_id(user)
            resource.view_users.add(user)

    if edit_groups:
        resource.edit_groups.clear()
        for group in edit_groups:
            group = utils.group_from_id(group)
            resource.edit_groups.add(group)
            resource.view_groups.add(group)  # edit permission implies view permission

    if view_groups:
        # fix: this previously cleared edit_groups (copy-paste error), which silently
        # wiped edit permissions whenever view_groups was supplied
        resource.view_groups.clear()
        for group in view_groups:
            group = utils.group_from_id(group)
            resource.view_groups.add(group)

    if keywords:
        AssignedKeyword.objects.filter(object_pk=resource.id).delete()
        ks = [Keyword.objects.get_or_create(title=k) for k in keywords]
        ks = zip(*ks)[0]  # ignore whether something was created or not.  zip is its own inverse

        for k in ks:
            AssignedKeyword.objects.create(content_object=resource, keyword=k)

    # for creating metadata elements based on the old metadata implementation
    if dublin_metadata:
        QualifiedDublinCoreElement.objects.filter(object_id=resource.id).delete()
        for d in dublin_metadata:
            QualifiedDublinCoreElement.objects.create(
                term=d['term'],
                content=d['content'],
                content_object=resource
            )

    # for creating metadata elements based on the new metadata implementation
    if metadata:
        _update_science_metadata(resource, metadata)

    return resource
def add_resource_files(pk, *files):
    """
    Add one or more files to an existing HydroShare resource.

    REST URL: PUT /resource/{pid}/files/{file}

    :param pk: unique HydroShare identifier for the resource to be updated
    :param files: file objects (or UploadedFile instances) to attach to the resource
    :return: list of the ResourceFile instances created

    Raises:
        Exceptions.NotAuthorized - The user is not authorized
        Exceptions.InvalidContent - The content of the file is invalid
        Exception.ServiceFailure - The service is unable to process the request

    Notes:
    For mutable resources (not formally published) the files are simply added. For
    immutable (formally published) resources, a new version of the resource is created and
    the obsoletion chain is recorded in the system metadata (SystemMetadata.obsoletes /
    SystemMetadata.obsoletedBy); a published resource can only be obsoleted by one newer
    version.
    """
    resource = utils.get_resource_by_shortkey(pk)
    # Wrap plain file objects; UploadedFile instances are already usable as-is.
    return [
        ResourceFile.objects.create(
            content_object=resource,
            resource_file=f if isinstance(f, UploadedFile) else File(f),
        )
        for f in files
    ]
def update_system_metadata(pk, **kwargs):
    """
    Update the system metadata of a resource.

    Currently a pure alias for update_science_metadata(); see that function for the
    accepted keyword arguments and behavior.

    :param pk: unique HydroShare identifier for the resource that is to be updated
    """
    return update_science_metadata(pk, **kwargs)
def update_science_metadata(pk, dublin_metadata=None, metadata=None, keywords=None, **kwargs):
    """
    Called by clients to update the science metadata for a resource in HydroShare.

    REST URL:  PUT /scimeta/{pid}

    :param pk: unique HydroShare identifier for the resource that is to be updated
    :param dublin_metadata: if given, replaces the old-style dublin core metadata
    :param metadata: if given, updates the new-style metadata elements
    :param keywords: if given, replaces the resource's keywords
    :param kwargs: additional resource attributes to set and save

    Raises:
    Exceptions.NotAuthorized - The user is not authorized
    Exceptions.InvalidContent - The content of the resource is incomplete
    Exception.ServiceFailure - The service is unable to process the request

    Notes:
    For mutable resources (resources that have not been formally published), the update overwrites existing Science
    Metadata. For immutable resources (formally published resources), this method creates a new resource that is a new
    version of the formally published resource, and the obsoletion chain is recorded via SystemMetadata.obsoletes /
    SystemMetadata.obsoletedBy. A formally published resource can only be obsoleted by one newer version. Once a
    resource is obsoleted, no other resources can obsolete it.
    """
    resource = utils.get_resource_by_shortkey(pk)

    if keywords:
        AssignedKeyword.objects.filter(object_pk=resource.id).delete()
        ks = [Keyword.objects.get_or_create(title=k) for k in keywords]
        ks = zip(*ks)[0]  # ignore whether something was created or not.  zip is its own inverse

        for k in ks:
            # fix: content_object must be the resource instance (a generic FK), not its id;
            # this now matches create_resource() and update_resource()
            AssignedKeyword.objects.create(content_object=resource, keyword=k)

    # for creating metadata elements based on the old metadata implementation
    if dublin_metadata:
        QualifiedDublinCoreElement.objects.filter(object_id=resource.id).delete()
        for d in dublin_metadata:
            QualifiedDublinCoreElement.objects.create(
                term=d['term'],
                content=d['content'],
                content_object=resource
            )

    # for creating metadata elements based on the new metadata implementation
    if metadata:
        _update_science_metadata(resource, metadata)

    if kwargs:
        for field, value in kwargs.items():
            setattr(resource, field, value)
        resource.save()
def delete_resource(pk):
    """
    Delete a resource managed by HydroShare. The caller must be an owner of the resource or
    an administrator. The operation removes the resource from further interaction with
    HydroShare services and interfaces; the resource bytes may be deleted as well (e.g. for
    malicious or legally-restricted content).

    REST URL: DELETE /resource/{pid}

    :param pk: unique HydroShare identifier of the resource to be deleted
    :return: the pid of the resource that was deleted

    Raises:
        Exceptions.NotAuthorized - The user is not authorized
        Exceptions.NotFound - The resource identified by pid does not exist
        Exception.ServiceFailure - The service is unable to process the request

    Note: Only HydroShare administrators can delete formally published resources.
    """
    resource = utils.get_resource_by_shortkey(pk)
    resource.delete()
    return pk
def delete_resource_file(pk, filename):
    """Delete an individual file from a HydroShare resource.

    REST URL: DELETE /resource/{pid}/files/{filename}

    :param pk: unique HydroShare identifier of the resource
    :param filename: name of the file to remove from the resource
    :return: the filename that was deleted
    :raises Exceptions.NotAuthorized: the user is not authorized
    :raises ObjectDoesNotExist: no file with that name exists on the resource
    :raises Exception.ServiceFailure: the service is unable to process the request

    Note: for mutable (unpublished) resources this modifies the resource in
    place; for formally published resources a new version is expected to be
    created (obsoletes/obsoletedBy bookkeeping per the HydroShare spec).
    """
    resource = utils.get_resource_by_shortkey(pk)
    for res_file in ResourceFile.objects.filter(object_id=resource.id):
        if os.path.basename(res_file.resource_file.name) != filename:
            continue
        # Remove both the stored file and its database record.
        res_file.resource_file.delete()
        res_file.delete()
        return filename
    raise ObjectDoesNotExist(filename)
def publish_resource(pk):
    """Formally publish a resource in HydroShare.

    Triggers DOI creation and exposure to the HydroShare DataONE Member Node.
    The user must be an owner of the resource or an administrator.

    REST URL: PUT /publishResource/{pid}

    :param pk: unique HydroShare identifier of the resource to publish
    :return: None
    :raises Exceptions.NotAuthorized: the user is not authorized
    :raises Exceptions.NotFound: the resource identified by pk does not exist
    :raises Exception.ServiceFailure: the service is unable to process the request

    Note: this is different from merely granting public access via access
    control rules.
    """
    resource = utils.get_resource_by_shortkey(pk)
    # Publication revokes all edit access and freezes the content.
    resource.edit_users = []
    resource.edit_groups = []
    resource.frozen = True
    resource.published_and_frozen = True
    resource.save()
def resolve_doi(doi):
    """Resolve a DOI to the internal HydroShare identifier (pid).

    REST URL: GET /resolveDOI/{doi}

    :param doi: a DOI assigned to a resource in HydroShare
    :return: the pid (short id) of the resource
    :raises Exceptions.NotAuthorized: the user is not authorized
    :raises Exceptions.NotFound: no resource carries that DOI
    :raises Exception.ServiceFailure: the service is unable to process the request

    Note: all other HydroShare methods use internal pids; this method exists
    so that a program can obtain the pid for a DOI.
    """
    resource = utils.get_resource_by_doi(doi)
    return resource.short_id
def create_metadata_element(resource_short_id, element_model_name, **kwargs):
    """Create one metadata element of the given type for a resource.

    :param resource_short_id: short id of the target resource
    :param element_model_name: metadata element name (e.g. 'creator')
    :param kwargs: attribute name/value pairs required by that element type
    :return: None
    """
    resource = utils.get_resource_by_shortkey(resource_short_id)
    resource.metadata.create_element(element_model_name, **kwargs)
def update_metadata_element(resource_short_id, element_model_name, element_id, **kwargs):
    """Update attributes of an existing metadata element on a resource.

    :param resource_short_id: short id of the target resource
    :param element_model_name: metadata element name (e.g. 'creator')
    :param element_id: id of the metadata element instance to update
    :param kwargs: attribute name/value pairs to change
    :return: None
    """
    resource = utils.get_resource_by_shortkey(resource_short_id)
    resource.metadata.update_element(element_model_name, element_id, **kwargs)
def delete_metadata_element(resource_short_id, element_model_name, element_id):
    """Delete one metadata element from a resource.

    :param resource_short_id: short id of the target resource
    :param element_model_name: metadata element name (e.g. 'creator')
    :param element_id: id of the metadata element instance to delete
    :return: None
    """
    resource = utils.get_resource_by_shortkey(resource_short_id)
    resource.metadata.delete_element(element_model_name, element_id)
def get_science_metadata_xml(resource_short_id):
    """Return the science metadata of a resource serialized as an XML string.

    :param resource_short_id: short id of the resource
    :return: science metadata as an XML string
    """
    metadata = utils.get_resource_by_shortkey(resource_short_id).metadata
    return metadata.get_xml()
def _update_science_metadata(resource, metadata):
    """Rebuild a resource's science metadata from scratch.

    All existing elements are deleted, the elements derivable from resource
    properties (title, abstract, creator, dates, identifier, subjects) are
    recreated, and then the caller-supplied *metadata* list (one
    {element_name: attribute_dict} per entry) is applied.
    """
    # delete all existing elements in the metadata container object
    # note: we can't delete the metadata container object as it would delete the associated
    # resource object (cascade delete)
    resource.metadata.delete_all_elements()
    # add the few of the metadata elements that need to be
    # created from the resource properties (like title, abstract, created date etc)
    resource.metadata.create_element('title', value=resource.title)
    # Prefer the page content as the abstract; fall back to the description.
    if resource.content:
        resource.metadata.create_element('description', abstract=resource.content)
    else:
        resource.metadata.create_element('description', abstract=resource.description)
    resource.metadata.create_element('creator', name=resource.creator.get_full_name(), email=resource.creator.email)
    resource.metadata.create_element('date', type='created', start_date=resource.created)
    resource.metadata.create_element('date', type='modified', start_date=resource.updated)
    resource.metadata.create_element('identifier', name='hydroShareIdentifier',
                                     url='http://hydroshare.org/resource{0}{1}'.format('/', resource.short_id))
    # TODO: add the type element (once we have an url for the resource type
    # add the subject elements from the AssignedKeywords
    for akw in AssignedKeyword.objects.filter(object_pk=resource.id).all():
        resource.metadata.create_element('subject', value=akw.keyword.title)
    # then create the rest of the elements form the user provided data
    for element in metadata:
        # here k is the name of the element
        # v is a dict of all element attributes/field names and corresponding values
        # NOTE(review): dict.items()[0] is Python-2-only (items() returns a view in Python 3)
        k, v = element.items()[0]
        resource.metadata.create_element(k, **v)
| hydroshare/hydroshare_temp | hs_core/hydroshare/resource.py | Python | bsd-3-clause | 34,421 |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Reusable utility functions.
This file is generic and can be reused by other models without modification.
"""
import multiprocessing
import subprocess
import tensorflow as tf
from tensorflow.python.lib.io import file_io
# NOTE(review): these constants are not referenced in this module's visible
# code; presumably consumed by the training entry point — confirm against callers.
FINAL_MODEL_DIR = 'model'
METADATA_FILE = 'metadata.yaml'
def _copy_all(src_files, dest_dir):
    """Copy each file in *src_files* into *dest_dir* via `gsutil cp` (quiet, parallel)."""
    cmd = ['gsutil', '-q', '-m', 'cp']
    cmd.extend(src_files)
    cmd.append(dest_dir)
    subprocess.check_call(cmd)
def _recursive_copy(src_dir, dest_dir):
    """Mirror *src_dir* into *dest_dir* via `gsutil rsync` (quiet, parallel)."""
    cmd = ['gsutil', '-q', '-m', 'rsync', src_dir, dest_dir]
    subprocess.check_call(cmd)
class ExportLastModelMonitor(tf.contrib.learn.monitors.ExportMonitor):
    """ExportMonitor variant that only keeps the final export.

    Per-step exports are suppressed (every_n_step_end is a no-op); when
    training ends, the last export directory is copied to *dest*, along with
    any *additional_assets* files.
    """

    def __init__(self,
                 export_dir,
                 dest,
                 additional_assets=None,
                 input_fn=None,
                 input_feature_key=None,
                 exports_to_keep=5,
                 signature_fn=None,
                 default_batch_size=None):
        # every_n_steps=0 disables the periodic export behavior of the base class.
        super(ExportLastModelMonitor, self).__init__(
            every_n_steps=0,
            export_dir=export_dir,
            input_fn=input_fn,
            input_feature_key=input_feature_key,
            exports_to_keep=exports_to_keep,
            signature_fn=signature_fn,
            default_batch_size=default_batch_size)
        self._dest = dest                                  # final copy destination
        self._additional_assets = additional_assets or []  # extra files to ship with the export

    def every_n_step_end(self, step, outputs):
        """Suppress intermediate exports; only the final one matters."""
        # We only care about the last export.
        pass

    def end(self, session=None):
        """On training end, copy the last export (and assets) to the destination."""
        super(ExportLastModelMonitor, self).end(session)
        file_io.recursive_create_dir(self._dest)
        _recursive_copy(self.last_export_dir, self._dest)
        if self._additional_assets:
            # TODO(rhaertel): use the actual assets directory. For now, metadata.yaml
            # must be a sibling of the export.meta file.
            assets_dir = self._dest
            file_io.create_dir(assets_dir)
            _copy_all(self._additional_assets, assets_dir)
def read_examples(input_files, batch_size, shuffle, num_epochs=None):
    """Creates readers and queues for reading example protos.

    :param input_files: list of file patterns; each entry may itself be a
        comma-separated list of glob patterns.
    :param batch_size: number of serialized examples per batch.
    :param shuffle: if True, use a shuffling batch queue.
    :param num_epochs: number of passes over the data (0 or None = unlimited).
    :return: (example_id, encoded_example) batch tensors.
    """
    files = []
    for e in input_files:
        for path in e.split(','):
            files.extend(file_io.get_matching_files(path))
    thread_count = multiprocessing.cpu_count()
    # The minimum number of instances in a queue from which examples are drawn
    # randomly. The larger this number, the more randomness at the expense of
    # higher memory requirements.
    min_after_dequeue = 1000
    # When batching data, the queue's capacity will be larger than the batch_size
    # by some factor. The recommended formula is (num_threads + a small safety
    # margin). For now, we use a single thread for reading, so this can be small.
    queue_size_multiplier = thread_count + 3
    # Convert num_epochs == 0 -> num_epochs is None, if necessary
    num_epochs = num_epochs or None
    # Build a queue of the filenames to be read.
    filename_queue = tf.train.string_input_producer(files, num_epochs, shuffle)
    # Input records are GZIP-compressed TFRecords.
    options = tf.python_io.TFRecordOptions(
        compression_type=tf.python_io.TFRecordCompressionType.GZIP)
    example_id, encoded_example = tf.TFRecordReader(options=options).read_up_to(
        filename_queue, batch_size)
    if shuffle:
        capacity = min_after_dequeue + queue_size_multiplier * batch_size
        return tf.train.shuffle_batch(
            [example_id, encoded_example],
            batch_size,
            capacity,
            min_after_dequeue,
            enqueue_many=True,
            num_threads=thread_count)
    else:
        capacity = queue_size_multiplier * batch_size
        return tf.train.batch(
            [example_id, encoded_example],
            batch_size,
            capacity=capacity,
            enqueue_many=True,
            num_threads=thread_count)
def override_if_not_in_args(flag, argument, args):
    """Append ``flag argument`` to *args* (in place) unless *flag* is already present."""
    if flag in args:
        return
    args += [flag, argument]
| obulpathi/cloud | ml/tensorflow/iris/trainer/util.py | Python | apache-2.0 | 4,441 |
from indivo.tests.internal_tests import InternalTests, enable_transactions
from indivo.models import PHA
from indivo.tests.data.record import TEST_RECORDS
from indivo.tests.data.app import TEST_USERAPPS, TEST_AUTONOMOUS_APPS
from indivo.tests.data.app import TEST_SMART_MANIFESTS, TEST_USERAPP_MANIFESTS
from indivo.tests.data.account import TEST_ACCOUNTS
try:
from django.utils import simplejson
except ImportError:
try:
import simplejson
except ImportError:
raise ImportError("Couldn't find an installation of SimpleJSON")
from django.db import IntegrityError, transaction
class PHAModelUnitTests(InternalTests):
    """Unit tests for the PHA (user app) model.

    Covers construction/uniqueness constraints, carenet access control, and
    manifest serialization round-trips (SMART and Indivo-specific manifests).
    """

    def setUp(self):
        super(PHAModelUnitTests, self).setUp()
        # A userapp
        self.app = self.createUserApp(TEST_USERAPPS, 0)
        # An account
        self.account = self.createAccount(TEST_ACCOUNTS, 0)
        # A record, and one of its builtin carenets
        self.record = self.createRecord(TEST_RECORDS, 0, owner=self.account)
        self.carenet = self.record.carenet_set.all()[0]

    def tearDown(self):
        super(PHAModelUnitTests, self).tearDown()

    @enable_transactions
    def test_construction(self):
        """Apps construct normally; duplicate emails are rejected."""
        # Should be able to construct normally, autonomous or not
        try:
            a = self.createUserApp(TEST_USERAPPS, 1)
        except:
            self.fail('Unable to construct UserApp with standard args')
        else:
            self.assertEqual(a, PHA.objects.get(pk=a.pk))
        try:
            a2 = self.createUserApp(TEST_AUTONOMOUS_APPS, 0)
        except:
            self.fail('Unable to construct Autonomous UserApp with standard args')
        else:
            # Savepoint so the expected IntegrityError below can be rolled back.
            sid = transaction.savepoint()
            self.assertEqual(a2, PHA.objects.get(pk=a2.pk))
        # Should not be able to construct two apps with same email
        try:
            a3 = self.createUserApp(TEST_USERAPPS, 1, force_create=True)
        except:
            transaction.savepoint_rollback(sid)
        else:
            self.fail('Constructed two UserApps with the same email')
        # Even if one is autonomous
        try:
            overrides = {'is_autonomous':True}
            a4 = self.createUserApp(TEST_USERAPPS, 1, force_create=True, **overrides)
        except:
            transaction.rollback()
        else:
            self.fail('Constructed a UserApp and an AutonomousUserApp with the same email')

    def test_accesscontrol(self):
        """isInCarenet flips once the app is added to the carenet."""
        # test isInCarenet
        self.assertFalse(self.app.isInCarenet(self.carenet))
        # add it to the carenet
        self.addAppToCarenet(self.app, self.carenet)
        # re-assert
        self.assertTrue(self.app.isInCarenet(self.carenet))

    def test_from_manifest(self):
        """PHA.from_manifest parses SMART and Indivo manifests; save=False persists nothing."""
        all_manifests = TEST_SMART_MANIFESTS + TEST_USERAPP_MANIFESTS
        # test that save=False works
        for manifest, credentials in all_manifests:
            num_phas = PHA.objects.count()
            app = PHA.from_manifest(manifest, credentials, save=False)
            self.assertEqual(num_phas, PHA.objects.count())
        # should work with a SMART manifest
        for manifest, credentials in TEST_SMART_MANIFESTS:
            parsed_m, parsed_c, app = self.buildAppFromManifest(PHA, manifest, credentials)
            self.assertValidUserAppManifest(parsed_m, parsed_c, app)
        # Or with Indivo-specific manifest extensions
        for manifest, credentials in TEST_USERAPP_MANIFESTS:
            parsed_m, parsed_c, app = self.buildAppFromManifest(PHA, manifest, credentials)
            self.assertValidUserAppManifest(parsed_m, parsed_c, app)

    def test_to_manifest(self):
        """to_manifest round-trips: the re-serialized manifest keeps all original info."""
        for manifest, credentials in TEST_SMART_MANIFESTS:
            app = PHA.from_manifest(manifest, credentials, save=False)
            parsed_m = simplejson.loads(manifest)
            reparsed_m = simplejson.loads(app.to_manifest(smart_only=True))
            # The reparsed manifest should contain AT LEAST as much info as the original
            for k, v in parsed_m.iteritems():
                # URIs might have been expanded if they were relative
                if k in ['index', 'oauth_callback_url', 'icon']:
                    self.assertTrue(reparsed_m.get(k, None).endswith(v))
                else:
                    self.assertEqual(v, reparsed_m.get(k, None))
        for manifest, credentials in TEST_USERAPP_MANIFESTS:
            app = PHA.from_manifest(manifest, credentials, save=False)
            parsed_m = simplejson.loads(manifest)
            reparsed_m = simplejson.loads(app.to_manifest())
            # The reparsed manifest should contain AT LEAST as much info as the original
            for k, v in parsed_m.iteritems():
                # URIs might have been expanded if they were relative
                if k in ['index', 'oauth_callback_url', 'icon']:
                    self.assertTrue(reparsed_m.get(k, None).endswith(v))
                else:
                    self.assertEqual(v, reparsed_m.get(k, None))

    def buildAppFromManifest(self, model_cls, manifest, credentials):
        """Parse + persist a manifest; assert exactly one app row was created."""
        parsed_m = simplejson.loads(manifest)
        parsed_c = simplejson.loads(credentials)
        num_apps = model_cls.objects.count()
        app = model_cls.from_manifest(manifest, credentials)
        self.assertEqual(num_apps + 1, model_cls.objects.count())
        return (parsed_m, parsed_c, app)

    def assertValidUserAppManifest(self, parsed_m, parsed_c, app):
        """Field-by-field check that *app* matches the parsed manifest/credentials."""
        self.assertEqual(parsed_c['consumer_key'], app.consumer_key)
        self.assertEqual(parsed_c['consumer_secret'], app.secret)
        self.assertEqual(parsed_m['name'], app.name)
        self.assertEqual(parsed_m['id'], app.email)
        self.assertTrue(app.start_url_template.endswith(parsed_m.get('index', ''))) # Might have been expanded if it was a relative URI
        self.assertTrue(app.callback_url.endswith(parsed_m.get('oauth_callback_url',''))) # SMART apps won't define this
        autonomous_p = parsed_m.get('mode', '') == 'background'
        self.assertEqual(autonomous_p, app.is_autonomous)
        self.assertEqual(parsed_m.get('autonomous_reason', ''), app.autonomous_reason) # SMART apps won't define this
        # has_ui / frameable default to "has an index page" when unspecified
        has_ui_p = parsed_m['has_ui'] if parsed_m.has_key('has_ui') else parsed_m.has_key('index')
        self.assertEqual(has_ui_p, app.has_ui)
        frameable_p = parsed_m['frameable'] if parsed_m.has_key('frameable') else parsed_m.has_key('index')
        self.assertEqual(frameable_p, app.frameable)
        self.assertEqual(parsed_m.get('description', ''), app.description)
        self.assertEqual(parsed_m.get('author', ''), app.author)
        self.assertEqual(parsed_m.get('version', ''), app.version)
        self.assertTrue(app.icon_url.endswith(parsed_m.get('icon', ''))) # Might have been expanded if it was a relative URI
        self.assertEqual(parsed_m.get('requires', {}), simplejson.loads(app.requirements))
| sayan801/indivo_server | indivo/tests/unit/models/pha.py | Python | gpl-3.0 | 7,025 |
import os
import pickle
import sys
def main():
    """Attach POS tags from <ID>.tagged files to the terminals of pickled passages.

    argv[1] is the pickle of passages; argv[2] is the directory of tagged
    text files.  The annotated passages are re-pickled to '<argv[1]>.tags'.
    """
    dbpath = sys.argv[1]
    textdir = sys.argv[2]
    with open(dbpath, 'rb') as f:
        passages = pickle.load(f)
    for passage in passages:
        # Flatten the tagged file into one token stream of word_TAG items.
        with open(os.path.join(textdir, passage.ID + '.tagged')) as f:
            tokens = [tok for line in f for tok in line.split()]
        for terminal in passage.layer('0').all:
            # Consume as many tagged tokens as the terminal has words.
            width = len(terminal.text.split())
            head, tokens = tokens[:width], tokens[width:]
            terminal.extra['postag'] = " ".join(tok.split('_')[1] for tok in head)
    with open(dbpath + '.tags', 'wb') as f:
        pickle.dump(passages, f)


if __name__ == '__main__':
    main()
| borgr/ucca | scenes/postag_passages.py | Python | gpl-3.0 | 755 |
from time import sleep
from org.myrobotlab.service import Speech
from org.myrobotlab.framework import MRLListener
# --- MyRobotLab (Jython) service setup ---
# NOTE(review): Runtime/MRLListener come from the MRL Jython environment and
# the imports above; this script only runs inside MyRobotLab.
# this subscribe is easy shorthand method
# Name it "speech".
speech = Runtime.create("speech","Speech")
speech.startService()
speech.setGoogleURI("http://thehackettfamily.org/Voice_api/api2.php?voice=Laura&txt=")
# Two serial ports, each feeding its own RX callback below.
serial1 = Runtime.start('serial1','Serial')
serial2 = Runtime.start('serial2','Serial')
speech.speak("hello")
serial1.connect('COM8')
serial2.connect('COM6')
# python.subscribe('serial1','publishRX')
# python.subscribe('serial2','publishRX')
# this subscribe with 4 parameters has all details - subscribe to and callback info
# we subscribe to one service's topic to one method and the other to a different method
# this is how its done in latest mrl - nice no ?
# python.subscribe('serial1','publishRX', python.getName(), 'serial1RX')
# python.subscribe('serial2','publishRX', python.getName(), 'serial2RX')
# this is how its done in 119
listener1 = MRLListener('publishRX', 'python', 'serial1RX', None)
serial1.addListener(listener1)
listener2 = MRLListener('publishRX', 'python', 'serial2RX', None)
serial2.addListener(listener2)
# i want this to be the data from serial1
def serial1RX(data):
    """publishRX callback for serial1: speak the received value when it is 1-12.

    Replaces the original twelve-branch `if num == 1 ... if num == 12` chain;
    membership in range(1, 13) matches the same values (by equality, as the
    `==` comparisons did) and str(int(num)) yields the same "1".."12" strings.
    """
    print(data)
    num = data
    if num in range(1, 13):
        speech.speak(str(int(num)))
# and this to be the data from serial2
def serial2RX(data):
    """publishRX callback for serial2: speak the received value when it is 1-12.

    Same collapse of the original twelve-branch chain as serial1RX; the two
    callbacks are intentionally identical apart from the port they serve.
    """
    print(data)
    num = data
    if num in range(1, 13):
        speech.speak(str(int(num)))
| MyRobotLab/pyrobotlab | home/Markus/Skin.py | Python | apache-2.0 | 2,542 |
from __future__ import absolute_import
from django.contrib import messages
from django.db import models
from django.utils.translation import ugettext_lazy as _
from .exceptions import SourceFileError
def check_updated(modeladmin, request, queryset):
    """Admin action: re-check each selected source's backing file for updates.

    Sources whose file cannot be opened are reported individually via the
    messages framework; the summary counts only the sources actually checked.
    """
    count = 0
    for source in queryset:
        try:
            source.check_source_data()
        except SourceFileError as exception:
            messages.error(request, _('Error opening file for source: %s; %s') % (source, str(exception)))
        else:
            count += 1
    # Fix: report the number of sources successfully checked (`count`), not
    # len(queryset) — the original counted failed sources as checked and left
    # `count` unused.
    if count == 1:
        message_bit = 'Source file was checked for update.'
    else:
        message_bit = '%s sources were checked for update.' % count
    modeladmin.message_user(request, message_bit)
check_updated.short_description = _('Check for updated source file')
def clear_versions(modeladmin, request, queryset):
    """Admin action: delete all stored versions of each selected source.

    Sources whose file cannot be opened are reported individually; the
    summary counts only the sources whose versions were actually deleted.
    """
    count = 0
    for source in queryset:
        try:
            source.clear_versions()
        except IOError:
            messages.error(request, _('Error opening file for source: %s') % source)
        else:
            count += 1
    # Fix: use `count` (successful deletions) instead of len(queryset), which
    # misreported when some sources failed and left `count` unused.
    if count == 1:
        message_bit = 'Source versions were deleted.'
    else:
        message_bit = '%s sources versions were deleted.' % count
    modeladmin.message_user(request, message_bit)
clear_versions.short_description = _('Clear all source versions')
#clone_objects Copyright (C) 2009 Rune Bromer
#http://www.bromer.eu/2009/05/23/a-generic-copyclone-action-for-django-11/
def clone_objects(objects, title_fieldnames):
    """Clone each Django model instance in *objects*.

    CharFields named in *title_fieldnames* get a " (copy) " suffix so the
    clones are distinguishable.  Accepts a single instance or an iterable.
    Derived from Rune Bromer's generic copy/clone admin action (2009).
    """
    def clone(from_object, title_fieldnames):
        # Copy every concrete field except the primary key.
        args = dict([(fld.name, getattr(from_object, fld.name))
                     for fld in from_object._meta.fields
                     if fld is not from_object._meta.pk])
        # Fix: the pk field (normally named 'id') is already excluded above,
        # so a plain args.pop('id') would raise KeyError; pop with a default.
        args.pop('id', None)
        for field in from_object._meta.fields:
            if field.name in title_fieldnames:
                if isinstance(field, models.CharField):
                    args[field.name] = getattr(from_object, field.name) + (' (%s) ' % unicode(_('copy')))
        return from_object.__class__.objects.create(**args)
    if not hasattr(objects, '__iter__'):
        objects = [objects]
    # We always have the objects in a list now
    objs = []
    for obj in objects:
        obj = clone(obj, title_fieldnames)
        obj.save()
        objs.append(obj)
def clone(self, request, queryset):
    """Admin action: duplicate the selected sources (copies 'name'/'slug' fields)."""
    clone_objects(queryset, ('name', 'slug'))
    total = queryset.count()
    if total == 1:
        message_bit = _('1 source was')
    else:
        message_bit = _('%s sources were') % total
    self.message_user(request, _('%s copied.') % message_bit)
clone.short_description = _('Copy the selected source')
| commonwealth-of-puerto-rico/libre | libre/apps/data_drivers/actions.py | Python | gpl-3.0 | 2,728 |
from optparse import make_option
from django.core.management.base import BaseCommand
from fluff.pillow import FluffPillowProcessor
from pillowtop.utils import get_pillow_by_name
class Command(BaseCommand):
    """Management command: wipe all rows from a fluff pillow's SQL table.

    Usage: ``manage.py <command> <pillow_name> [--noinput]``.  Prompts for
    confirmation unless --noinput is given.
    """
    option_list = BaseCommand.option_list + (make_option('--noinput',
                                                         action='store_true',
                                                         dest='noinput',
                                                         default=False,
                                                         help='Skip important confirmation warnings.'),)

    def handle(self, *args, **options):
        # args[0] is the name of the pillow whose indicator table is wiped.
        pillow = get_pillow_by_name(args[0])
        if not options['noinput']:
            confirm = raw_input(
                """
                You have requested to wipe %s table
                Type 'yes' to continue, or 'no' to cancel:
                """ % pillow.pillow_id
            )
            if confirm != 'yes':
                print "\tWipe cancelled."
                return
        # Delete every row of the indicator table via the pillow's SQL engine.
        processor = FluffPillowProcessor(pillow.indicator_class)
        engine = processor.get_sql_engine()
        table = pillow.indicator_class().table
        engine.execute(table.delete())
| qedsoftware/commcare-hq | corehq/ex-submodules/fluff/management/commands/wipe_fluff_table.py | Python | bsd-3-clause | 1,243 |
""" yluo - 05/01/2016 creation
Preprocess i2b2/VA relations to generate data files ready to used by Seg-CNN
"""
__author__= """Yuan Luo (yuan.hypnos.luo@gmail.com)"""
__revision__="0.5"
import numpy as np
import cPickle
from collections import defaultdict
import sys, re, os
import pandas as pd
import data_util as du
# i2b2/VA relation labels between treatment and problem concepts
trp_rel = ['TrIP', 'TrWP', 'TrCP', 'TrAP', 'TrNAP']
# relation labels between test and problem concepts
tep_rel = ['TeRP', 'TeCP']
# problem-problem relation label
pp_rel = ['PIP']
# common dosage/frequency abbreviations
# NOTE(review): not referenced by the functions visible in this module chunk
dosages = ['mg', 'bid', 'prn', 'qd', 'po', 'tid', 'qhs', 'qid', 'qod']
def load_stoplist(fn):
    """Load a stopword file (one word per line) into a {word: count} dict.

    Fixes: the file handle is now closed deterministically (the original
    leaked it), and the Python-2-only dict.has_key call is replaced with
    dict.get, preserving the per-word counts.
    """
    h = {}
    with open(fn) as f:
        for ln in f:
            swd = ln.rstrip(' \n')
            h[swd] = h.get(swd, 0) + 1
    return h
def include_wd(wd):
    """Decide whether a token should be kept during cleaning.

    Kept: tokens containing a letter, or single punctuation from
    '/:;()[]{}-+?'.  Dropped: age expressions, masked/starred tokens,
    anything with a digit, and multi-char tokens starting with a non-letter.
    """
    keep = bool(re.search(r'[A-Za-z]', wd)) or wd in '/:;()[]{}-+?'
    if 'year-old' in wd or wd in ('y/o', 'yo', 'y.o.'):
        keep = False
    if '&' in wd or '**' in wd:
        keep = False
    if re.search(r'[0-9]', wd):
        keep = False
    if re.search(r'^[^A-Za-z].+', wd):
        keep = False
    return keep
def clean_wds(wdsin, hstop={}, strict=True):
    """Normalize a token list for the vocabulary/instances.

    Strips double quotes and non-ASCII characters, drops stopwords in
    *hstop*, and in strict mode additionally filters via include_wd, splits
    hyphenated tokens, and trims trailing non-letters.

    Fix: the Python-2-only ``hstop.has_key(wd)`` is replaced with ``wd in
    hstop`` (identical semantics, works on Python 3 as well).  The mutable
    default *hstop* is only read, never mutated, so it is safe.
    """
    wdsout = []
    for wd in wdsin:
        if not strict or include_wd(wd):
            wd = re.sub('"', '', wd)
            wd = du.removeNonAscii(wd)
            if wd not in hstop:
                if strict and ('-' in wd and '-' != wd):
                    # split hyphenated compounds into their parts
                    for swd in wd.split('-'):
                        if len(swd) > 1:
                            wdsout.append(swd)
                else:
                    if strict and len(wd) > 1:
                        wd = re.sub('[^A-Za-z]*$', '', wd)
                    if len(wd) > 0:
                        wdsout.append(wd)
    return wdsout
def load_mask(words, iid):
    """Find spans of de-identification masks (``**NAME[ ... ]`` style) in *words*.

    Returns {(start, end): 1} with half-open word-index ranges.  *iid* only
    labels the warning printed for an unterminated mask.

    Fix: removed an unreachable ``if st == -1`` error branch that sat inside
    a block guarded by ``st != -1``.
    """
    hmask = {}
    wid = 0
    st = -1
    for word in words:
        if re.search(r'^\*\*.*\[', word):   # opening marker, e.g. '**NAME['
            st = wid
        # a mask may open and close on the same word
        if st != -1 and re.search(r'\]', word):
            hmask[(st, wid + 1)] = 1
            st = -1
        wid += 1
    if st != -1:
        # unterminated mask: close it at end-of-sentence, as before
        # NOTE(review): (st, wid+1) extends one past len(words); preserved
        # for compatibility with the original behavior.
        print('mask unfinished in %s' % (iid))
        hmask[(st, wid + 1)] = 1
    return hmask
def inmask(i, hmask):
    """Return True if word index *i* falls inside any (start, end) span key of *hmask*."""
    for mst, mend in hmask:
        if mst <= i < mend:
            return True
    return False
def mask_concept(words, st, end, hmask, hproblem, htreatment, htest):
    """Return the words in [st, end) lying outside every concept and mask span."""
    kept = []
    for i in range(st, end):
        if inmask(i, hproblem) or inmask(i, htreatment) or inmask(i, htest) or inmask(i, hmask):
            continue
        kept.append(words[i])
    return kept
def fmask(words, st, end, hmask={}, hproblem={}, htreatment={}, htest={}, mask=False, skip_concept=False):
    """Extract words[st:end], optionally masking PHI and collapsing concepts.

    When *mask* is True, tokens inside *hmask* spans (from load_mask) are
    dropped.  When *skip_concept* is True, each contiguous run of tokens
    inside a problem/treatment/test span is replaced by a single placeholder
    token ('problem'/'treatment'/'test').
    NOTE(review): the mutable default arguments are only read here, never
    mutated, so they are safe.
    """
    mwds = []; out = True  # `out`: previous token was outside every concept span
    for i in range(st, end):
        if not (mask and inmask(i, hmask)):
            if skip_concept and inmask(i, hproblem):
                if out:
                    mwds += ['problem']  # placeholder emitted once per run
                out = False
            elif skip_concept and inmask(i, htreatment):
                if out:
                    mwds += ['treatment']
                out = False
            elif skip_concept and inmask(i, htest):
                if out:
                    mwds += ['test']
                out = False
            else:
                mwds += [words[i]]
                out = True
    return mwds;
def concept_type(cs, ce, iid, hproblem, htreatment, htest):
    """Classify the concept span (cs, ce) as 'problem', 'treatment' or 'test'.

    Each h* argument is a collection of (start, end) spans for that concept
    type within the current sentence; *iid* labels the error message.

    Fix: an unknown span previously printed a message and then crashed with
    UnboundLocalError on ``return ct``; it now raises ValueError with the
    same diagnostic text.
    """
    span = (cs, ce)
    if span in hproblem:
        return 'problem'
    if span in htreatment:
        return 'treatment'
    if span in htest:
        return 'test'
    raise ValueError('should not be here %s %s in %s' % (cs, ce, iid))
def markup_sen(words, c1s, c1e, c2s, c2e, iid, hproblem, htreatment, htest):
    """Return a copy of *words* with both concept spans wrapped as [ ... ]type markers."""
    c1t = concept_type(c1s, c1e, iid, hproblem, htreatment, htest)
    c2t = concept_type(c2s, c2e, iid, hproblem, htreatment, htest)
    # Order the two spans left-to-right (ties keep c2 first, as before).
    if c1s < c2s:
        (fs, fe, ft), (ss, se, snd) = (c1s, c1e, c1t), (c2s, c2e, c2t)
    else:
        (fs, fe, ft), (ss, se, snd) = (c2s, c2e, c2t), (c1s, c1e, c1t)
    marked = list(words[:fs])
    marked += ['['] + words[fs:fe] + [']%s' % ft]
    marked += words[fe:ss]
    marked += ['['] + words[ss:se] + [']%s' % snd]
    marked += words[se:]
    return marked
def build_inst(iid, c1s, c1e, c2s, c2e, sen, vocab, hlen, rel='None', padlen=0, hstop={}, hproblem={}, htreatment={}, htest={}, mask=False, skip_concept=False):
    """Build one relation instance dict from a sentence and two concept spans.

    Splits the sentence into the five Seg-CNN segments (preceding, concept 1,
    middle, concept 2, succeeding), pads the concept segments with *padlen*
    context words, updates the per-concept-type-pair maximum segment lengths
    in *hlen*, and returns a dict with the segments, the relation label and
    the marked-up sentence.
    NOTE(review): *vocab* is accepted but not updated here — vocabulary
    counting happens in load_rel; confirm with callers.
    """
    words = sen.split()
    hmask = load_mask(words, iid)
    # concept segments keep every token (strict=False), minus PHI masks
    c1 = clean_wds(fmask(words, c1s, c1e, hmask), hstop, strict=False)
    c2 = clean_wds(fmask(words, c2s, c2e, hmask), hstop, strict=False)
    c1t = concept_type(c1s, c1e, iid, hproblem, htreatment, htest)
    c2t = concept_type(c2s, c2e, iid, hproblem, htreatment, htest)
    # canonical key for the concept-type pair, e.g. 'problem_treatment'
    cts = '_'.join(sorted((c1t, c2t)))
    prec_end = min(c1s, c2s); succ_start = max(c1e,c2e)
    prec = clean_wds(fmask(words, 0, prec_end, hmask, hproblem, htreatment, htest, mask, skip_concept), hstop)
    succ = clean_wds(fmask(words, succ_start, len(words), hmask, hproblem, htreatment, htest, mask, skip_concept), hstop)
    mid = clean_wds(fmask(words, min(c1e,c2e), max(c1s,c2s), hmask, hproblem, htreatment, htest, mask, skip_concept), hstop)
    # track the maximum length of each segment per concept-type pair
    hlen[cts]['c1'] = max(hlen[cts]['c1'], len(c1))
    hlen[cts]['c2'] = max(hlen[cts]['c2'], len(c2))
    hlen[cts]['mid'] = max(hlen[cts]['mid'], len(mid))
    # pad each concept segment with up to padlen words of surrounding context
    if c1s < c2s:
        c1 = prec[-padlen:] + c1 + mid[:padlen]
        c2 = mid[-padlen:] + c2 + succ[:padlen]
    else:
        c1 = mid[-padlen:] + c1 + succ[:padlen]
        c2 = prec[-padlen:] + c2 + mid[:padlen]
    prec = prec[-padlen:]
    succ = succ[:padlen]
    hlen[cts]['prec'] = max(hlen[cts]['prec'], len(prec))
    hlen[cts]['succ'] = max(hlen[cts]['succ'], len(succ))
    mwords = markup_sen(words, c1s, c1e, c2s, c2e, iid, hproblem, htreatment, htest)
    datum = {'iid':iid,
             'rel':rel,
             'c1': c1,
             'c2': c2,
             'prec': prec,
             'succ': succ,
             'mid': mid,
             'sen': ' '.join(mwords)}
    return datum;
def add_none_rel(fn, hpair, sens, rels, vocab, hlen, mask=False, mid_lmax=None, padlen=0, hstop={}, hproblem={}, htreatment={}, htest={}, skip_concept=False):
    """Append negative ('None') instances for every remaining candidate pair.

    *hpair* maps sentence id -> candidate concept pairs that were NOT
    annotated with a relation (load_rel pops the annotated ones).  Pairs
    whose middle segment exceeds *mid_lmax* words are skipped.  Instances
    are appended to *rels* in place.
    """
    for senid in hpair:
        for con_pair in hpair[senid]:
            c1s = con_pair[0][0]
            c1e = con_pair[0][1]
            c2s = con_pair[1][0]
            c2e = con_pair[1][1]
            sen = sens[senid].lower()
            iid = '%s:%s (%d,%d) (%d,%d)' % (fn, senid, c1s, c1e, c2s, c2e)
            midlen = max(c1s,c2s) - min(c1e,c2e)
            # skip pairs separated by too many words
            if mid_lmax != None and midlen > mid_lmax:
                continue
            datum = build_inst(iid, c1s, c1e, c2s, c2e, sen, vocab, hlen, padlen=padlen, hstop=hstop, hproblem=hproblem[senid], htreatment=htreatment[senid], htest=htest[senid], mask=mask, skip_concept=skip_concept)
            if datum != None:
                rels.append(datum)
def load_con(fncon, htrp, htep, hpp):
    """Load an i2b2 .con concept file and enumerate candidate concept pairs.

    Parses lines of the form ``c="..." LINE:START LINE:END||t="TYPE"`` into
    per-sentence span lists, then fills the caller-supplied defaultdicts with
    every treatment-problem (*htrp*), test-problem (*htep*) and ordered
    problem-problem (*hpp*) pair per sentence.

    Fix: the line counter ``lc`` was incremented once before the read loop,
    so diagnostics always reported line 1; it is now incremented per line.

    :return: (hproblem, htreatment, htest) dicts of sorted (start, end) spans
        keyed by 0-based sentence id.
    """
    hproblem = defaultdict(list)
    htreatment = defaultdict(list)
    htest = defaultdict(list)
    lc = 0
    with open(fncon, 'r') as f:
        for ln in f:
            lc += 1  # count every line so error messages carry the real line number
            ln = ln.rstrip(' \n')
            mo = re.search(r'c=".*?" (\d+):(\d+) \d+:(\d+)\|\|t="(.*?)"', ln)
            if mo:
                senid = int(mo.group(1)) - 1  # annotation line numbers start at 1
                cs = int(mo.group(2))
                ce = int(mo.group(3)) + 1     # half-open end offset
                ctype = mo.group(4)
                if ctype == "problem":
                    hproblem[senid].append((cs, ce))
                elif ctype == "treatment":
                    htreatment[senid].append((cs, ce))
                elif ctype == "test":
                    htest[senid].append((cs, ce))
                else:
                    print('unrecognized ctype %s at %d in %s' % (ctype, lc, fncon))
    # sort the concepts in each sentence by start position
    for senid in htreatment:
        htreatment[senid] = sorted(htreatment[senid], key=lambda x: x[0])
    for senid in htest:
        htest[senid] = sorted(htest[senid], key=lambda x: x[0])
    for senid in hproblem:
        hproblem[senid] = sorted(hproblem[senid], key=lambda x: x[0])
    # candidate treatment-problem pairs within each sentence
    for senid in htreatment:
        for tr in htreatment[senid]:
            for p in hproblem[senid]:
                htrp[senid][(tr, p)] = 1
    # candidate test-problem pairs
    for senid in htest:
        for te in htest[senid]:
            for p in hproblem[senid]:
                htep[senid][(te, p)] = 1
    # candidate problem-problem pairs, kept in left-to-right order
    for senid in hproblem:
        for i in range(len(hproblem[senid]) - 1):
            for j in range(i + 1, len(hproblem[senid])):
                p1 = hproblem[senid][i]
                p2 = hproblem[senid][j]
                if p1[0] < p2[0]:
                    hpp[senid][(p1, p2)] = 1
                else:
                    print('collapsed %s and %s in %d in %s' % (p1, p2, senid, fncon))
    return (hproblem, htreatment, htest)
def load_rel(fnrel, sens, htrp, htep, hpp, vocab, hlen, trp_data, tep_data, pp_data, mask=False, padlen=0, hstop={}, hproblem={}, htreatment={}, htest={}, skip_concept=False, pip_reorder=False):
    """Parse an i2b2 relation annotation (.rel) file and build labelled instances.

    Every annotated concept pair becomes an instance (via build_inst) and is
    removed from the candidate-pair dicts htrp/htep/hpp produced by load_con;
    the pairs that remain afterwards are emitted as None-relation (negative)
    examples through add_none_rel.

    Args:
        fnrel: path to the .rel annotation file.
        sens: sentences of the current document, indexed by sentence id.
        htrp, htep, hpp: candidate treatment-problem / test-problem /
            problem-problem pair dicts, pruned in place.
        vocab: word-frequency dict, updated in place.
        hlen: length-statistics dict passed through to build_inst.
        trp_data, tep_data, pp_data: output instance lists, appended in place.
        pip_reorder: if True, order PIP concept pairs left-to-right.
    """
    sen_seen = {}  # sentence ids whose words were already counted into vocab
    fnroot = re.sub(r'^.*/', '', fnrel)  # basename only, used in instance ids
    with open(fnrel, 'r') as f:
        lc = 0; trp_mid_lmax = 0; tep_mid_lmax = 0; pp_mid_lmax = 0
        for ln in f:
            ln = ln.rstrip(' \n')
            lc += 1
            mo = re.search(r'c=".*?" (\d+):(\d+) \d+:(\d+)\|\|r="(.*?)"\|\|c=".*?" \d+:(\d+) \d+:(\d+)', ln)
            if mo:
                senid = int(mo.group(1))-1 # start with 1 in annotation
                c1s = int(mo.group(2))
                c1e = int(mo.group(3))+1
                rel = mo.group(4)
                c2s = int(mo.group(5))
                c2e = int(mo.group(6))+1
                sen = sens[senid].lower()
                # modernization: dict.has_key() was removed in Python 3; the
                # 'in' operator behaves identically under Python 2 as well
                if senid not in sen_seen:
                    words = sen.split()
                    for word in set(clean_wds(words, hstop)):
                        vocab[word] += 1
                    sen_seen[senid] = 1
                iid = '%s:%s (%d,%d) (%d,%d)' % (fnroot, senid, c1s, c1e, c2s, c2e)
                if pip_reorder and rel == 'PIP':
                    # present the left-most concept first for PIP relations
                    datum = build_inst(iid, min(c1s,c2s), min(c1e,c2e), max(c1s,c2s), max(c1e,c2e), sen, vocab, hlen, rel, padlen=padlen, hstop=hstop, hproblem=hproblem[senid], htreatment=htreatment[senid], htest=htest[senid], mask=mask, skip_concept=skip_concept)
                else:
                    datum = build_inst(iid, c1s, c1e, c2s, c2e, sen, vocab, hlen, rel, padlen=padlen, hstop=hstop, hproblem=hproblem[senid], htreatment=htreatment[senid], htest=htest[senid], mask=mask, skip_concept=skip_concept)
                midlen = max(c1s,c2s) - min(c1e,c2e)  # token gap between the two concepts
                con_pair = ((c1s, c1e), (c2s, c2e))
                con_pair2 = ((c2s, c2e), (c1s, c1e))
                if rel in trp_rel:
                    trp_data.append(datum)
                    if con_pair not in htrp[senid]:
                        print('no trp pair %s in %s' % (con_pair,iid))
                    htrp[senid].pop(con_pair, None)
                    trp_mid_lmax = max(trp_mid_lmax, midlen)
                elif rel in tep_rel:
                    tep_data.append(datum)
                    if con_pair not in htep[senid]:
                        print('no tep pair %s in %s' % (con_pair,iid))
                    htep[senid].pop(con_pair, None)
                    tep_mid_lmax = max(tep_mid_lmax, midlen)
                elif rel in pp_rel:
                    pp_data.append(datum)
                    if con_pair not in hpp[senid] and con_pair2 not in hpp[senid]:
                        print('no pp pair %s in %s' % (con_pair,iid))
                    hpp[senid].pop(con_pair, None)
                    hpp[senid].pop(con_pair2, None)
                    pp_mid_lmax = max(pp_mid_lmax, midlen)
                else:
                    print('unrecognized rel %s' % (rel))
            else:
                print('non-matching line %d in %s' % (lc, fnrel))
    # whatever is still left in htrp/htep/hpp carries no annotated relation
    add_none_rel(fnroot, htrp, sens, trp_data, vocab, hlen, mask=mask, padlen=padlen, hstop=hstop, hproblem=hproblem, htreatment=htreatment, htest=htest, skip_concept=skip_concept)
    add_none_rel(fnroot, htep, sens, tep_data, vocab, hlen, mask=mask, padlen=padlen, hstop=hstop, hproblem=hproblem, htreatment=htreatment, htest=htest, skip_concept=skip_concept)
    add_none_rel(fnroot, hpp, sens, pp_data, vocab, hlen, mask=mask, padlen=padlen, hstop=hstop, hproblem=hproblem, htreatment=htreatment, htest=htest, skip_concept=skip_concept)
    return
def build_data(dn, vocab, hlen, mask=False, padlen=0, hstop={}, skip_concept=False, pip_reorder=False):
    """
    Load all documents under ``dn`` (expects txt/, rel/ and concept/
    subfolders) and build relation instances for the three relation families.

    Args:
        dn: corpus folder containing txt/, rel/ and concept/ subfolders.
        vocab: word-frequency dict, updated in place.
        hlen: length-statistics dict, updated in place.

    Returns:
        (trp_data, tep_data, pp_data): treatment-problem, test-problem and
        problem-problem instance lists across all documents.
    """
    trp_data = []  # treatment-problem instances
    tep_data = []  # test-problem instances
    pp_data = []   # problem-problem instances
    dntxt = '%s/txt' % (dn)
    dnrel = '%s/rel' % (dn)
    dncon = '%s/concept' % (dn)
    fc = 0  # processed file counter
    for fntxt in os.listdir(dntxt):
        htrp = defaultdict(dict)
        htep = defaultdict(dict)
        hpp = defaultdict(dict)
        # bug fix: the '.' in '.txt$' was unescaped, so e.g. 'atxt' matched too
        if not(re.search(r'\.txt$', fntxt)):
            continue
        fc += 1
        # bug fix: anchor and escape the extension so a 'txt' occurring
        # elsewhere in the file name cannot be replaced by accident
        fnrel = re.sub(r'\.txt$', '.rel', fntxt)
        fncon = re.sub(r'\.txt$', '.con', fntxt)
        sens = []
        with open('%s/%s' % (dntxt, fntxt), "r") as f:
            for ln in f:
                ln = ln.rstrip(' \n')
                sens.append(ln)
        (hproblem, htreatment, htest) = load_con('%s/%s' % (dncon, fncon), htrp, htep, hpp)
        load_rel('%s/%s' % (dnrel, fnrel), sens, htrp, htep, hpp, vocab, hlen, trp_data, tep_data, pp_data, mask=mask, padlen=padlen, hstop=hstop, hproblem=hproblem, htreatment=htreatment, htest=htest, skip_concept=skip_concept, pip_reorder=pip_reorder)
    print(fc)
    return trp_data, tep_data, pp_data
def build_train_test(cdn='/n/data1/hms/dbmi/zaklab/yluo/semrel', hlen = None, padlen=0, fnstop=None, skip_concept=False, pip_reorder=False):
    """Build the training and test instance sets from all four i2b2 corpora.

    Args:
        cdn: root folder of the Relation Challenge data.
        hlen: optional length-statistics dict; a fresh one is created if omitted.
        fnstop: optional stoplist file name.

    Returns:
        (trp_rel_tr, tep_rel_tr, pp_rel_tr, trp_rel_te, tep_rel_te,
        pp_rel_te, vocab, hlen).
    """
    # bug fix: the old default 'hlen=defaultdict(...)' was created once at
    # definition time and silently shared across calls; build a fresh one here
    if hlen is None:
        hlen = defaultdict(lambda: defaultdict(float))
    hstop = {}; vocab = defaultdict(float)
    if fnstop != None:
        hstop=load_stoplist(fnstop)
    trp_upmcp_tr, tep_upmcp_tr, pp_upmcp_tr = build_data('%s/Relation_Challenge_Data/Released/concept_assertion_relation_training/upmcp' % (cdn), vocab, hlen, mask=True, padlen=padlen, hstop=hstop, skip_concept=skip_concept, pip_reorder=pip_reorder)
    print('upmcp_tr %d' % (len(trp_upmcp_tr)))
    trp_upmcd_tr, tep_upmcd_tr, pp_upmcd_tr = build_data('%s/Relation_Challenge_Data/Released/concept_assertion_relation_training/upmcd' % (cdn), vocab, hlen, mask=True, padlen=padlen, hstop=hstop, skip_concept=skip_concept, pip_reorder=pip_reorder)
    print('upmcd_tr %d' % (len(trp_upmcd_tr)))
    trp_beth_tr, tep_beth_tr, pp_beth_tr = build_data('%s/Relation_Challenge_Data/Released/concept_assertion_relation_training/beth' % (cdn), vocab, hlen, mask=True, padlen=padlen, hstop=hstop, skip_concept=skip_concept, pip_reorder=pip_reorder)
    print('beth_tr %d' % (len(trp_beth_tr)))
    trp_partners_tr, tep_partners_tr, pp_partners_tr = build_data('%s/Relation_Challenge_Data/Released/concept_assertion_relation_training/partners' % (cdn), vocab, hlen, padlen=padlen, hstop=hstop, skip_concept=skip_concept, pip_reorder=pip_reorder)
    print('partners_tr %d' % (len(trp_partners_tr)))
    trp_rel_te, tep_rel_te, pp_rel_te = build_data('%s/Relation_Challenge_Data/Released/concept_assertion_relation_test' % (cdn), vocab, hlen, mask=True, padlen=padlen, hstop=hstop, skip_concept=skip_concept, pip_reorder=pip_reorder)
    # pool the four training corpora into single training sets
    trp_rel_tr = trp_upmcp_tr + trp_upmcd_tr + trp_beth_tr + trp_partners_tr
    tep_rel_tr = tep_upmcp_tr + tep_upmcd_tr + tep_beth_tr + tep_partners_tr
    pp_rel_tr = pp_upmcp_tr + pp_upmcd_tr + pp_beth_tr + pp_partners_tr
    return trp_rel_tr, tep_rel_tr, pp_rel_tr, trp_rel_te, tep_rel_te, pp_rel_te, vocab, hlen
def embed_train_test(fnem, fnwid='../data/vocab.txt', fndata='../data/semrel.p', padlen=0, fnstop=None, skip_concept=True, pip_reorder=False, binEmb=False):
    """Build the datasets, write the vocabulary file, index the word
    embeddings and pickle everything needed for training.

    Args:
        fnem: path to the pre-trained word-embedding file.
        fnwid: output path for the vocabulary list (one word per line).
        fndata: output path for the pickled dataset bundle.
        binEmb: if True, read the embeddings in word2vec binary format.

    Returns:
        (mem, hwoov, hwid): embedding matrix (float32), out-of-vocabulary
        map and word-to-index map, as produced by the du helper module.
    """
    trp_rel_tr, tep_rel_tr, pp_rel_tr, trp_rel_te, tep_rel_te, pp_rel_te, vocab, hlen = build_train_test(padlen=padlen, fnstop=fnstop, skip_concept=skip_concept, pip_reorder=pip_reorder)
    # write every word that occurs at least once; drop the rest from vocab
    fwid = open(fnwid, 'w')
    for wd in sorted(vocab.keys()):
        if vocab[wd] >= 1:
            fwid.write('%s\n' % (wd))
        else:
            vocab.pop(wd, None)
    fwid.close()
    if binEmb:
        mem, hwoov, hwid = du.load_bin_vec(fnem, fnwid)
    else:
        mem, hwoov, hwid = du.indexEmbedding(fnem, fnwid)
    mem = mem.astype('float32')
    # the saved data are lists of relation dicts, with keys c1, c2 ,etc.
    cPickle.dump([trp_rel_tr, tep_rel_tr, pp_rel_tr, trp_rel_te, tep_rel_te, pp_rel_te, vocab, dict(hlen), mem, hwoov, hwid], open(fndata, "wb"))
    print "dataset created!"
    return mem, hwoov, hwid;
| yuanluo/seg_cnn | src/cnn_preprocess.py | Python | mit | 17,257 |
import sys
import os
import shutil
import re
def remove_console(text):
    """Strip console.log(...) / console.debug(...) statements from JavaScript source.

    The argument match is greedy, so everything up to the last ')' on the
    line (plus an optional trailing ';') is removed.
    """
    # bug fix: the dot was unescaped ('console.log' also matched e.g.
    # 'consoleXlog'); use a raw string and escape it
    return re.sub(r'console\.(log|debug)\((.*)\);?', '', text)
# base names of the build artifacts written to ../build/
me_filename = 'mediaelement'
mep_filename = 'mediaelementplayer'
combined_filename = 'mediaelement-and-player'
# BUILD MediaElement (single file)
print('building MediaElement.js')
# core sources, listed in dependency order
me_files = []
me_files.append('me-header.js')
me_files.append('me-namespace.js')
me_files.append('me-utility.js')
me_files.append('me-plugindetector.js')
me_files.append('me-featuredetection.js')
me_files.append('me-mediaelements.js')
me_files.append('me-shim.js')
me_files.append('me-i18n.js')
me_files.append('me-i18n-locale-de.js')
me_files.append('me-i18n-locale-zh.js')
# concatenate the sources, strip console.* calls, write the combined file
code = ''
for item in me_files:
    src_file = open('js/' + item,'r')
    code += src_file.read() + "\n"
code = remove_console(code)
tmp_file = open('../build/' + me_filename + '.js','w')
tmp_file.write(code)
tmp_file.close()
# BUILD MediaElementPlayer (single file)
print('building MediaElementPlayer.js')
# player sources, listed in dependency order
mep_files = []
mep_files.append('mep-header.js')
mep_files.append('mep-library.js')
mep_files.append('mep-player.js')
mep_files.append('mep-feature-playpause.js')
mep_files.append('mep-feature-stop.js')
mep_files.append('mep-feature-progress.js')
mep_files.append('mep-feature-time.js')
mep_files.append('mep-feature-volume.js')
mep_files.append('mep-feature-fullscreen.js')
mep_files.append('mep-feature-tracks.js')
mep_files.append('mep-feature-contextmenu.js')
mep_files.append('mep-feature-postroll.js')
# mep_files.append('mep-feature-sourcechooser.js')
# concatenate the sources, strip console.* calls, write the combined file
code = ''
for item in mep_files:
    src_file = open('js/' + item,'r')
    code += src_file.read() + "\n"
code = remove_console(code)
tmp_file = open('../build/' + mep_filename + '.js','w')
tmp_file.write(code)
tmp_file.close()
# MINIFY both scripts
print('Minifying JavaScript')
# os.system("java -jar yuicompressor-2.4.2.jar ../build/" + me_filename + ".js -o ../build/" + me_filename + ".min.js --charset utf-8 -v")
# os.system("java -jar yuicompressor-2.4.2.jar ../build/" + mep_filename + ".js -o ../build/" + mep_filename + ".min.js --charset utf-8 -v")
# minify with the Google Closure Compiler (replaced YUI Compressor, above)
os.system("java -jar compiler.jar --js ../build/" + me_filename + ".js --js_output_file ../build/" + me_filename + ".min.js")
os.system("java -jar compiler.jar --js ../build/" + mep_filename + ".js --js_output_file ../build/" + mep_filename + ".min.js")
# PREPEND intros
def addHeader(headerFilename, filename):
    """Prepend the contents of ``headerFilename`` to ``filename`` in place.

    Used to re-attach the license/header comment that the minifier strips.
    """
    # use context managers so the files are closed even if a read/write fails
    # (the original opened/closed each handle manually)
    with open(headerFilename) as header_file:
        header_txt = header_file.read()
    # read the current contents of the file before truncating it
    with open(filename) as target_file:
        file_txt = target_file.read()
    # rewrite the file: header first, then the original contents
    with open(filename, 'w') as target_file:
        target_file.write(header_txt)
        target_file.write(file_txt)
# re-attach the license headers that minification removed
addHeader('js/me-header.js', '../build/' + me_filename + '.min.js')
addHeader('js/mep-header.js', '../build/' + mep_filename + '.min.js')
# COMBINE into single script
print('Combining scripts')
# unminified combined build
code = ''
src_file = open('../build/' + me_filename + '.js','r')
code += src_file.read() + "\n"
src_file = open('../build/' + mep_filename + '.js','r')
code += src_file.read() + "\n"
tmp_file = open('../build/' + combined_filename + '.js','w')
tmp_file.write(code)
tmp_file.close()
# minified combined build
code = ''
src_file = open('../build/' + me_filename + '.min.js','r')
code += src_file.read() + "\n"
src_file = open('../build/' + mep_filename + '.min.js','r')
code += src_file.read() + "\n"
tmp_file = open('../build/' + combined_filename + '.min.js','w')
tmp_file.write(code)
tmp_file.close()
# MINIFY CSS
print('Minifying CSS')
src_file = open('css/mediaelementplayer.css','r')
tmp_file = open('../build/mediaelementplayer.css','w')
tmp_file.write(src_file.read())
tmp_file.close()
os.system("java -jar yuicompressor-2.4.2.jar ../build/mediaelementplayer.css -o ../build/mediaelementplayer.min.css --charset utf-8 -v")
#COPY skin files
print('Copying Skin Files')
shutil.copy2('css/controls.png','../build/controls.png')
shutil.copy2('css/controls.svg','../build/controls.svg')
shutil.copy2('css/bigplay.png','../build/bigplay.png')
shutil.copy2('css/bigplay.svg','../build/bigplay.svg')
shutil.copy2('css/loading.gif','../build/loading.gif')
shutil.copy2('css/mejs-skins.css','../build/mejs-skins.css')
shutil.copy2('css/controls-ted.png','../build/controls-ted.png')
shutil.copy2('css/controls-wmp.png','../build/controls-wmp.png')
shutil.copy2('css/controls-wmp-bg.png','../build/controls-wmp-bg.png')
print('DONE!')
| seekmas/wujiayao | web/bundles/mediaelement/src/Builder.py | Python | mit | 4,545 |
# Copyright (C) 2014 Andreas M. Weller <andreas.m.weller@gmail.com>
#
# read a bedtools output file from
#
# bedtools coverage -abam Q2PL2_H01_N.bam -b TSB_148_gene_panel_HP_amplicons.bed -d > test_coverage.csv
#
# and find bases with coverage or strand_ratio below the threshold
# general modules
import pandas as pd
import sys
import subprocess
import os
import matplotlib.pylab as plt
import numpy as np
import pprint
import logging
import re
import seaborn as sns
sns.set(font="serif") # prevents error message about fonts
# personal modules from the same folder
import plot_exon_coverage_v2 as plotting
import plot_exon_coverage_all_samples as all_sample_plotting
import unite_coverage_files as UniteCoverage
import CoverageCheckClasses
from CoverageCheckConfig import *
# folder this script lives in, used to locate bundled input files (input/...)
script_folder = os.path.dirname(os.path.realpath(__file__)) + "/"
# packaging metadata
DISTNAME = 'coveragecheck'
MAINTAINER = 'Andreas Weller'
MAINTAINER_EMAIL = 'andreas.m.weller@gmail.com'
LICENSE = 'BSD (3-clause)'
DOWNLOAD_URL = 'https://github.com/aweller/CoverageCheck/'
VERSION = '0.1'
#####################################################################################################################
def parse_exons_into_dataframe_and_dict(exon_filename):
    """
    Parse a list of all human exons into 2 datastructures:
    1. exons (pandas df with one row per exon)
    2. exons_per_gene (dictionary of GENE -> list of (start, stop, exon_no)),
       also published as a module-level global of the same name.

    The exon file is tab-separated with columns:
    chrom, exon_start, exon_stop, gene, exon_no, strand.
    """
    #exon_filename = "/home/andreas/bioinfo/core/general/data/HumanExons_Ensembl_v65_merged.csv"
    #exon_filename = "/home/andreas/bioinfo/core/general/data/HumanExons_Ensembl_v75_all_genes_merged.csv"
    header = ["chrom", "exon_start", "exon_stop", "gene", "exon_no", "strand"]
    exons = pd.read_csv(exon_filename, sep="\t", names=header)
    exons["gene_upper"] = exons.gene.str.upper()
    # NOTE(review): DataFrame.sort(columns=...) is the pre-0.17 pandas API;
    # this module appears pinned to that version (see drop_duplicates(cols=...))
    exons = exons.sort(columns = ["gene", "exon_start", "exon_stop"])
    # expose the per-gene dict globally so plotting helpers can reuse it
    global exons_per_gene
    exons_per_gene = {}
    for _, row in exons.iterrows():
        gene = row["gene"].upper()  # gene names are compared case-insensitively
        start, stop = int(row["exon_start"]), int(row["exon_stop"])
        exon_no = row["exon_no"]
        if not exons_per_gene.get(gene):
            exons_per_gene[gene] = []
        exons_per_gene[gene].append((start, stop, exon_no))
    return exons, exons_per_gene
def parse_coverage_file_into_dataframe(coverage_file):
    """
    Parses the coverage file, transforms into a per-base matrix and returns a pandas DF

    The input is the output of ``bedtools coverage -s -d`` (one row per
    amplicon base and strand). Rows are collapsed to one row per genomic
    position ("chrompos"), with per-strand depths and their ratio.
    """
    header = ["chr", "start", "stop", "amplicon", "na", "strand", "amplicon_pos", "dp"]
    rawdf = pd.read_csv(coverage_file, sep="\t", names=header)
    # absolute genomic position = amplicon start + offset within the amplicon
    rawdf["pos"] = rawdf.start +rawdf.amplicon_pos
    rawdf["chrompos"] = rawdf.apply(lambda x : "\t".join([str(x["chr"]), str(x["pos"]) ]), axis = 1 )
    # amplicon names are expected to look like GENE_..., so the prefix is the gene
    rawdf["gene"] = rawdf.apply(lambda x : x["amplicon"].split("_")[0], axis = 1 )
    # df: per base
    # collapse the per-amplicon/per-strand rows onto unique genomic positions;
    # dp sums both strands, minus_dp/plus_dp keep the per-strand maxima
    df = dict(chrom = rawdf.chr.groupby(rawdf.chrompos).min(),
              pos = rawdf.pos.groupby(rawdf.chrompos).min(),
              gene = rawdf.gene.groupby(rawdf.chrompos).min(),
              start = rawdf.start.groupby(rawdf.chrompos).min(),
              stop = rawdf.stop.groupby(rawdf.chrompos).min(),
              minus_dp = rawdf[rawdf.strand == "-"].dp.groupby(rawdf.chrompos).max(),
              plus_dp = rawdf[rawdf.strand == "+"].dp.groupby(rawdf.chrompos).max(),
              dp = rawdf.dp.groupby(rawdf.chrompos).sum(),
              )
    df = pd.DataFrame(df).reset_index()
    def get_strand_ratios(row):
        # ratio of the deeper strand over the shallower one (>= 1);
        # if one strand is entirely missing, the other strand's raw depth
        # is returned as the "ratio", and 0 if both are empty
        minus = row["minus_dp"]
        plus = row["plus_dp"]
        if plus + minus == 0:
            return 0
        if plus == 0:
            return minus
        elif minus == 0:
            return plus
        else:
            larger = max([plus, minus])
            smaller = min([plus, minus])
            return abs(larger/float(smaller))
    df["strand_ratio"] = df.apply(get_strand_ratios, axis =1 )
    df = df.sort(columns = ["chrom", "pos"])
    return df
def parse_amplicons_into_df(bed, exons):
    """
    Parse the amplicons in the bedfile into a DF
    Get rid of amplicons that are too far away from any exon (as defined in the exon file)

    Amplicon names are expected to look like GENE_ALTGENE_...; both name
    fields are tried against the exon table, and whichever matches more
    exons is used for the plausibility check.
    """
    header = ["chrom", "start", "stop", "amplicon", "na", "strand"]
    raw_amplicons = pd.read_csv(bed, sep="\t", names=header)
    raw_amplicons["gene"] = raw_amplicons.apply(lambda x: x["amplicon"].split("_")[0].upper(), axis=1)
    raw_amplicons["altgene"] = raw_amplicons.apply(lambda x: x["amplicon"].split("_")[1].upper(), axis=1)
    def crosscheck_gene_chromosome(row):
        """ Check if this row is close enough to an exon """
        offset = 5000  # allowed distance (bp) outside the gene's exon span
        gene = row["gene"]
        altgene = row["altgene"]
        chrom = row["chrom"]
        start = row["start"]
        stop = row["stop"]
        gexons = exons[exons.gene == gene]
        if len(gexons) > 0:
            # prefer the alternative gene name if it matches more exons
            altgexons = exons[exons.gene == altgene]
            if len(gexons) < len(altgexons):
                gexons = altgexons
            chrom_ok = chrom == gexons.chrom.unique()[0]
            start_ok = start > gexons.exon_start.min() - offset
            stop_ok = stop < gexons.exon_start.max() + offset
            return all([chrom_ok, start_ok, stop_ok])
        else:
            #print "gexons empty for", gene
            return False
    raw_amplicons["correct_chrom"] = raw_amplicons.apply(crosscheck_gene_chromosome, axis=1)
    amplicons = raw_amplicons[raw_amplicons.correct_chrom]
    amplicons = amplicons.drop_duplicates(cols=["start", "stop"]) # as in the 'separate strand' beds each exon is present twice
    return amplicons
#####################################################################################################################
def find_bad_positions(coverage_matrix, target_folder = None, trait = None, samplename = None,
                       trait_cutoff = None, whitelist = None):
    """
    Walk through all bases and find contiguous regions of bases that fail the
    coverage/strandbias cutoff, and write those regions to a per-sample file.
    If a base is an expected variant (recorded in an ExpectedVariants instance
    ('whitelist')), record the trait to the instance.

    Args:
        coverage_matrix: per-base DataFrame from parse_coverage_file_into_dataframe.
        trait: either "coverage" or "strandbias".
        trait_cutoff: minimum coverage resp. maximum strand ratio per base.
        whitelist: optional ExpectedVariants instance, updated in place.

    Returns:
        The (possibly updated) whitelist.
    """
    region = None     # currently open CoverageCheckClasses.BadRegion, if any
    last_gene = None
    last_pos = 0
    total_bases = 0
    bad_bases = 0
    sample = samplename
    bad_output_name = target_folder + sample + "_failed_regions_%s_cutoff_%s.csv" % (trait, trait_cutoff)
    bad_output = open(bad_output_name, "w")
    if trait == "strandbias":
        output_header = ["gene", "chrom", "start", "stop", "mean_strand_bias", "size"]
    elif trait == "coverage":
        output_header = ["gene", "chrom", "start", "stop", "mean_coverage", "size"]
    bad_output.write("\t".join(output_header) + "\n")
    for index, pandas_dict in coverage_matrix.iterrows():
        chrompos, chrom, dp, gene, minus_dp, plus_dp, pos, start, stop, strand_ratio = pandas_dict
        row = [chrom, start, stop, gene, ".", "NA", (pos-start), strand_ratio]
        row = "\t".join([str(x) for x in row])
        start = int(start)
        stop = int(stop)
        pos = int(pos)
        if whitelist:
            if whitelist.dict.get(chrompos):
                whitelist.add_coverage(chrompos, dp)
                whitelist.add_strand_ratio(chrompos, strand_ratio)
        ##########################################################
        pass_check = False
        if trait == "coverage":
            if dp > trait_cutoff:
                pass_check = True
        elif trait == "strandbias":
            if strand_ratio < trait_cutoff:
                pass_check = True
        total_bases += 1
        if not pass_check:
            bad_bases += 1
        ##########################################################
        # close any open region when the gene changes or positions are not contiguous
        if (gene != last_gene or last_pos != (pos - 1)) and region:
            bad_output.write(region.print_output() + "\n")
            region = None
        if not pass_check:
            # bug fix: previously a failing base directly after a gene/contiguity
            # boundary was dropped instead of starting a new region
            if region:
                region.add_row(row)
            else:
                region = CoverageCheckClasses.BadRegion(row)
        elif region:
            bad_output.write(region.print_output() + "\n")
            region = None
        last_pos = pos
        last_gene = gene
    # bug fix: a region still open at the end of the matrix was never written
    if region:
        bad_output.write(region.print_output() + "\n")
    bad_output.close()
    ######################################################################
    good_bases = total_bases - bad_bases
    # guard against an empty coverage matrix (avoids ZeroDivisionError)
    pass_percent = 100 * round( good_bases / float(total_bases), 3) if total_bases else 0.0
    if trait == "strandbias":
        sampleinfo.add_strandbias(sample, [pass_percent, good_bases, total_bases])
        logging.info( "%s percent (%s/%s) of positions in %s have a strand bias below the threshold (%s:1)."
                     % (pass_percent, good_bases, total_bases, sample, trait_cutoff) )
    elif trait == "coverage":
        sampleinfo.add_coverage(sample, [pass_percent, good_bases, total_bases])
        logging.info( "%s percent (%s/%s) of positions in %s have at least the minimum coverage of %sX."
                     % (pass_percent, good_bases, total_bases, sample, trait_cutoff) )
    return whitelist
#####################################################################################################################
def run_bedtools_coverage(bam, output, bed = None):
    """Run ``bedtools coverage`` to get the strand-aware per-base coverage
    of the target bam over the panel bed, redirected into ``output``.

    Aborts the whole program if bedtools returns a non-zero exit code.
    """
    command = "bedtools coverage -s -d -abam %s -b %s > %s" % (bam, bed, output)
    logging.debug( command )
    if subprocess.call(command, shell=True) != 0:
        logging.critical( "Bedtools coverage run error." )
        logging.critical( command )
        logging.critical( "Sorry, aborting..." )
        sys.exit()
def run_bedtools_intersect(bed):
    """Run ``intersectBed`` to reduce the bundled genome-wide exon list to
    the exons overlapped by the panel bed.

    Returns the path of the reduced exon bed; aborts on a non-zero exit code.
    """
    output = bed.replace(".bed", "_covered_exon_locations.bed")
    command = "intersectBed -u -a %s/input/%s -b %s > %s" % (script_folder, human_exon_file, bed, output)
    logging.debug( command )
    if subprocess.call(command, shell=True) != 0:
        logging.critical( "Bedtools intersect run error." )
        logging.critical( command )
        logging.critical( "Sorry, aborting..." )
        sys.exit()
    return output
def check_bed_filetype(filename):
    """
    Check if the bedfile is actually a bed and convert it to one if its an Illumina Manifest file

    Returns the path of a usable 6-column bed file (possibly a previously
    converted or newly converted one); aborts on a malformed bed.
    """
    rows = open(filename).readlines()
    if "Header" not in rows[0]:
        # a proper bed has exactly 6 tab-separated fields in every row
        if all([len(row.split("\t")) == 6 for row in rows]):
            logging.info( "Correct bed file detected: "+ filename )
            return filename
        else:
            # bug fix: the filename used to be passed as a %-formatting
            # argument with no placeholder, which broke the log call
            logging.critical( "Error in input bed file: %s" % (filename) )
            logging.critical( "Not all rows contain 6 fields as is expected." )
            logging.critical( "Sorry, aborting..." )
            sys.exit()
    else: # this is an Illumina manifest file
        logging.error( "Input is not a bed file, but an Illumina manifest file: " + filename )
        expected_bed = filename[:-4] + "_plusminus.bed"
        if os.path.exists(expected_bed):
            logging.info( "Switching to existing bed file: " + expected_bed )
            return expected_bed
        else:
            logging.info( "Converting manifest to bed." )
            import manifest2bed as m2b
            all_out, plusminus = m2b.convert_manifest(filename)
            logging.info( "Switching to newly created bed file: " + plusminus )
            return plusminus
def fix_gene_names_in_bedfile(bed, gene_alias_filename):
    """
    Write a copy of the bedfile in which every gene name is translated
    through the gene_aliases file and return the new bedfile's path.
    If the fixed bedfile already exists it is reused as-is.
    """
    fixed_bed_name = bed.replace(".bed", "_fixed_genenames.bed")
    logging.info( "Gene Name Aliases detected: " + gene_alias_filename )
    if os.path.exists(fixed_bed_name):
        logging.info( "Switching to existing fixed bed file: " + fixed_bed_name )
        return fixed_bed_name
    logging.info( "Fixing gene names in bedfile." )
    # alias rows are "<old> <new>" pairs, tab- or space-separated
    gene_aliases = {}
    with open(gene_alias_filename) as alias_handle:
        for alias_row in alias_handle:
            old_name, new_name = re.split("[\t ]", alias_row.strip())
            gene_aliases[old_name] = new_name
    # rewrite column 4 (the amplicon name, GENE_...) with the aliased gene
    with open(fixed_bed_name, "w") as out:
        with open(bed) as bed_handle:
            for bed_row in bed_handle:
                fields = bed_row.strip().split("\t")
                amplicon = fields[3]
                gene = amplicon.split("_")[0]
                new_amplicon = amplicon.replace(gene, gene_aliases.get(gene, gene))
                fields[3] = new_amplicon
                out.write("\t".join(fields) + "\n")
                if amplicon != new_amplicon:
                    logging.debug("Replaced %s with %s" % (amplicon, new_amplicon))
    return fixed_bed_name
def remove_empty_files_from_folder(folder):
    """Delete all zero-byte files in ``folder`` (leftovers from earlier runs)."""
    for filename in os.listdir(folder):
        try:
            if os.path.getsize(folder +"/"+ filename) == 0:
                os.remove(folder +"/"+ filename)
        except OSError:
            # bug fix: this was a bare 'except' that also hid programming
            # errors; only filesystem issues (vanished files, permissions,
            # directories) are expected and safe to ignore here
            pass
#####################################################################################################################
#####################################################################################################################
def run(bed, target_folder, min_dp, max_strand_ratio, whitelist_filename=None, gene_alias_filename=None, target_bams_filename=None):
    """Top-level CoverageCheck pipeline.

    For every bam in ``target_folder``: compute per-base coverage with
    bedtools, report under-covered and strand-biased regions, check expected
    variants, and create per-sample plots; finally build cross-sample
    summaries.

    Args:
        bed: panel region file (bed or Illumina manifest).
        target_folder: folder containing the bam files; output goes here too.
        min_dp: minimum accepted coverage per base.
        max_strand_ratio: maximum accepted coverage ratio between strands.
        whitelist_filename: optional list of expected variants.
        gene_alias_filename: optional gene-name alias file ("old new" rows).
        target_bams_filename: optional list restricting which bams to process.
    """
    # the GUI/CLI may hand over the literal string "None" for unset options
    if whitelist_filename == "None":
        whitelist_filename = None
    if gene_alias_filename == "None":
        gene_alias_filename = None
    if target_bams_filename == "None":
        target_bams_filename = None
    remove_empty_files_from_folder(target_folder) # remove empty files that might have been left over from previous runs
    if " " in target_folder:
        logging.critical( "The third-party tools used by CoverageCheck don't accept spaces in folder names." )
        logging.critical( "Please replace the spaces in your bam folder with underscores.")
        logging.critical( "Sorry, aborting..." )
        # bug fix: the message promised to abort, but execution continued
        sys.exit()
    ##############################################################################################
    # configure logging to both sys.stdout and a file
    logging_filename = "CoverageCheck_log.txt"
    # parenthesized form works under both Python 2 and 3
    print("Log messages printed to %s" % (logging_filename))
    # set up logging to file
    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s %(name)-8s %(levelname)-8s %(message)s',
                        datefmt='%d-%m-%y %H:%M',
                        filename=logging_filename)
    # set up logging to console
    console = logging.StreamHandler()
    console.setLevel(logging.INFO)
    # set a format which is simpler for console use
    formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
    # add the handler to the root logger
    logging.getLogger('').addHandler(console)
    logging.info( "-" * 100 )
    logging.info( "Minimum accepted coverage per base: %sX" % (min_dp) )
    logging.info( "Maximum accepted coverage ratio between strands: %s:1" % (max_strand_ratio) )
    #####################################################
    # setup variables and report information
    bed = check_bed_filetype(bed)
    if gene_alias_filename:
        bed = fix_gene_names_in_bedfile(bed, gene_alias_filename)
    # parse all exons and all proper amplicons into DataFrames
    exon_filename = run_bedtools_intersect(bed)
    exons, exons_per_gene = parse_exons_into_dataframe_and_dict(exon_filename)
    amplicons = parse_amplicons_into_df(bed, exons)
    # setup the class that collects stats on each sample
    global sampleinfo
    sampleinfo = CoverageCheckClasses.SampleInfo()
    # parse list of expected variants
    if whitelist_filename:
        variant_no = len([x for x in open(whitelist_filename).readlines() if x[0] != "#"])
        logging.info( "Found %s expected variants to check in %s" % (variant_no, whitelist_filename) )
        if variant_no == 0:
            logging.critical( "ERROR: expected variant file doesn't contain variants." )
            sys.exit()
    else:
        logging.info( "No expected variants specified." )
    # parse list of allowed bams
    target_folder = target_folder + "/" # just to be on the safe side
    bams = [x for x in os.listdir(target_folder) if x.endswith(".bam")]
    logging.info( "Found %s bams in the target folder %s" % (len(bams), target_folder))
    if target_bams_filename:
        target_bams = [x.strip() for x in open(target_folder + target_bams_filename).readlines()]
        target_bams = [x+".bam" for x in target_bams if not x.endswith(".bam")]
        logging.info( "Parsed list of %s allowed bams in %s" % (len(target_bams), target_bams_filename) )
        bams = [x for x in bams if x in target_bams]
        logging.info( "%s bams left to process in the target folder." % len(bams) )
    logging.info( "-" * 100 )
    ##############################################################################################
    # process each samples separately
    # bug fix: initialize whitelist so the summary section below cannot hit a
    # NameError when no bams are processed
    whitelist = None
    for bam in bams:
        samplename = bam.split(".")[0]
        sample_output_folder = "%s/%s_results/" % (target_folder, samplename)
        sampleinfo.add_sample(samplename)
        if not os.path.exists(sample_output_folder):
            os.makedirs(sample_output_folder)
        ##############################################################################################
        # create the coverage file (if necessary) and parse it into a pandas DF
        bedtools_output = target_folder + samplename + "_coverage.csv"
        if not os.path.exists(bedtools_output):
            logging.info( "Running bedtools coverage for: " + bam )
            run_bedtools_coverage(target_folder + bam, output = bedtools_output, bed=bed)
        coverage_matrix = parse_coverage_file_into_dataframe(bedtools_output)
        ##############################################################################################
        # Initialize the class instance thats collects information on each expected variant
        whitelist = None
        if whitelist_filename:
            whitelist = CoverageCheckClasses.ExpectedVariants(whitelist_filename, samplename=samplename, folder=sample_output_folder,
                                                              dp_cutoff = min_dp, strandbias = max_strand_ratio, exon_dict = exons_per_gene)
        ##############################################################################################
        # run
        if output_undercovered_regions:
            whitelist = find_bad_positions(coverage_matrix, target_folder = sample_output_folder, trait = "coverage",
                                           samplename = samplename, trait_cutoff = min_dp, whitelist=whitelist)
        if output_strandbiased_regions:
            whitelist = find_bad_positions(coverage_matrix, target_folder = sample_output_folder, trait = "strandbias",
                                           samplename = samplename, trait_cutoff = max_strand_ratio, whitelist=whitelist)
        if output_undercovered_regions or output_strandbiased_regions:
            if whitelist_filename:
                whitelist.print_output()
        ##############################################################################################
        # plot
        if output_coverage_plots:
            sample_df = plotting.create_all_coverage_plots(bedtools_output, exons=exons, exons_per_gene = exons_per_gene,
                                                           target_folder = sample_output_folder, whitelist=whitelist, amplicons=amplicons)
        logging.info( "-" * 100 )
    ##############################################################################################
    # create summaries across all samples
    # this has to happen after the individual bam treatment because it assumes that coverage files for each bam are present
    sampleinfo.print_output()
    all_sample_filename = "all_samples.csv"
    UniteCoverage.unite(all_sample_filename, target_folder = target_folder)
    byte_size = os.path.getsize(target_folder+all_sample_filename)
    mb_size = byte_size/1.049e+6
    if output_all_sample_summary:
        if mb_size > 2000:
            logging.error( "The united coverage size of all samples is %sMB. Skipping summary plots." % (round(mb_size,2)) )
        else:
            # NOTE(review): 'whitelist' here is the instance from the *last*
            # processed sample (or None) — confirm this is intended
            all_sample_plotting.plot_exon_coverage(all_sample_filename, exons=exons, exons_per_gene = exons_per_gene,
                                                   target_folder = target_folder, whitelist=whitelist)
##################################################################################
if __name__ == "__main__":
    import argparse
    # command-line interface wrapping run()
    parser = argparse.ArgumentParser()
    parser.add_argument("-b", "--bed", help="region file in bed or Illumina Manifest format",
                        required=True)
    parser.add_argument("-x", "--expected_variants", help="expected variants in vcf or HGMD BioMart export format")
    parser.add_argument("-a", "--alias", help="gene name aliases, format: [old new]")
    parser.add_argument("-w", "--whitelist", help="list of bams to analyse")
    parser.add_argument("-c", "--min_coverage", type=int, help="minimum coverage (Default: 50X)", default=default_min_coverage)
    parser.add_argument("-s", "--max_strandratio", type=float, help="maximum strand ratio (Default: 5)", default=default_max_strandbias)
    args = parser.parse_args()
    pprint.pprint(args)
    ##################################################################################
    bed = args.bed
    whitelist_filename = args.expected_variants
    gene_alias_filename = args.alias
    target_bams_filename = args.whitelist
    min_dp = args.min_coverage
    max_strand_ratio = args.max_strandratio
    # the bams are expected to live next to the bed file
    if "/" in bed:
        target_folder = "/".join(bed.split("/")[:-1]) + "/"
    else:
        target_folder = "./"
    run(bed,
        target_folder,
        min_dp,
        max_strand_ratio,
        whitelist_filename=whitelist_filename,
        gene_alias_filename=gene_alias_filename,
        target_bams_filename=target_bams_filename)
| aweller/CoverageCheck | CoverageCheck.py | Python | bsd-3-clause | 23,333 |
from __future__ import with_statement
import logging
import warnings
import django
from django.conf import settings
from django.conf.urls.defaults import patterns, url
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned, ValidationError
from django.core.urlresolvers import NoReverseMatch, reverse, resolve, Resolver404, get_script_prefix
from django.db import transaction
from django.db.models.sql.constants import QUERY_TERMS
from django.http import HttpResponse, HttpResponseNotFound, Http404
from django.utils.cache import patch_cache_control, patch_vary_headers
from tastypie.authentication import Authentication
from tastypie.authorization import ReadOnlyAuthorization
from tastypie.bundle import Bundle
from tastypie.cache import NoCache
from tastypie.constants import ALL, ALL_WITH_RELATIONS
from tastypie.exceptions import NotFound, BadRequest, InvalidFilterError, HydrationError, InvalidSortError, ImmediateHttpResponse
from tastypie import fields
from tastypie import http
from tastypie.paginator import Paginator
from tastypie.serializers import Serializer
from tastypie.throttle import BaseThrottle
from tastypie.utils import is_valid_jsonp_callback_value, dict_strip_unicode_keys, trailing_slash
from tastypie.utils.mime import determine_format, build_content_type
from tastypie.validation import Validation
try:
set
except NameError:
from sets import Set as set
# The ``copy`` module became function-friendly in Python 2.5 and
# ``copycompat`` was added in post 1.1.1 Django (r11901)..
try:
from django.utils.copycompat import deepcopy
except ImportError:
from copy import deepcopy
# If ``csrf_exempt`` isn't present, stub it.
try:
from django.views.decorators.csrf import csrf_exempt
except ImportError:
def csrf_exempt(func):
return func
# Django 1.5 has moved this constant up one level.
try:
from django.db.models.constants import LOOKUP_SEP
except ImportError:
from django.db.models.sql.constants import LOOKUP_SEP
class NOT_AVAILABLE:
    """Sentinel type marking data that could not be resolved/dehydrated."""
    def __str__(self):
        return 'No such data is available.'
class ResourceOptions(object):
    """
    A configuration class for ``Resource``.
    Provides sane defaults and the logic needed to augment these settings with
    the internal ``class Meta`` used on ``Resource`` subclasses.
    """
    # Pluggable behaviors; each may be overridden via a subclass's inner
    # ``Meta`` class.
    serializer = Serializer()
    authentication = Authentication()
    authorization = ReadOnlyAuthorization()
    cache = NoCache()
    throttle = BaseThrottle()
    validation = Validation()
    paginator_class = Paginator
    # HTTP verbs permitted on the endpoints.  When the ``list_``/``detail_``
    # variants are left as ``None``, ``__new__`` below copies
    # ``allowed_methods`` into them.
    allowed_methods = ['get', 'post', 'put', 'delete', 'patch']
    list_allowed_methods = None
    detail_allowed_methods = None
    # Pagination bounds; the Django setting ``API_LIMIT_PER_PAGE`` overrides
    # the per-page default.
    limit = getattr(settings, 'API_LIMIT_PER_PAGE', 20)
    max_limit = 1000
    api_name = None
    resource_name = None
    urlconf_namespace = None
    default_format = 'application/json'
    filtering = {}
    ordering = []
    object_class = None
    queryset = None
    fields = []
    excludes = []
    include_resource_uri = True
    include_absolute_url = False
    always_return_data = False
    collection_name = 'objects'
    detail_uri_name = 'pk'
    def __new__(cls, meta=None):
        # ``meta`` is the user's inner ``Meta`` class (or ``None``).  Build a
        # throwaway subclass whose class attributes are the user's overrides,
        # so the shared defaults above remain untouched.
        overrides = {}
        # Handle overrides.
        if meta:
            for override_name in dir(meta):
                # No internals please.
                if not override_name.startswith('_'):
                    overrides[override_name] = getattr(meta, override_name)
        allowed_methods = overrides.get('allowed_methods', ['get', 'post', 'put', 'delete', 'patch'])
        if overrides.get('list_allowed_methods', None) is None:
            overrides['list_allowed_methods'] = allowed_methods
        if overrides.get('detail_allowed_methods', None) is None:
            overrides['detail_allowed_methods'] = allowed_methods
        return object.__new__(type('ResourceOptions', (cls,), overrides))
class DeclarativeMetaclass(type):
    """
    Metaclass powering the declarative ``Resource`` field syntax.
    Collects declared API fields (anything exposing ``dehydrated_type``) into
    ``base_fields``, inherits fields from ``Resource`` parents, attaches a
    ``ResourceOptions`` instance as ``_meta`` and auto-derives
    ``resource_name``/``resource_uri`` when not provided.
    """
    def __new__(cls, name, bases, attrs):
        attrs['base_fields'] = {}
        declared_fields = {}
        # Inherit any fields from parent(s).
        try:
            parents = [b for b in bases if issubclass(b, Resource)]
            # Simulate the MRO.
            parents.reverse()
            for p in parents:
                parent_fields = getattr(p, 'base_fields', {})
                for field_name, field_object in parent_fields.items():
                    attrs['base_fields'][field_name] = deepcopy(field_object)
        except NameError:
            # ``Resource`` itself is being constructed and does not exist yet.
            pass
        for field_name, obj in attrs.items():
            # Look for ``dehydrated_type`` instead of doing ``isinstance``,
            # which can break down if Tastypie is re-namespaced as something
            # else.
            if hasattr(obj, 'dehydrated_type'):
                field = attrs.pop(field_name)
                declared_fields[field_name] = field
        attrs['base_fields'].update(declared_fields)
        attrs['declared_fields'] = declared_fields
        new_class = super(DeclarativeMetaclass, cls).__new__(cls, name, bases, attrs)
        opts = getattr(new_class, 'Meta', None)
        new_class._meta = ResourceOptions(opts)
        if not getattr(new_class._meta, 'resource_name', None):
            # No ``resource_name`` provided. Attempt to auto-name the resource.
            class_name = new_class.__name__
            name_bits = [bit for bit in class_name.split('Resource') if bit]
            resource_name = ''.join(name_bits).lower()
            new_class._meta.resource_name = resource_name
        if getattr(new_class._meta, 'include_resource_uri', True):
            if not 'resource_uri' in new_class.base_fields:
                new_class.base_fields['resource_uri'] = fields.CharField(readonly=True)
        elif 'resource_uri' in new_class.base_fields and not 'resource_uri' in attrs:
            # ``include_resource_uri`` was switched off and the field was only
            # inherited, not declared here: drop it.
            del(new_class.base_fields['resource_uri'])
        for field_name, field_object in new_class.base_fields.items():
            if hasattr(field_object, 'contribute_to_class'):
                field_object.contribute_to_class(new_class, field_name)
        return new_class
class Resource(object):
    """
    Handles the data, request dispatch and responding to requests.
    Serialization/deserialization is handled "at the edges" (i.e. at the
    beginning/end of the request/response cycle) so that everything internally
    is Python data structures.
    This class tries to be non-model specific, so it can be hooked up to other
    data sources, such as search results, files, other data, etc.
    """
    # Python 2-style metaclass hook: collects declared fields into
    # ``base_fields`` and builds ``_meta`` from the inner ``Meta`` class.
    __metaclass__ = DeclarativeMetaclass
def __init__(self, api_name=None):
self.fields = deepcopy(self.base_fields)
if not api_name is None:
self._meta.api_name = api_name
def __getattr__(self, name):
if name in self.fields:
return self.fields[name]
raise AttributeError(name)
    def wrap_view(self, view):
        """
        Wraps methods so they can be called in a more functional way as well
        as handling exceptions better.
        Note that if ``BadRequest`` or an exception with a ``response`` attr
        are seen, there is special handling to either present a message back
        to the user or return the response traveling with the exception.
        """
        @csrf_exempt
        def wrapper(request, *args, **kwargs):
            try:
                # ``view`` is the *name* of a method on this resource;
                # resolve it lazily per request.
                callback = getattr(self, view)
                response = callback(request, *args, **kwargs)
                # Our response can vary based on a number of factors, use
                # the cache class to determine what we should ``Vary`` on so
                # caches won't return the wrong (cached) version.
                varies = getattr(self._meta.cache, "varies", [])
                if varies:
                    patch_vary_headers(response, varies)
                if self._meta.cache.cacheable(request, response):
                    if self._meta.cache.cache_control():
                        # If the request is cacheable and we have a
                        # ``Cache-Control`` available then patch the header.
                        patch_cache_control(response, **self._meta.cache.cache_control())
                if request.is_ajax() and not response.has_header("Cache-Control"):
                    # IE excessively caches XMLHttpRequests, so we're disabling
                    # the browser cache here.
                    # See http://www.enhanceie.com/ie/bugs.asp for details.
                    patch_cache_control(response, no_cache=True)
                return response
            except (BadRequest, fields.ApiFieldError), e:
                return http.HttpBadRequest(e.args[0])
            except ValidationError, e:
                return http.HttpBadRequest(', '.join(e.messages))
            except Exception, e:
                # Exceptions carrying a pre-built response (e.g. raised by
                # auth/throttling helpers) are returned as-is.
                if hasattr(e, 'response'):
                    return e.response
                # A real, non-expected exception.
                # Handle the case where the full traceback is more helpful
                # than the serialized error.
                if settings.DEBUG and getattr(settings, 'TASTYPIE_FULL_DEBUG', False):
                    raise
                # Re-raise the error to get a proper traceback when the error
                # happend during a test case
                if request.META.get('SERVER_NAME') == 'testserver':
                    raise
                # Rather than re-raising, we're going to things similar to
                # what Django does. The difference is returning a serialized
                # error message.
                return self._handle_500(request, e)
        return wrapper
    def _handle_500(self, request, exception):
        """
        Build a serialized error response for an unexpected exception.
        ``NotFound``-flavored exceptions become 404s, everything else 500s.
        Under ``DEBUG`` the traceback is included in the payload; otherwise a
        canned message is returned and the error is logged (and, on Django
        < 1.3, mailed to the admins the way Django core used to).
        """
        import traceback
        import sys
        the_trace = '\n'.join(traceback.format_exception(*(sys.exc_info())))
        response_class = http.HttpApplicationError
        response_code = 500
        NOT_FOUND_EXCEPTIONS = (NotFound, ObjectDoesNotExist, Http404)
        if isinstance(exception, NOT_FOUND_EXCEPTIONS):
            response_class = HttpResponseNotFound
            response_code = 404
        if settings.DEBUG:
            data = {
                "error_message": unicode(exception),
                "traceback": the_trace,
            }
            desired_format = self.determine_format(request)
            serialized = self.serialize(request, data, desired_format)
            return response_class(content=serialized, content_type=build_content_type(desired_format))
        # When DEBUG is False, send an error message to the admins (unless it's
        # a 404, in which case we check the setting).
        send_broken_links = getattr(settings, 'SEND_BROKEN_LINK_EMAILS', False)
        if not response_code == 404 or send_broken_links:
            log = logging.getLogger('django.request.tastypie')
            log.error('Internal Server Error: %s' % request.path, exc_info=sys.exc_info(), extra={'status_code': response_code, 'request':request})
            if django.VERSION < (1, 3, 0):
                from django.core.mail import mail_admins
                subject = 'Error (%s IP): %s' % ((request.META.get('REMOTE_ADDR') in settings.INTERNAL_IPS and 'internal' or 'EXTERNAL'), request.path)
                try:
                    request_repr = repr(request)
                except:
                    request_repr = "Request repr() unavailable"
                message = "%s\n\n%s" % (the_trace, request_repr)
                mail_admins(subject, message, fail_silently=True)
        # Prep the data going out.
        data = {
            "error_message": getattr(settings, 'TASTYPIE_CANNED_ERROR', "Sorry, this request could not be processed. Please try again later."),
        }
        desired_format = self.determine_format(request)
        serialized = self.serialize(request, data, desired_format)
        return response_class(content=serialized, content_type=build_content_type(desired_format))
    def _build_reverse_url(self, name, args=None, kwargs=None):
        """
        A convenience hook for overriding how URLs are built.
        Thin wrapper around Django's ``reverse``.
        See ``NamespacedModelResource._build_reverse_url`` for an example.
        """
        return reverse(name, args=args, kwargs=kwargs)
    def base_urls(self):
        """
        The standard URLs this ``Resource`` should respond to.
        """
        # Ordering matters: the ``schema`` and ``set`` patterns must be
        # listed before the catch-all detail pattern, which would otherwise
        # match those paths first.
        return [
            url(r"^(?P<resource_name>%s)%s$" % (self._meta.resource_name, trailing_slash()), self.wrap_view('dispatch_list'), name="api_dispatch_list"),
            url(r"^(?P<resource_name>%s)/schema%s$" % (self._meta.resource_name, trailing_slash()), self.wrap_view('get_schema'), name="api_get_schema"),
            url(r"^(?P<resource_name>%s)/set/(?P<%s_list>\w[\w/;-]*)%s$" % (self._meta.resource_name, self._meta.detail_uri_name, trailing_slash()), self.wrap_view('get_multiple'), name="api_get_multiple"),
            url(r"^(?P<resource_name>%s)/(?P<%s>\w[\w/-]*)%s$" % (self._meta.resource_name, self._meta.detail_uri_name, trailing_slash()), self.wrap_view('dispatch_detail'), name="api_dispatch_detail"),
        ]
    def override_urls(self):
        """
        Deprecated. Will be removed by v1.0.0. Please use ``prepend_urls`` instead.
        Returning a non-empty list here still works but triggers a
        deprecation warning in the ``urls`` property.
        """
        return []
    def prepend_urls(self):
        """
        A hook for adding your own URLs or matching before the default URLs.
        Returned patterns are placed ahead of ``base_urls`` in the URLconf.
        """
        return []
    @property
    def urls(self):
        """
        The endpoints this ``Resource`` responds to.
        Mostly a standard URLconf, this is suitable for either automatic use
        when registered with an ``Api`` class or for including directly in
        a URLconf should you choose to.
        """
        urls = self.prepend_urls()
        # Backwards-compatibility shim for the deprecated ``override_urls``.
        if self.override_urls():
            warnings.warn("'override_urls' is a deprecated method & will be removed by v1.0.0. Please rename your method to ``prepend_urls``.")
            urls += self.override_urls()
        urls += self.base_urls()
        urlpatterns = patterns('',
            *urls
        )
        return urlpatterns
def determine_format(self, request):
"""
Used to determine the desired format.
Largely relies on ``tastypie.utils.mime.determine_format`` but here
as a point of extension.
"""
return determine_format(request, self._meta.serializer, default_format=self._meta.default_format)
    def serialize(self, request, data, format, options=None):
        """
        Given a request, data and a desired format, produces a serialized
        version suitable for transfer over the wire.
        For JSONP (``text/javascript``) requests the callback name is read
        from ``?callback=`` and validated; an invalid name raises
        ``BadRequest``.
        Mostly a hook, this uses the ``Serializer`` from ``Resource._meta``.
        """
        options = options or {}
        if 'text/javascript' in format:
            # get JSONP callback name. default to "callback"
            callback = request.GET.get('callback', 'callback')
            if not is_valid_jsonp_callback_value(callback):
                raise BadRequest('JSONP callback name is invalid.')
            options['callback'] = callback
        return self._meta.serializer.serialize(data, format, options)
def deserialize(self, request, data, format='application/json'):
"""
Given a request, data and a format, deserializes the given data.
It relies on the request properly sending a ``CONTENT_TYPE`` header,
falling back to ``application/json`` if not provided.
Mostly a hook, this uses the ``Serializer`` from ``Resource._meta``.
"""
deserialized = self._meta.serializer.deserialize(data, format=request.META.get('CONTENT_TYPE', 'application/json'))
return deserialized
    def alter_list_data_to_serialize(self, request, data):
        """
        A hook to alter list data just before it gets serialized & sent to the user.
        Useful for restructuring/renaming aspects of the what's going to be
        sent.
        Should accommodate for a list of objects, generally also including
        meta data.  Default implementation is a no-op.
        """
        return data
    def alter_detail_data_to_serialize(self, request, data):
        """
        A hook to alter detail data just before it gets serialized & sent to the user.
        Useful for restructuring/renaming aspects of the what's going to be
        sent.
        Should accommodate for receiving a single bundle of data.  Default
        implementation is a no-op.
        """
        return data
    def alter_deserialized_list_data(self, request, data):
        """
        A hook to alter list data just after it has been received from the user &
        gets deserialized.
        Useful for altering the user data before any hydration is applied.
        Default implementation is a no-op.
        """
        return data
    def alter_deserialized_detail_data(self, request, data):
        """
        A hook to alter detail data just after it has been received from the user &
        gets deserialized.
        Useful for altering the user data before any hydration is applied.
        Default implementation is a no-op.
        """
        return data
    def dispatch_list(self, request, **kwargs):
        """
        A view for handling the various HTTP methods (GET/POST/PUT/DELETE) over
        the entire list of resources.
        Relies on ``Resource.dispatch`` for the heavy-lifting.
        """
        return self.dispatch('list', request, **kwargs)
    def dispatch_detail(self, request, **kwargs):
        """
        A view for handling the various HTTP methods (GET/POST/PUT/DELETE) on
        a single resource.
        Relies on ``Resource.dispatch`` for the heavy-lifting.
        """
        return self.dispatch('detail', request, **kwargs)
    def dispatch(self, request_type, request, **kwargs):
        """
        Handles the common operations (allowed HTTP method, authentication,
        throttling, method lookup) surrounding most CRUD interactions.
        ``request_type`` is ``'list'`` or ``'detail'`` and selects both the
        allowed-method list and the handler name (e.g. ``get_list``).
        """
        allowed_methods = getattr(self._meta, "%s_allowed_methods" % request_type, None)
        # Allow clients that cannot issue PUT/DELETE/PATCH to tunnel the real
        # verb through the ``X-HTTP-Method-Override`` header.
        if 'HTTP_X_HTTP_METHOD_OVERRIDE' in request.META:
            request.method = request.META['HTTP_X_HTTP_METHOD_OVERRIDE']
        request_method = self.method_check(request, allowed=allowed_methods)
        # Resolve e.g. ``get_list`` / ``post_detail`` dynamically.
        method = getattr(self, "%s_%s" % (request_method, request_type), None)
        if method is None:
            raise ImmediateHttpResponse(response=http.HttpNotImplemented())
        self.is_authenticated(request)
        self.is_authorized(request)
        self.throttle_check(request)
        # All clear. Process the request.
        request = convert_post_to_put(request)
        response = method(request, **kwargs)
        # Add the throttled request.
        self.log_throttled_access(request)
        # If what comes back isn't a ``HttpResponse``, assume that the
        # request was accepted and that some action occurred. This also
        # prevents Django from freaking out.
        if not isinstance(response, HttpResponse):
            return http.HttpNoContent()
        return response
def remove_api_resource_names(self, url_dict):
"""
Given a dictionary of regex matches from a URLconf, removes
``api_name`` and/or ``resource_name`` if found.
This is useful for converting URLconf matches into something suitable
for data lookup. For example::
Model.objects.filter(**self.remove_api_resource_names(matches))
"""
kwargs_subset = url_dict.copy()
for key in ['api_name', 'resource_name']:
try:
del(kwargs_subset[key])
except KeyError:
pass
return kwargs_subset
def method_check(self, request, allowed=None):
"""
Ensures that the HTTP method used on the request is allowed to be
handled by the resource.
Takes an ``allowed`` parameter, which should be a list of lowercase
HTTP methods to check against. Usually, this looks like::
# The most generic lookup.
self.method_check(request, self._meta.allowed_methods)
# A lookup against what's allowed for list-type methods.
self.method_check(request, self._meta.list_allowed_methods)
# A useful check when creating a new endpoint that only handles
# GET.
self.method_check(request, ['get'])
"""
if allowed is None:
allowed = []
request_method = request.method.lower()
allows = ','.join(map(str.upper, allowed))
if request_method == "options":
response = HttpResponse(allows)
response['Allow'] = allows
raise ImmediateHttpResponse(response=response)
if not request_method in allowed:
response = http.HttpMethodNotAllowed(allows)
response['Allow'] = allows
raise ImmediateHttpResponse(response=response)
return request_method
def is_authorized(self, request, object=None):
"""
Handles checking of permissions to see if the user has authorization
to GET, POST, PUT, or DELETE this resource. If ``object`` is provided,
the authorization backend can apply additional row-level permissions
checking.
"""
auth_result = self._meta.authorization.is_authorized(request, object)
if isinstance(auth_result, HttpResponse):
raise ImmediateHttpResponse(response=auth_result)
if not auth_result is True:
raise ImmediateHttpResponse(response=http.HttpUnauthorized())
def is_authenticated(self, request):
"""
Handles checking if the user is authenticated and dealing with
unauthenticated users.
Mostly a hook, this uses class assigned to ``authentication`` from
``Resource._meta``.
"""
# Authenticate the request as needed.
auth_result = self._meta.authentication.is_authenticated(request)
if isinstance(auth_result, HttpResponse):
raise ImmediateHttpResponse(response=auth_result)
if not auth_result is True:
raise ImmediateHttpResponse(response=http.HttpUnauthorized())
def throttle_check(self, request):
"""
Handles checking if the user should be throttled.
Mostly a hook, this uses class assigned to ``throttle`` from
``Resource._meta``.
"""
identifier = self._meta.authentication.get_identifier(request)
# Check to see if they should be throttled.
if self._meta.throttle.should_be_throttled(identifier):
# Throttle limit exceeded.
raise ImmediateHttpResponse(response=http.HttpTooManyRequests())
def log_throttled_access(self, request):
"""
Handles the recording of the user's access for throttling purposes.
Mostly a hook, this uses class assigned to ``throttle`` from
``Resource._meta``.
"""
request_method = request.method.lower()
self._meta.throttle.accessed(self._meta.authentication.get_identifier(request), url=request.get_full_path(), request_method=request_method)
    def build_bundle(self, obj=None, data=None, request=None):
        """
        Given either an object, a data dictionary or both, builds a ``Bundle``
        for use throughout the ``dehydrate/hydrate`` cycle.
        If no object is provided, an empty object from
        ``Resource._meta.object_class`` is created so that attempts to access
        ``bundle.obj`` do not fail.
        """
        if obj is None:
            obj = self._meta.object_class()
        return Bundle(obj=obj, data=data, request=request)
    def build_filters(self, filters=None):
        """
        Allows for the filtering of applicable objects.
        The base implementation passes ``filters`` through unchanged.
        This needs to be implemented at the user level.'
        ``ModelResource`` includes a full working version specific to Django's
        ``Models``.
        """
        return filters
    def apply_sorting(self, obj_list, options=None):
        """
        Allows for the sorting of objects being returned.
        The base implementation passes ``obj_list`` through unchanged.
        This needs to be implemented at the user level.
        ``ModelResource`` includes a full working version specific to Django's
        ``Models``.
        """
        return obj_list
    def get_bundle_detail_data(self, bundle):
        """
        Convenience method to return the ``detail_uri_name`` attribute off
        ``bundle.obj``.
        Usually just accesses ``bundle.obj.pk`` by default.
        """
        return getattr(bundle.obj, self._meta.detail_uri_name)
# URL-related methods.
    def detail_uri_kwargs(self, bundle_or_obj):
        """
        This needs to be implemented at the user level.
        Given a ``Bundle`` or an object, it returns the extra kwargs needed to
        generate a detail URI.
        ``ModelResource`` includes a full working version specific to Django's
        ``Models``.
        """
        raise NotImplementedError()
def resource_uri_kwargs(self, bundle_or_obj=None):
"""
Builds a dictionary of kwargs to help generate URIs.
Automatically provides the ``Resource.Meta.resource_name`` (and
optionally the ``Resource.Meta.api_name`` if populated by an ``Api``
object).
If the ``bundle_or_obj`` argument is provided, it calls
``Resource.detail_uri_kwargs`` for additional bits to create
"""
kwargs = {
'resource_name': self._meta.resource_name,
}
if self._meta.api_name is not None:
kwargs['api_name'] = self._meta.api_name
if bundle_or_obj is not None:
kwargs.update(self.detail_uri_kwargs(bundle_or_obj))
return kwargs
    def get_resource_uri(self, bundle_or_obj=None, url_name='api_dispatch_list'):
        """
        Handles generating a resource URI.
        If the ``bundle_or_obj`` argument is not provided, it builds the URI
        for the list endpoint.
        If the ``bundle_or_obj`` argument is provided, it builds the URI for
        the detail endpoint.
        Return the generated URI. If that URI can not be reversed (not found
        in the URLconf), it will return an empty string.
        """
        if bundle_or_obj is not None:
            url_name = 'api_dispatch_detail'
        try:
            return self._build_reverse_url(url_name, kwargs=self.resource_uri_kwargs(bundle_or_obj))
        except NoReverseMatch:
            return ''
    def get_via_uri(self, uri, request=None):
        """
        This pulls apart the salient bits of the URI and populates the
        resource via a ``obj_get``.
        Optionally accepts a ``request``.
        If you need custom behavior based on other portions of the URI,
        simply override this method.
        Raises ``NotFound`` when the URI does not resolve.
        """
        prefix = get_script_prefix()
        chomped_uri = uri
        # Strip the deployment's script prefix but keep its trailing slash
        # (hence ``-1``) so ``resolve`` still sees a path rooted at "/".
        if prefix and chomped_uri.startswith(prefix):
            chomped_uri = chomped_uri[len(prefix)-1:]
        try:
            view, args, kwargs = resolve(chomped_uri)
        except Resolver404:
            raise NotFound("The URL provided '%s' was not a link to a valid resource." % uri)
        return self.obj_get(request=request, **self.remove_api_resource_names(kwargs))
# Data preparation.
    def full_dehydrate(self, bundle):
        """
        Given a bundle with an object instance, extract the information from it
        to populate the resource.
        Each field's ``dehydrate`` runs first, then any per-field
        ``dehydrate_<fieldname>`` method, then the resource-level
        ``dehydrate`` hook.
        """
        # Dehydrate each field.
        for field_name, field_object in self.fields.items():
            # A touch leaky but it makes URI resolution work.
            if getattr(field_object, 'dehydrated_type', None) == 'related':
                field_object.api_name = self._meta.api_name
                field_object.resource_name = self._meta.resource_name
            bundle.data[field_name] = field_object.dehydrate(bundle)
            # Check for an optional method to do further dehydration.
            method = getattr(self, "dehydrate_%s" % field_name, None)
            if method:
                bundle.data[field_name] = method(bundle)
        bundle = self.dehydrate(bundle)
        return bundle
    def dehydrate(self, bundle):
        """
        A hook to allow a final manipulation of data once all fields/methods
        have built out the dehydrated data.
        Useful if you need to access more than one dehydrated field or want
        to annotate on additional data.
        Must return the modified bundle.  Default implementation is a no-op.
        """
        return bundle
    def full_hydrate(self, bundle):
        """
        Given a populated bundle, distill it and turn it back into
        a full-fledged object instance.
        Runs the resource-level ``hydrate`` hook first, then per-field
        ``hydrate_<fieldname>`` methods and each field's own ``hydrate``.
        """
        if bundle.obj is None:
            bundle.obj = self._meta.object_class()
        bundle = self.hydrate(bundle)
        for field_name, field_object in self.fields.items():
            # Read-only fields never write back onto the object.
            if field_object.readonly is True:
                continue
            # Check for an optional method to do further hydration.
            method = getattr(self, "hydrate_%s" % field_name, None)
            if method:
                bundle = method(bundle)
            if field_object.attribute:
                value = field_object.hydrate(bundle)
                # NOTE: We only get back a bundle when it is related field.
                if isinstance(value, Bundle) and value.errors.get(field_name):
                    bundle.errors[field_name] = value.errors[field_name]
                if value is not None or field_object.null:
                    # We need to avoid populating M2M data here as that will
                    # cause things to blow up.
                    if not getattr(field_object, 'is_related', False):
                        setattr(bundle.obj, field_object.attribute, value)
                    elif not getattr(field_object, 'is_m2m', False):
                        # To-one related field: store the related object (or
                        # None, when nulls are allowed and the field isn't
                        # simply blank).
                        if value is not None:
                            setattr(bundle.obj, field_object.attribute, value.obj)
                        elif field_object.blank:
                            continue
                        elif field_object.null:
                            setattr(bundle.obj, field_object.attribute, value)
        return bundle
    def hydrate(self, bundle):
        """
        A hook to allow an initial manipulation of data before all methods/fields
        have built out the hydrated data.
        Useful if you need to access more than one hydrated field or want
        to annotate on additional data.
        Must return the modified bundle.  Default implementation is a no-op.
        """
        return bundle
    def hydrate_m2m(self, bundle):
        """
        Populate the ManyToMany data on the instance.
        Requires ``full_hydrate`` to have run first (so ``bundle.obj``
        exists); raises ``HydrationError`` otherwise.
        """
        if bundle.obj is None:
            raise HydrationError("You must call 'full_hydrate' before attempting to run 'hydrate_m2m' on %r." % self)
        for field_name, field_object in self.fields.items():
            if not getattr(field_object, 'is_m2m', False):
                continue
            if field_object.attribute:
                # Note that we only hydrate the data, leaving the instance
                # unmodified. It's up to the user's code to handle this.
                # The ``ModelResource`` provides a working baseline
                # in this regard.
                bundle.data[field_name] = field_object.hydrate_m2m(bundle)
        # Second pass: run any user-supplied ``hydrate_<fieldname>`` hooks
        # for the M2M fields.
        for field_name, field_object in self.fields.items():
            if not getattr(field_object, 'is_m2m', False):
                continue
            method = getattr(self, "hydrate_%s" % field_name, None)
            if method:
                method(bundle)
        return bundle
def build_schema(self):
"""
Returns a dictionary of all the fields on the resource and some
properties about those fields.
Used by the ``schema/`` endpoint to describe what will be available.
"""
data = {
'fields': {},
'default_format': self._meta.default_format,
'allowed_list_http_methods': self._meta.list_allowed_methods,
'allowed_detail_http_methods': self._meta.detail_allowed_methods,
'default_limit': self._meta.limit,
}
if self._meta.ordering:
data['ordering'] = self._meta.ordering
if self._meta.filtering:
data['filtering'] = self._meta.filtering
for field_name, field_object in self.fields.items():
data['fields'][field_name] = {
'default': field_object.default,
'type': field_object.dehydrated_type,
'nullable': field_object.null,
'blank': field_object.blank,
'readonly': field_object.readonly,
'help_text': field_object.help_text,
'unique': field_object.unique,
}
if field_object.dehydrated_type == 'related':
if getattr(field_object, 'is_m2m', False):
related_type = 'to_many'
else:
related_type = 'to_one'
data['fields'][field_name]['related_type'] = related_type
return data
def dehydrate_resource_uri(self, bundle):
"""
For the automatically included ``resource_uri`` field, dehydrate
the URI for the given bundle.
Returns empty string if no URI can be generated.
"""
try:
return self.get_resource_uri(bundle)
except NotImplementedError:
return ''
except NoReverseMatch:
return ''
def generate_cache_key(self, *args, **kwargs):
"""
Creates a unique-enough cache key.
This is based off the current api_name/resource_name/args/kwargs.
"""
smooshed = []
for key, value in kwargs.items():
smooshed.append("%s=%s" % (key, value))
# Use a list plus a ``.join()`` because it's faster than concatenation.
return "%s:%s:%s:%s" % (self._meta.api_name, self._meta.resource_name, ':'.join(args), ':'.join(smooshed))
# Data access methods.
    def get_object_list(self, request):
        """
        A hook to allow making returning the list of available objects.
        This needs to be implemented at the user level.
        ``ModelResource`` includes a full working version specific to Django's
        ``Models``.
        """
        raise NotImplementedError()
def apply_authorization_limits(self, request, object_list):
"""
Allows the ``Authorization`` class to further limit the object list.
Also a hook to customize per ``Resource``.
"""
if hasattr(self._meta.authorization, 'apply_limits'):
object_list = self._meta.authorization.apply_limits(request, object_list)
return object_list
def can_create(self):
"""
Checks to ensure ``post`` is within ``allowed_methods``.
"""
allowed = set(self._meta.list_allowed_methods + self._meta.detail_allowed_methods)
return 'post' in allowed
def can_update(self):
"""
Checks to ensure ``put`` is within ``allowed_methods``.
Used when hydrating related data.
"""
allowed = set(self._meta.list_allowed_methods + self._meta.detail_allowed_methods)
return 'put' in allowed
def can_delete(self):
"""
Checks to ensure ``delete`` is within ``allowed_methods``.
"""
allowed = set(self._meta.list_allowed_methods + self._meta.detail_allowed_methods)
return 'delete' in allowed
    def apply_filters(self, request, applicable_filters):
        """
        A hook to alter how the filters are applied to the object list.
        This needs to be implemented at the user level.
        ``ModelResource`` includes a full working version specific to Django's
        ``Models``.
        """
        raise NotImplementedError()
    def obj_get_list(self, request=None, **kwargs):
        """
        Fetches the list of objects available on the resource.
        This needs to be implemented at the user level.
        ``ModelResource`` includes a full working version specific to Django's
        ``Models``.
        """
        raise NotImplementedError()
def cached_obj_get_list(self, request=None, **kwargs):
"""
A version of ``obj_get_list`` that uses the cache as a means to get
commonly-accessed data faster.
"""
cache_key = self.generate_cache_key('list', **kwargs)
obj_list = self._meta.cache.get(cache_key)
if obj_list is None:
obj_list = self.obj_get_list(request=request, **kwargs)
self._meta.cache.set(cache_key, obj_list)
return obj_list
    def obj_get(self, request=None, **kwargs):
        """
        Fetches an individual object on the resource.
        This needs to be implemented at the user level. If the object can not
        be found, this should raise a ``NotFound`` exception.
        ``ModelResource`` includes a full working version specific to Django's
        ``Models``.
        """
        raise NotImplementedError()
def cached_obj_get(self, request=None, **kwargs):
"""
A version of ``obj_get`` that uses the cache as a means to get
commonly-accessed data faster.
"""
cache_key = self.generate_cache_key('detail', **kwargs)
bundle = self._meta.cache.get(cache_key)
if bundle is None:
bundle = self.obj_get(request=request, **kwargs)
self._meta.cache.set(cache_key, bundle)
return bundle
    def obj_create(self, bundle, request=None, **kwargs):
        """
        Creates a new object based on the provided data.
        This needs to be implemented at the user level.
        ``ModelResource`` includes a full working version specific to Django's
        ``Models``.
        """
        raise NotImplementedError()
    def obj_update(self, bundle, request=None, **kwargs):
        """
        Updates an existing object (or creates a new object) based on the
        provided data.
        This needs to be implemented at the user level.
        ``ModelResource`` includes a full working version specific to Django's
        ``Models``.
        """
        raise NotImplementedError()
    def obj_delete_list(self, request=None, **kwargs):
        """
        Deletes an entire list of objects.
        This needs to be implemented at the user level.
        ``ModelResource`` includes a full working version specific to Django's
        ``Models``.
        """
        raise NotImplementedError()
    def obj_delete(self, request=None, **kwargs):
        """
        Deletes a single object.
        This needs to be implemented at the user level.
        ``ModelResource`` includes a full working version specific to Django's
        ``Models``.
        """
        raise NotImplementedError()
def create_response(self, request, data, response_class=HttpResponse, **response_kwargs):
"""
Extracts the common "which-format/serialize/return-response" cycle.
Mostly a useful shortcut/hook.
"""
desired_format = self.determine_format(request)
serialized = self.serialize(request, data, desired_format)
return response_class(content=serialized, content_type=build_content_type(desired_format), **response_kwargs)
def error_response(self, errors, request):
if request:
desired_format = self.determine_format(request)
else:
desired_format = self._meta.default_format
serialized = self.serialize(request, errors, desired_format)
response = http.HttpBadRequest(content=serialized, content_type=build_content_type(desired_format))
raise ImmediateHttpResponse(response=response)
def is_valid(self, bundle, request=None):
"""
Handles checking if the data provided by the user is valid.
Mostly a hook, this uses class assigned to ``validation`` from
``Resource._meta``.
If validation fails, an error is raised with the error messages
serialized inside it.
"""
errors = self._meta.validation.is_valid(bundle, request)
if errors:
bundle.errors[self._meta.resource_name] = errors
return False
return True
    def rollback(self, bundles):
        """
        Given the list of bundles, delete all objects pertaining to those
        bundles.
        This needs to be implemented at the user level. No exceptions should
        be raised if possible.
        ``ModelResource`` includes a full working version specific to Django's
        ``Models``.
        """
        raise NotImplementedError()
# Views.
    def get_list(self, request, **kwargs):
        """
        Returns a serialized list of resources.
        Calls ``obj_get_list`` to provide the data, then handles that result
        set and serializes it.
        Should return a HttpResponse (200 OK).
        """
        # TODO: Uncached for now. Invalidation that works for everyone may be
        # impossible.
        objects = self.obj_get_list(request=request, **self.remove_api_resource_names(kwargs))
        sorted_objects = self.apply_sorting(objects, options=request.GET)
        # Paginate before dehydrating so only the requested page is dehydrated.
        paginator = self._meta.paginator_class(request.GET, sorted_objects, resource_uri=self.get_resource_uri(), limit=self._meta.limit, max_limit=self._meta.max_limit, collection_name=self._meta.collection_name)
        to_be_serialized = paginator.page()
        # Dehydrate the bundles in preparation for serialization.
        bundles = [self.build_bundle(obj=obj, request=request) for obj in to_be_serialized[self._meta.collection_name]]
        to_be_serialized[self._meta.collection_name] = [self.full_dehydrate(bundle) for bundle in bundles]
        to_be_serialized = self.alter_list_data_to_serialize(request, to_be_serialized)
        return self.create_response(request, to_be_serialized)
def get_detail(self, request, **kwargs):
    """
    Returns a single serialized resource.

    Calls ``cached_obj_get/obj_get`` to provide the data, then handles that
    result set and serializes it.

    Should return a HttpResponse (200 OK); 404 when the object does not
    exist, 300 when the kwargs match more than one object.
    """
    try:
        obj = self.cached_obj_get(request=request, **self.remove_api_resource_names(kwargs))
    except ObjectDoesNotExist:
        return http.HttpNotFound()
    except MultipleObjectsReturned:
        return http.HttpMultipleChoices("More than one resource is found at this URI.")
    bundle = self.build_bundle(obj=obj, request=request)
    bundle = self.full_dehydrate(bundle)
    bundle = self.alter_detail_data_to_serialize(request, bundle)
    return self.create_response(request, bundle)
def put_list(self, request, **kwargs):
    """
    Replaces a collection of resources with another collection.

    Calls ``delete_list`` to clear out the collection then ``obj_create``
    with the provided the data to create the new collection.

    Return ``HttpNoContent`` (204 No Content) if
    ``Meta.always_return_data = False`` (default).

    Return ``HttpAccepted`` (202 Accepted) if
    ``Meta.always_return_data = True``.
    """
    deserialized = self.deserialize(request, request.raw_post_data, format=request.META.get('CONTENT_TYPE', 'application/json'))
    deserialized = self.alter_deserialized_list_data(request, deserialized)
    if not self._meta.collection_name in deserialized:
        raise BadRequest("Invalid data sent.")
    # The entire existing collection is removed up front; the payload is
    # the complete replacement set.
    self.obj_delete_list(request=request, **self.remove_api_resource_names(kwargs))
    bundles_seen = []
    for object_data in deserialized[self._meta.collection_name]:
        bundle = self.build_bundle(data=dict_strip_unicode_keys(object_data), request=request)
        # Attempt to be transactional, deleting any previously created
        # objects if validation fails.
        try:
            self.obj_create(bundle, request=request, **self.remove_api_resource_names(kwargs))
            bundles_seen.append(bundle)
        except ImmediateHttpResponse:
            self.rollback(bundles_seen)
            raise
    if not self._meta.always_return_data:
        return http.HttpNoContent()
    else:
        to_be_serialized = {}
        to_be_serialized[self._meta.collection_name] = [self.full_dehydrate(bundle) for bundle in bundles_seen]
        to_be_serialized = self.alter_list_data_to_serialize(request, to_be_serialized)
        return self.create_response(request, to_be_serialized, response_class=http.HttpAccepted)
def put_detail(self, request, **kwargs):
    """
    Either updates an existing resource or creates a new one with the
    provided data.

    Calls ``obj_update`` with the provided data first, but falls back to
    ``obj_create`` if the object does not already exist.

    If a new resource is created, return ``HttpCreated`` (201 Created).
    If ``Meta.always_return_data = True``, there will be a populated body
    of serialized data.

    If an existing resource is modified and
    ``Meta.always_return_data = False`` (default), return ``HttpNoContent``
    (204 No Content).

    If an existing resource is modified and
    ``Meta.always_return_data = True``, return ``HttpAccepted`` (202
    Accepted).
    """
    deserialized = self.deserialize(request, request.raw_post_data, format=request.META.get('CONTENT_TYPE', 'application/json'))
    deserialized = self.alter_deserialized_detail_data(request, deserialized)
    bundle = self.build_bundle(data=dict_strip_unicode_keys(deserialized), request=request)
    try:
        updated_bundle = self.obj_update(bundle, request=request, **self.remove_api_resource_names(kwargs))
        if not self._meta.always_return_data:
            return http.HttpNoContent()
        else:
            updated_bundle = self.full_dehydrate(updated_bundle)
            updated_bundle = self.alter_detail_data_to_serialize(request, updated_bundle)
            return self.create_response(request, updated_bundle, response_class=http.HttpAccepted)
    except (NotFound, MultipleObjectsReturned):
        # No (single) existing object: treat the PUT as a create instead.
        updated_bundle = self.obj_create(bundle, request=request, **self.remove_api_resource_names(kwargs))
        location = self.get_resource_uri(updated_bundle)
        if not self._meta.always_return_data:
            return http.HttpCreated(location=location)
        else:
            updated_bundle = self.full_dehydrate(updated_bundle)
            updated_bundle = self.alter_detail_data_to_serialize(request, updated_bundle)
            return self.create_response(request, updated_bundle, response_class=http.HttpCreated, location=location)
def post_list(self, request, **kwargs):
    """
    Create a new resource/object from the deserialized request body.

    Delegates the actual creation to ``obj_create`` and answers with the
    new resource's location.

    Returns ``HttpCreated`` (201 Created); when
    ``Meta.always_return_data = True`` the response body also carries the
    serialized new resource.
    """
    payload = self.deserialize(request, request.raw_post_data, format=request.META.get('CONTENT_TYPE', 'application/json'))
    payload = self.alter_deserialized_detail_data(request, payload)
    bundle = self.build_bundle(data=dict_strip_unicode_keys(payload), request=request)
    created_bundle = self.obj_create(bundle, request=request, **self.remove_api_resource_names(kwargs))
    location = self.get_resource_uri(created_bundle)

    if self._meta.always_return_data:
        created_bundle = self.full_dehydrate(created_bundle)
        created_bundle = self.alter_detail_data_to_serialize(request, created_bundle)
        return self.create_response(request, created_bundle, response_class=http.HttpCreated, location=location)

    return http.HttpCreated(location=location)
def post_detail(self, request, **kwargs):
    """
    Create a new subcollection of the resource under a resource.

    Deliberately unimplemented: most data models aren't self-referential,
    so there is no sensible default. Subclasses that support it should
    return ``HttpCreated`` (201 Created) on success.
    """
    return http.HttpNotImplemented()
def delete_list(self, request, **kwargs):
    """
    Destroy an entire collection of resources/objects via
    ``obj_delete_list``.

    Always answers with ``HttpNoContent`` (204 No Content).
    """
    cleaned_kwargs = self.remove_api_resource_names(kwargs)
    self.obj_delete_list(request=request, **cleaned_kwargs)
    return http.HttpNoContent()
def delete_detail(self, request, **kwargs):
    """
    Destroy a single resource/object via ``obj_delete``.

    Returns ``HttpNoContent`` (204 No Content) when the resource was
    deleted, ``HttpNotFound`` (404 Not Found) when it did not exist.
    """
    try:
        self.obj_delete(request=request, **self.remove_api_resource_names(kwargs))
    except NotFound:
        return http.HttpNotFound()
    return http.HttpNoContent()
def patch_list(self, request, **kwargs):
    """
    Updates a collection in-place.

    The exact behavior of ``PATCH`` to a list resource is still the matter of
    some debate in REST circles, and the ``PATCH`` RFC isn't standard. So the
    behavior this method implements (described below) is something of a
    stab in the dark. It's mostly cribbed from GData, with a smattering
    of ActiveResource-isms and maybe even an original idea or two.

    The ``PATCH`` format is one that's similar to the response returned from
    a ``GET`` on a list resource::

        {
            "objects": [{object}, {object}, ...],
            "deleted_objects": ["URI", "URI", "URI", ...],
        }

    For each object in ``objects``:

    * If the dict does not have a ``resource_uri`` key then the item is
      considered "new" and is handled like a ``POST`` to the resource list.
    * If the dict has a ``resource_uri`` key and the ``resource_uri`` refers
      to an existing resource then the item is an update; it's treated
      like a ``PATCH`` to the corresponding resource detail.
    * If the dict has a ``resource_uri`` but the resource *doesn't* exist,
      then this is considered to be a create-via-``PUT``.

    Each entry in ``deleted_objects`` refers to a resource URI of an existing
    resource to be deleted; each is handled like a ``DELETE`` to the relevant
    resource.

    In any case:

    * If there's a resource URI it *must* refer to a resource of this
      type. It's an error to include a URI of a different resource.
    * ``PATCH`` is all or nothing. If a single sub-operation fails, the
      entire request will fail and all resources will be rolled back.
    * For ``PATCH`` to work, you **must** have ``put`` in your
      :ref:`detail-allowed-methods` setting.
    * To delete objects via ``deleted_objects`` in a ``PATCH`` request you
      **must** have ``delete`` in your :ref:`detail-allowed-methods`
      setting.

    Substitute appropriate names for ``objects`` and
    ``deleted_objects`` if ``Meta.collection_name`` is set to something
    other than ``objects`` (default).
    """
    request = convert_post_to_patch(request)
    deserialized = self.deserialize(request, request.raw_post_data, format=request.META.get('CONTENT_TYPE', 'application/json'))
    collection_name = self._meta.collection_name
    deleted_collection_name = 'deleted_%s' % collection_name
    if collection_name not in deserialized:
        raise BadRequest("Invalid data sent: missing '%s'" % collection_name)
    # Updates/creates are PUT-equivalent, so require 'put' to be allowed.
    if len(deserialized[collection_name]) and 'put' not in self._meta.detail_allowed_methods:
        raise ImmediateHttpResponse(response=http.HttpMethodNotAllowed())
    for data in deserialized[collection_name]:
        # If there's a resource_uri then this is either an
        # update-in-place or a create-via-PUT.
        if "resource_uri" in data:
            uri = data.pop('resource_uri')
            try:
                obj = self.get_via_uri(uri, request=request)
                # The object does exist, so this is an update-in-place.
                bundle = self.build_bundle(obj=obj, request=request)
                bundle = self.full_dehydrate(bundle)
                bundle = self.alter_detail_data_to_serialize(request, bundle)
                self.update_in_place(request, bundle, data)
            except (ObjectDoesNotExist, MultipleObjectsReturned):
                # The object referenced by resource_uri doesn't exist,
                # so this is a create-by-PUT equivalent.
                data = self.alter_deserialized_detail_data(request, data)
                bundle = self.build_bundle(data=dict_strip_unicode_keys(data), request=request)
                self.obj_create(bundle, request=request)
        else:
            # There's no resource URI, so this is a create call just
            # like a POST to the list resource.
            data = self.alter_deserialized_detail_data(request, data)
            bundle = self.build_bundle(data=dict_strip_unicode_keys(data), request=request)
            self.obj_create(bundle, request=request)
    deleted_collection = deserialized.get(deleted_collection_name, [])
    if deleted_collection:
        # Deletes are DELETE-equivalent, so require 'delete' to be allowed.
        if 'delete' not in self._meta.detail_allowed_methods:
            raise ImmediateHttpResponse(response=http.HttpMethodNotAllowed())
        for uri in deleted_collection:
            obj = self.get_via_uri(uri, request=request)
            self.obj_delete(request=request, _obj=obj)
    return http.HttpAccepted()
def patch_detail(self, request, **kwargs):
    """
    Updates a resource in-place.

    Calls ``obj_update``.

    If the resource is updated, return ``HttpAccepted`` (202 Accepted).
    If the resource did not exist, return ``HttpNotFound`` (404 Not Found).
    """
    request = convert_post_to_patch(request)
    # We want to be able to validate the update, but we can't just pass
    # the partial data into the validator since all data needs to be
    # present. Instead, we basically simulate a PUT by pulling out the
    # original data and updating it in-place.
    # So first pull out the original object. This is essentially
    # ``get_detail``.
    try:
        obj = self.cached_obj_get(request=request, **self.remove_api_resource_names(kwargs))
    except ObjectDoesNotExist:
        return http.HttpNotFound()
    except MultipleObjectsReturned:
        return http.HttpMultipleChoices("More than one resource is found at this URI.")
    bundle = self.build_bundle(obj=obj, request=request)
    bundle = self.full_dehydrate(bundle)
    bundle = self.alter_detail_data_to_serialize(request, bundle)
    # Now update the bundle in-place.
    deserialized = self.deserialize(request, request.raw_post_data, format=request.META.get('CONTENT_TYPE', 'application/json'))
    self.update_in_place(request, bundle, deserialized)
    if not self._meta.always_return_data:
        return http.HttpAccepted()
    else:
        bundle = self.full_dehydrate(bundle)
        bundle = self.alter_detail_data_to_serialize(request, bundle)
        return self.create_response(request, bundle, response_class=http.HttpAccepted)
def update_in_place(self, request, original_bundle, new_data):
    """
    Update the object in original_bundle in-place using new_data.

    Returns the bundle produced by ``obj_update``.
    """
    original_bundle.data.update(**dict_strip_unicode_keys(new_data))
    # Now we've got a bundle with the new data sitting in it and we're
    # we're basically in the same spot as a PUT request. SO the rest of this
    # function is cribbed from put_detail.
    # NOTE(review): the return value of ``alter_deserialized_detail_data``
    # is discarded here — overrides that return a *new* object (rather than
    # mutating ``data``) would be silently ignored. Confirm intended.
    self.alter_deserialized_detail_data(request, original_bundle.data)
    kwargs = {
        self._meta.detail_uri_name: self.get_bundle_detail_data(original_bundle),
        'request': request,
    }
    return self.obj_update(original_bundle, **kwargs)
def get_schema(self, request, **kwargs):
    """
    Returns a serialized form of the schema of the resource.

    Calls ``build_schema`` to generate the data. This method only responds
    to HTTP GET.

    Should return a HttpResponse (200 OK).
    """
    # Order matters: method check, then auth, then throttling, then the
    # access is logged for throttle accounting.
    self.method_check(request, allowed=['get'])
    self.is_authenticated(request)
    self.throttle_check(request)
    self.log_throttled_access(request)
    return self.create_response(request, self.build_schema())
def get_multiple(self, request, **kwargs):
    """
    Returns a serialized list of resources based on the identifiers
    from the URL.

    Calls ``obj_get`` to fetch only the objects requested. This method
    only responds to HTTP GET.

    Should return a HttpResponse (200 OK). Identifiers that do not match
    an object are reported under a ``not_found`` key instead of failing
    the whole request.
    """
    self.method_check(request, allowed=['get'])
    self.is_authenticated(request)
    self.throttle_check(request)
    # Rip apart the list then iterate.
    kwarg_name = '%s_list' % self._meta.detail_uri_name
    obj_identifiers = kwargs.get(kwarg_name, '').split(';')
    objects = []
    not_found = []
    for identifier in obj_identifiers:
        try:
            obj = self.obj_get(request, **{self._meta.detail_uri_name: identifier})
            bundle = self.build_bundle(obj=obj, request=request)
            bundle = self.full_dehydrate(bundle)
            objects.append(bundle)
        except ObjectDoesNotExist:
            not_found.append(identifier)
    object_list = {
        self._meta.collection_name: objects,
    }
    if len(not_found):
        object_list['not_found'] = not_found
    self.log_throttled_access(request)
    return self.create_response(request, object_list)
class ModelDeclarativeMetaclass(DeclarativeMetaclass):
    """
    Metaclass for ``ModelResource``.

    Derives ``Meta.object_class`` from ``Meta.queryset``, prunes
    ``base_fields`` according to the ``fields``/``excludes`` Meta options,
    introspects the model for additional fields and manages the optional
    ``absolute_url`` field.
    """
    def __new__(cls, name, bases, attrs):
        meta = attrs.get('Meta')
        if meta and hasattr(meta, 'queryset'):
            # The model behind the queryset becomes the resource's object class.
            setattr(meta, 'object_class', meta.queryset.model)
        new_class = super(ModelDeclarativeMetaclass, cls).__new__(cls, name, bases, attrs)
        include_fields = getattr(new_class._meta, 'fields', [])
        excludes = getattr(new_class._meta, 'excludes', [])
        field_names = new_class.base_fields.keys()
        for field_name in field_names:
            if field_name == 'resource_uri':
                continue
            if field_name in new_class.declared_fields:
                continue
            if len(include_fields) and not field_name in include_fields:
                del(new_class.base_fields[field_name])
            # ``elif`` (not a second ``if``) guards against a double-delete:
            # a field that is both missing from ``fields`` *and* listed in
            # ``excludes`` would otherwise raise ``KeyError`` on the second
            # ``del``.
            elif len(excludes) and field_name in excludes:
                del(new_class.base_fields[field_name])
        # Add in the new fields.
        new_class.base_fields.update(new_class.get_fields(include_fields, excludes))
        if getattr(new_class._meta, 'include_absolute_url', True):
            if not 'absolute_url' in new_class.base_fields:
                new_class.base_fields['absolute_url'] = fields.CharField(attribute='get_absolute_url', readonly=True)
        elif 'absolute_url' in new_class.base_fields and not 'absolute_url' in attrs:
            del(new_class.base_fields['absolute_url'])
        return new_class
class ModelResource(Resource):
    """
    A subclass of ``Resource`` designed to work with Django's ``Models``.

    This class will introspect a given ``Model`` and build a field list based
    on the fields found on the model (excluding relational fields).

    Given that it is aware of Django's ORM, it also handles the CRUD data
    operations of the resource.
    """
    __metaclass__ = ModelDeclarativeMetaclass

    @classmethod
    def should_skip_field(cls, field):
        """
        Given a Django model field, return if it should be included in the
        contributed ApiFields.
        """
        # Ignore certain fields (related fields).
        if getattr(field, 'rel'):
            return True
        return False

    @classmethod
    def api_field_from_django_field(cls, f, default=fields.CharField):
        """
        Returns the field type that would likely be associated with each
        Django type.
        """
        result = default
        internal_type = f.get_internal_type()
        if internal_type in ('DateField', 'DateTimeField'):
            result = fields.DateTimeField
        elif internal_type in ('BooleanField', 'NullBooleanField'):
            result = fields.BooleanField
        elif internal_type in ('FloatField',):
            result = fields.FloatField
        elif internal_type in ('DecimalField',):
            result = fields.DecimalField
        elif internal_type in ('IntegerField', 'PositiveIntegerField', 'PositiveSmallIntegerField', 'SmallIntegerField', 'AutoField'):
            result = fields.IntegerField
        elif internal_type in ('FileField', 'ImageField'):
            result = fields.FileField
        elif internal_type == 'TimeField':
            result = fields.TimeField
        # TODO: Perhaps enable these via introspection. The reason they're not enabled
        #       by default is the very different ``__init__`` they have over
        #       the other fields.
        # elif internal_type == 'ForeignKey':
        #     result = ForeignKey
        # elif internal_type == 'ManyToManyField':
        #     result = ManyToManyField
        return result

    @classmethod
    def get_fields(cls, fields=None, excludes=None):
        """
        Given any explicit fields to include and fields to exclude, add
        additional fields based on the associated model.
        """
        final_fields = {}
        fields = fields or []
        excludes = excludes or []
        if not cls._meta.object_class:
            return final_fields
        for f in cls._meta.object_class._meta.fields:
            # If the field name is already present, skip
            if f.name in cls.base_fields:
                continue
            # If field is not present in explicit field listing, skip
            if fields and f.name not in fields:
                continue
            # If field is in exclude list, skip
            if excludes and f.name in excludes:
                continue
            if cls.should_skip_field(f):
                continue
            api_field_class = cls.api_field_from_django_field(f)
            kwargs = {
                'attribute': f.name,
                'help_text': f.help_text,
            }
            if f.null is True:
                kwargs['null'] = True
            kwargs['unique'] = f.unique
            if not f.null and f.blank is True:
                kwargs['default'] = ''
                kwargs['blank'] = True
            if f.get_internal_type() == 'TextField':
                kwargs['default'] = ''
            if f.has_default():
                kwargs['default'] = f.default
            if getattr(f, 'auto_now', False):
                kwargs['default'] = f.auto_now
            if getattr(f, 'auto_now_add', False):
                kwargs['default'] = f.auto_now_add
            final_fields[f.name] = api_field_class(**kwargs)
            final_fields[f.name].instance_name = f.name
        return final_fields

    def check_filtering(self, field_name, filter_type='exact', filter_bits=None):
        """
        Given a field name, a optional filter type and an optional list of
        additional relations, determine if a field can be filtered on.

        If a filter does not meet the needed conditions, it should raise an
        ``InvalidFilterError``.

        If the filter meets the conditions, a list of attribute names (not
        field names) will be returned.
        """
        if filter_bits is None:
            filter_bits = []
        if not field_name in self._meta.filtering:
            raise InvalidFilterError("The '%s' field does not allow filtering." % field_name)
        # Check to see if it's an allowed lookup type.
        if not self._meta.filtering[field_name] in (ALL, ALL_WITH_RELATIONS):
            # Must be an explicit whitelist.
            if not filter_type in self._meta.filtering[field_name]:
                raise InvalidFilterError("'%s' is not an allowed filter on the '%s' field." % (filter_type, field_name))
        if self.fields[field_name].attribute is None:
            raise InvalidFilterError("The '%s' field has no 'attribute' for searching with." % field_name)
        # Check to see if it's a relational lookup and if that's allowed.
        if len(filter_bits):
            if not getattr(self.fields[field_name], 'is_related', False):
                raise InvalidFilterError("The '%s' field does not support relations." % field_name)
            if not self._meta.filtering[field_name] == ALL_WITH_RELATIONS:
                raise InvalidFilterError("Lookups are not allowed more than one level deep on the '%s' field." % field_name)
            # Recursively descend through the remaining lookups in the filter,
            # if any. We should ensure that all along the way, we're allowed
            # to filter on that field by the related resource.
            related_resource = self.fields[field_name].get_related_resource(None)
            return [self.fields[field_name].attribute] + related_resource.check_filtering(filter_bits[0], filter_type, filter_bits[1:])
        return [self.fields[field_name].attribute]

    def filter_value_to_python(self, value, field_name, filters, filter_expr,
            filter_type):
        """
        Turn the string ``value`` into a python object.
        """
        # Simple values
        true_values_list = ['true', 'True', True]
        false_values_list = ['false', 'False', False]
        none_values_list = ('nil', 'none', 'None', None)
        if value in true_values_list:
            value = True
        elif value in false_values_list:
            value = False
        elif value in none_values_list:
            value = None
        # Split on ',' if not empty string and either an in or range filter.
        if filter_type in ('in', 'range') and len(value):
            if hasattr(filters, 'getlist'):
                # ``QueryDict``-style: collect every occurrence of the param.
                value = []
                for part in filters.getlist(filter_expr):
                    value.extend(part.split(','))
            else:
                value = value.split(',')
        return value

    def build_filters(self, filters=None):
        """
        Given a dictionary of filters, create the necessary ORM-level filters.

        Keys should be resource fields, **NOT** model fields.

        Valid values are either a list of Django filter types (i.e.
        ``['startswith', 'exact', 'lte']``), the ``ALL`` constant or the
        ``ALL_WITH_RELATIONS`` constant.
        """
        # At the declarative level:
        #     filtering = {
        #         'resource_field_name': ['exact', 'startswith', 'endswith', 'contains'],
        #         'resource_field_name_2': ['exact', 'gt', 'gte', 'lt', 'lte', 'range'],
        #         'resource_field_name_3': ALL,
        #         'resource_field_name_4': ALL_WITH_RELATIONS,
        #         ...
        #     }
        # Accepts the filters as a dict. None by default, meaning no filters.
        if filters is None:
            filters = {}
        qs_filters = {}
        if getattr(self._meta, 'queryset', None) is not None:
            # Get the possible query terms from the current QuerySet.
            if hasattr(self._meta.queryset.query.query_terms, 'keys'):
                # Django 1.4 & below compatibility.
                query_terms = self._meta.queryset.query.query_terms.keys()
            else:
                # Django 1.5+.
                query_terms = self._meta.queryset.query.query_terms
        else:
            if hasattr(QUERY_TERMS, 'keys'):
                # Django 1.4 & below compatibility.
                query_terms = QUERY_TERMS.keys()
            else:
                # Django 1.5+.
                query_terms = QUERY_TERMS
        for filter_expr, value in filters.items():
            filter_bits = filter_expr.split(LOOKUP_SEP)
            field_name = filter_bits.pop(0)
            filter_type = 'exact'
            if not field_name in self.fields:
                # It's not a field we know about. Move along citizen.
                continue
            if len(filter_bits) and filter_bits[-1] in query_terms:
                filter_type = filter_bits.pop()
            lookup_bits = self.check_filtering(field_name, filter_type, filter_bits)
            value = self.filter_value_to_python(value, field_name, filters, filter_expr, filter_type)
            db_field_name = LOOKUP_SEP.join(lookup_bits)
            qs_filter = "%s%s%s" % (db_field_name, LOOKUP_SEP, filter_type)
            qs_filters[qs_filter] = value
        return dict_strip_unicode_keys(qs_filters)

    def apply_sorting(self, obj_list, options=None):
        """
        Given a dictionary of options, apply some ORM-level sorting to the
        provided ``QuerySet``.

        Looks for the ``order_by`` key and handles either ascending (just the
        field name) or descending (the field name with a ``-`` in front).

        The field name should be the resource field, **NOT** model field.
        """
        if options is None:
            options = {}
        parameter_name = 'order_by'
        if not 'order_by' in options:
            if not 'sort_by' in options:
                # Nothing to alter the order. Return what we've got.
                return obj_list
            else:
                warnings.warn("'sort_by' is a deprecated parameter. Please use 'order_by' instead.")
                parameter_name = 'sort_by'
        order_by_args = []
        if hasattr(options, 'getlist'):
            order_bits = options.getlist(parameter_name)
        else:
            order_bits = options.get(parameter_name)
            if not isinstance(order_bits, (list, tuple)):
                order_bits = [order_bits]
        for order_by in order_bits:
            order_by_bits = order_by.split(LOOKUP_SEP)
            field_name = order_by_bits[0]
            order = ''
            if order_by_bits[0].startswith('-'):
                field_name = order_by_bits[0][1:]
                order = '-'
            if not field_name in self.fields:
                # It's not a field we know about. Move along citizen.
                raise InvalidSortError("No matching '%s' field for ordering on." % field_name)
            if not field_name in self._meta.ordering:
                raise InvalidSortError("The '%s' field does not allow ordering." % field_name)
            if self.fields[field_name].attribute is None:
                raise InvalidSortError("The '%s' field has no 'attribute' for ordering with." % field_name)
            order_by_args.append("%s%s" % (order, LOOKUP_SEP.join([self.fields[field_name].attribute] + order_by_bits[1:])))
        return obj_list.order_by(*order_by_args)

    def apply_filters(self, request, applicable_filters):
        """
        An ORM-specific implementation of ``apply_filters``.

        The default simply applies the ``applicable_filters`` as ``**kwargs``,
        but should make it possible to do more advanced things.
        """
        return self.get_object_list(request).filter(**applicable_filters)

    def get_object_list(self, request):
        """
        An ORM-specific implementation of ``get_object_list``.

        Returns a queryset that may have been limited by other overrides.
        """
        # ``_clone`` gives each request a fresh, uncached queryset.
        return self._meta.queryset._clone()

    def obj_get_list(self, request=None, **kwargs):
        """
        A ORM-specific implementation of ``obj_get_list``.

        Takes an optional ``request`` object, whose ``GET`` dictionary can be
        used to narrow the query.
        """
        filters = {}
        if hasattr(request, 'GET'):
            # Grab a mutable copy.
            filters = request.GET.copy()
        # Update with the provided kwargs.
        filters.update(kwargs)
        applicable_filters = self.build_filters(filters=filters)
        try:
            base_object_list = self.apply_filters(request, applicable_filters)
            return self.apply_authorization_limits(request, base_object_list)
        except ValueError:
            raise BadRequest("Invalid resource lookup data provided (mismatched type).")

    def obj_get(self, request=None, **kwargs):
        """
        A ORM-specific implementation of ``obj_get``.

        Takes optional ``kwargs``, which are used to narrow the query to find
        the instance.
        """
        try:
            base_object_list = self.get_object_list(request).filter(**kwargs)
            object_list = self.apply_authorization_limits(request, base_object_list)
            stringified_kwargs = ', '.join(["%s=%s" % (k, v) for k, v in kwargs.items()])
            if len(object_list) <= 0:
                raise self._meta.object_class.DoesNotExist("Couldn't find an instance of '%s' which matched '%s'." % (self._meta.object_class.__name__, stringified_kwargs))
            elif len(object_list) > 1:
                raise MultipleObjectsReturned("More than '%s' matched '%s'." % (self._meta.object_class.__name__, stringified_kwargs))
            return object_list[0]
        except ValueError:
            raise NotFound("Invalid resource lookup data provided (mismatched type).")

    def obj_create(self, bundle, request=None, **kwargs):
        """
        A ORM-specific implementation of ``obj_create``.
        """
        bundle.obj = self._meta.object_class()
        for key, value in kwargs.items():
            setattr(bundle.obj, key, value)
        bundle = self.full_hydrate(bundle)
        self.is_valid(bundle,request)
        if bundle.errors:
            self.error_response(bundle.errors, request)
        # Save FKs just in case.
        self.save_related(bundle)
        # Save parent
        bundle.obj.save()
        # Now pick up the M2M bits.
        m2m_bundle = self.hydrate_m2m(bundle)
        self.save_m2m(m2m_bundle)
        return bundle

    def lookup_kwargs_with_identifiers(self, bundle, kwargs):
        """
        Kwargs here represent uri identifiers Ex: /repos/<user_id>/<repo_name>/
        We need to turn those identifiers into Python objects for generating
        lookup parameters that can find them in the DB
        """
        lookup_kwargs = {}
        bundle.obj = self.get_object_list(bundle.request).model()
        # Override data values, we rely on uri identifiers
        bundle.data.update(kwargs)
        # We're going to manually hydrate, as opposed to calling
        # ``full_hydrate``, to ensure we don't try to flesh out related
        # resources & keep things speedy.
        bundle = self.hydrate(bundle)
        for identifier in kwargs:
            if identifier == self._meta.detail_uri_name:
                lookup_kwargs[identifier] = kwargs[identifier]
                continue
            field_object = self.fields[identifier]
            # Skip readonly or related fields.
            if field_object.readonly is True or getattr(field_object, 'is_related', False):
                continue
            # Check for an optional method to do further hydration.
            method = getattr(self, "hydrate_%s" % identifier, None)
            if method:
                bundle = method(bundle)
            if field_object.attribute:
                value = field_object.hydrate(bundle)
                lookup_kwargs[identifier] = value
        return lookup_kwargs

    def obj_update(self, bundle, request=None, skip_errors=False, **kwargs):
        """
        A ORM-specific implementation of ``obj_update``.
        """
        if not bundle.obj or not self.get_bundle_detail_data(bundle):
            # NOTE(review): bare ``except`` below swallows *all* exceptions
            # (including KeyboardInterrupt) — intentional best-effort
            # fallback here, but worth confirming it shouldn't be narrowed.
            try:
                lookup_kwargs = self.lookup_kwargs_with_identifiers(bundle, kwargs)
            except:
                # if there is trouble hydrating the data, fall back to just
                # using kwargs by itself (usually it only contains a "pk" key
                # and this will work fine.
                lookup_kwargs = kwargs
            try:
                bundle.obj = self.obj_get(bundle.request, **lookup_kwargs)
            except ObjectDoesNotExist:
                raise NotFound("A model instance matching the provided arguments could not be found.")
        bundle = self.full_hydrate(bundle)
        self.is_valid(bundle,request)
        if bundle.errors and not skip_errors:
            self.error_response(bundle.errors, request)
        # Save FKs just in case.
        self.save_related(bundle)
        # Save the main object.
        bundle.obj.save()
        # Now pick up the M2M bits.
        m2m_bundle = self.hydrate_m2m(bundle)
        self.save_m2m(m2m_bundle)
        return bundle

    def obj_delete_list(self, request=None, **kwargs):
        """
        A ORM-specific implementation of ``obj_delete_list``.

        Takes optional ``kwargs``, which can be used to narrow the query.
        """
        base_object_list = self.get_object_list(request).filter(**kwargs)
        authed_object_list = self.apply_authorization_limits(request, base_object_list)
        if hasattr(authed_object_list, 'delete'):
            # It's likely a ``QuerySet``. Call ``.delete()`` for efficiency.
            authed_object_list.delete()
        else:
            for authed_obj in authed_object_list:
                authed_obj.delete()

    def obj_delete(self, request=None, **kwargs):
        """
        A ORM-specific implementation of ``obj_delete``.

        Takes optional ``kwargs``, which are used to narrow the query to find
        the instance.
        """
        # ``_obj`` lets callers (e.g. ``patch_list``) hand in a pre-fetched
        # object and skip the lookup.
        obj = kwargs.pop('_obj', None)
        if not hasattr(obj, 'delete'):
            try:
                obj = self.obj_get(request, **kwargs)
            except ObjectDoesNotExist:
                raise NotFound("A model instance matching the provided arguments could not be found.")
        obj.delete()

    # NOTE(review): ``transaction.commit_on_success`` was deprecated in
    # Django 1.6 (replaced by ``transaction.atomic``) — fine for the Django
    # versions this file targets.
    @transaction.commit_on_success()
    def patch_list(self, request, **kwargs):
        """
        An ORM-specific implementation of ``patch_list``.

        Necessary because PATCH should be atomic (all-success or all-fail)
        and the only way to do this neatly is at the database level.
        """
        return super(ModelResource, self).patch_list(request, **kwargs)

    def rollback(self, bundles):
        """
        A ORM-specific implementation of ``rollback``.

        Given the list of bundles, delete all models pertaining to those
        bundles.
        """
        for bundle in bundles:
            if bundle.obj and self.get_bundle_detail_data(bundle):
                bundle.obj.delete()

    def save_related(self, bundle):
        """
        Handles the saving of related non-M2M data.

        Calling assigning ``child.parent = parent`` & then calling
        ``Child.save`` isn't good enough to make sure the ``parent``
        is saved.

        To get around this, we go through all our related fields &
        call ``save`` on them if they have related, non-M2M data.
        M2M data is handled by the ``ModelResource.save_m2m`` method.
        """
        for field_name, field_object in self.fields.items():
            if not getattr(field_object, 'is_related', False):
                continue
            if getattr(field_object, 'is_m2m', False):
                continue
            if not field_object.attribute:
                continue
            if field_object.readonly:
                continue
            # NOTE: ``dict.has_key`` is Python 2-only; consistent with the
            # rest of this (Python 2) module.
            if field_object.blank and not bundle.data.has_key(field_name):
                continue
            # Get the object.
            try:
                related_obj = getattr(bundle.obj, field_object.attribute)
            except ObjectDoesNotExist:
                related_obj = None
            # Because sometimes it's ``None`` & that's OK.
            if related_obj:
                if field_object.related_name:
                    if not self.get_bundle_detail_data(bundle):
                        bundle.obj.save()
                    setattr(related_obj, field_object.related_name, bundle.obj)
                related_obj.save()
                setattr(bundle.obj, field_object.attribute, related_obj)

    def save_m2m(self, bundle):
        """
        Handles the saving of related M2M data.

        Due to the way Django works, the M2M data must be handled after the
        main instance, which is why this isn't a part of the main ``save`` bits.

        Currently slightly inefficient in that it will clear out the whole
        relation and recreate the related data as needed.
        """
        for field_name, field_object in self.fields.items():
            if not getattr(field_object, 'is_m2m', False):
                continue
            if not field_object.attribute:
                continue
            if field_object.readonly:
                continue
            # Get the manager.
            related_mngr = None
            if isinstance(field_object.attribute, basestring):
                related_mngr = getattr(bundle.obj, field_object.attribute)
            elif callable(field_object.attribute):
                related_mngr = field_object.attribute(bundle)
            if not related_mngr:
                continue
            if hasattr(related_mngr, 'clear'):
                # Clear it out, just to be safe.
                related_mngr.clear()
            related_objs = []
            for related_bundle in bundle.data[field_name]:
                related_bundle.obj.save()
                related_objs.append(related_bundle.obj)
            related_mngr.add(*related_objs)

    def detail_uri_kwargs(self, bundle_or_obj):
        """
        Given a ``Bundle`` or an object (typically a ``Model`` instance),
        it returns the extra kwargs needed to generate a detail URI.

        By default, it uses the model's ``pk`` in order to create the URI.
        """
        kwargs = {}
        if isinstance(bundle_or_obj, Bundle):
            kwargs[self._meta.detail_uri_name] = getattr(bundle_or_obj.obj, self._meta.detail_uri_name)
        else:
            kwargs[self._meta.detail_uri_name] = getattr(bundle_or_obj, self._meta.detail_uri_name)
        return kwargs
class NamespacedModelResource(ModelResource):
    """
    A ModelResource subclass that respects Django namespaces.
    """
    def _build_reverse_url(self, name, args=None, kwargs=None):
        # Prefix the URL name with the configured urlconf namespace before
        # handing it to Django's reverse().
        return reverse("%s:%s" % (self._meta.urlconf_namespace, name),
                       args=args, kwargs=kwargs)
# Based off of ``piston.utils.coerce_put_post``. Similarly BSD-licensed.
# And no, the irony is not lost on me.
def convert_post_to_VERB(request, verb):
    """
    Force Django to process the VERB.

    Django only parses form data for POST requests; this coerces the request
    to POST long enough to parse the body, then restores the original verb
    and exposes the parsed data as ``request.<VERB>``.
    """
    if request.method == verb:
        # Drop any cached parse results so the body is re-read below.
        if hasattr(request, '_post'):
            del request._post
            del request._files

        try:
            # Masquerade as POST while Django loads the form data.
            request.method = "POST"
            request._load_post_and_files()
            request.method = verb
        except AttributeError:
            # Older request objects track the method via META instead.
            request.META['REQUEST_METHOD'] = 'POST'
            request._load_post_and_files()
            request.META['REQUEST_METHOD'] = verb

    setattr(request, verb, request.POST)
    return request
def convert_post_to_put(request):
    """Shortcut: coerce ``request`` so its body is parsed as a PUT."""
    return convert_post_to_VERB(request, 'PUT')
def convert_post_to_patch(request):
    """Shortcut: coerce ``request`` so its body is parsed as a PATCH."""
    return convert_post_to_VERB(request, 'PATCH')
| VishvajitP/django-tastypie | tastypie/resources.py | Python | bsd-3-clause | 84,667 |
"""
Views for the course_mode module
"""
import decimal
from django.core.urlresolvers import reverse
from django.http import (
HttpResponseBadRequest, Http404
)
from django.shortcuts import redirect
from django.views.generic.base import View
from django.utils.translation import ugettext as _
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from edxmako.shortcuts import render_to_response
from course_modes.models import CourseMode
from courseware.access import has_access
from student.models import CourseEnrollment
from student.views import course_from_id
from verify_student.models import SoftwareSecurePhotoVerification
from xmodule.modulestore.locations import SlashSeparatedCourseKey
class ChooseModeView(View):
    """
    View used when the user is asked to pick a mode.

    When a get request is used, shows the selection page.
    When a post request is used, assumes that it is a form submission
    from the selection page, parses the response, and then sends user
    to the next step in the flow.
    """

    @method_decorator(login_required)
    def get(self, request, course_id, error=None):
        """Displays the course mode choice page.

        ``error``, if given, is rendered into the page (used when ``post``
        re-displays the form after a validation failure).
        """
        course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
        enrollment_mode = CourseEnrollment.enrollment_mode_for_user(request.user, course_key)
        upgrade = request.GET.get('upgrade', False)
        request.session['attempting_upgrade'] = upgrade
        # verified users do not need to register or upgrade
        if enrollment_mode == 'verified':
            return redirect(reverse('dashboard'))
        # registered users who are not trying to upgrade do not need to re-register
        if enrollment_mode is not None and upgrade is False:
            return redirect(reverse('dashboard'))
        modes = CourseMode.modes_for_course_dict(course_key)
        donation_for_course = request.session.get("donation_for_course", {})
        chosen_price = donation_for_course.get(course_key, None)
        course = course_from_id(course_key)
        context = {
            "course_modes_choose_url": reverse("course_modes_choose", kwargs={'course_id': course_key.to_deprecated_string()}),
            "modes": modes,
            "course_name": course.display_name_with_default,
            "course_org": course.display_org_with_default,
            "course_num": course.display_number_with_default,
            "chosen_price": chosen_price,
            "error": error,
            "upgrade": upgrade,
        }
        if "verified" in modes:
            # The verified mode carries pricing information for the template.
            context["suggested_prices"] = [decimal.Decimal(x) for x in modes["verified"].suggested_prices.split(",")]
            context["currency"] = modes["verified"].currency.upper()
            context["min_price"] = modes["verified"].min_price
        return render_to_response("course_modes/choose.html", context)

    @method_decorator(login_required)
    def post(self, request, course_id):
        """Takes the form submission from the page and parses it."""
        course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
        user = request.user
        # This is a bit redundant with logic in student.views.change_enrollement,
        # but I don't really have the time to refactor it more nicely and test.
        course = course_from_id(course_key)
        if not has_access(user, 'enroll', course):
            error_msg = _("Enrollment is closed")
            return self.get(request, course_id, error=error_msg)
        upgrade = request.GET.get('upgrade', False)
        requested_mode = self.get_requested_mode(request.POST)
        allowed_modes = CourseMode.modes_for_course_dict(course_key)
        if requested_mode not in allowed_modes:
            return HttpResponseBadRequest(_("Enrollment mode not supported"))
        if requested_mode in ("audit", "honor"):
            # Free modes enroll immediately; no payment/verification flow.
            CourseEnrollment.enroll(user, course_key, requested_mode)
            return redirect('dashboard')
        mode_info = allowed_modes[requested_mode]
        if requested_mode == "verified":
            amount = request.POST.get("contribution") or \
                request.POST.get("contribution-other-amt") or 0
            try:
                # validate the amount passed in and force it into two digits
                amount_value = decimal.Decimal(amount).quantize(decimal.Decimal('.01'), rounding=decimal.ROUND_DOWN)
            except decimal.InvalidOperation:
                error_msg = _("Invalid amount selected.")
                return self.get(request, course_id, error=error_msg)
            # Check for minimum pricing
            if amount_value < mode_info.min_price:
                error_msg = _("No selected price or selected price is too low.")
                return self.get(request, course_id, error=error_msg)
            donation_for_course = request.session.get("donation_for_course", {})
            donation_for_course[course_key] = amount_value
            request.session["donation_for_course"] = donation_for_course
            if SoftwareSecurePhotoVerification.user_has_valid_or_pending(request.user):
                return redirect(
                    reverse('verify_student_verified',
                            kwargs={'course_id': course_key.to_deprecated_string()}) + "?upgrade={}".format(upgrade)
                )
            return redirect(
                reverse('verify_student_show_requirements',
                        kwargs={'course_id': course_key.to_deprecated_string()}) + "?upgrade={}".format(upgrade))

    def get_requested_mode(self, request_dict):
        """
        Given the POST data of the form submission, return the
        corresponding course mode slug (or None if nothing matched).
        """
        if 'audit_mode' in request_dict:
            return 'audit'
        # BUGFIX: the original tested the truthy string literal
        # 'certificate_mode' instead of checking membership in request_dict,
        # so any submission with "honor-code" was treated as 'honor'.
        if 'certificate_mode' in request_dict and request_dict.get("honor-code"):
            return 'honor'
        if 'certificate_mode' in request_dict:
            return 'verified'
| nanolearning/edx-platform | common/djangoapps/course_modes/views.py | Python | agpl-3.0 | 6,033 |
# -*- coding: UTF-8 -*-
"""
This file is part of Pondus, a personal weight manager.
Copyright (C) 2008-10 Eike Nicklas <eike@ephys.de>
This program is free software licensed under the MIT license. For details
see LICENSE or http://www.opensource.org/licenses/mit-license.php
"""
import pygtk
pygtk.require('2.0')
import gtk
import os
class SaveFileDialog(object):
    """Allows the user to select a file something should be saved to."""

    def __init__(self, default_file_name, file_formats):
        self.chooser = gtk.FileChooserDialog()
        self.chooser.set_action(gtk.FILE_CHOOSER_ACTION_SAVE)
        self.chooser.set_title(_('Save to File'))
        self.chooser.set_current_folder(os.path.expanduser('~'))
        self.chooser.set_current_name(default_file_name)

        # Row holding the "file type" label and the extension selector.
        type_row = gtk.HBox(homogeneous=False, spacing=10)
        type_label = gtk.Label(_('Save as File Type:'))
        type_row.pack_start(type_label, False, False)
        self.filetypeselector = gtk.combo_box_new_text()
        for suffix in file_formats:
            self.filetypeselector.append_text(suffix)
        self.filetypeselector.set_active(0)
        type_row.pack_end(self.filetypeselector, True, True)
        self.chooser.vbox.pack_start(type_row, False, False)

        # Keep the target file name's extension in sync with the selector.
        self.filetypeselector.connect('changed', self.update_file_ending)

        # buttons in action area
        self.chooser.add_button(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL)
        self.chooser.add_button(gtk.STOCK_OK, gtk.RESPONSE_OK)

        self.chooser.show_all()

    def run(self, plot=None):
        """Runs the dialog and closes it afterwards.

        Returns the chosen filename on OK (also saving ``plot`` to it when
        given), or None when the dialog is cancelled.
        """
        response = self.chooser.run()
        if response == gtk.RESPONSE_OK:
            # Make sure the extension matches the selected file type.
            self.update_file_ending(self.filetypeselector)
            filename = self.chooser.get_filename()
            if plot is not None:
                plot.save_to_file(filename)
            self.chooser.hide()
            return filename
        self.chooser.hide()

    def update_file_ending(self, filetypeselector):
        """Updates the file ending of the target file."""
        suffix = filetypeselector.get_active_text()
        current_name = os.path.split(self.chooser.get_filename())[1]
        base_name = os.path.splitext(current_name)[0]
        self.chooser.set_current_name(base_name + suffix)
| BackupTheBerlios/pondus | pondus/gui/dialog_save_file.py | Python | mit | 2,401 |
# Embedded file name: /usr/lib/enigma2/python/Components/Converter/SCServicePosition.py
import time
from Converter import Converter
from Poll import Poll
from enigma import iPlayableService
from Components.Element import cached, ElementError
class SCServicePosition(Poll, Converter, object):
    """Converter that exposes the playing service's length, position,
    remaining time or projected end time, as text or as a gauge value."""

    TYPE_LENGTH = 0
    TYPE_POSITION = 1
    TYPE_REMAINING = 2
    TYPE_GAUGE = 3
    TYPE_ENDTIME = 4

    def __init__(self, type):
        Poll.__init__(self)
        Converter.__init__(self, type)
        # Skin argument looks like "Type,Flag,Flag,...".
        args = type.split(',')
        type = args.pop(0)
        self.negate = 'Negate' in args
        self.detailed = 'Detailed' in args
        self.showHours = 'ShowHours' in args
        self.showNoSeconds = 'ShowNoSeconds' in args
        if type == 'Length':
            self.type = self.TYPE_LENGTH
        elif type == 'Position':
            self.type = self.TYPE_POSITION
        elif type == 'Remaining':
            self.type = self.TYPE_REMAINING
        elif type == 'Gauge':
            self.type = self.TYPE_GAUGE
        elif type == 'EndTime':
            self.type = self.TYPE_ENDTIME
        else:
            raise ElementError('type must be {Length|Position|Remaining|Gauge|EndTime} with optional arguments {Negate|Detailed|ShowHours|ShowNoSeconds} for SCServicePosition converter')
        # Refresh rate depends on how much detail is displayed.
        if self.detailed:
            self.poll_interval = 100
        elif self.type == self.TYPE_ENDTIME:
            # BUGFIX: the original (decompiled) code read
            # ``elif self.TYPE_ENDTIME:`` which tests the constant 4 and is
            # always true, so every non-detailed instance polled at 1000ms.
            self.poll_interval = 1000
        elif self.type == self.TYPE_LENGTH:
            self.poll_interval = 2000
        else:
            self.poll_interval = 500
        self.poll_enabled = True

    def getSeek(self):
        # Returns the service's seek interface, or None when unavailable.
        s = self.source.service
        return s and s.seek()

    @cached
    def getPosition(self):
        seek = self.getSeek()
        if seek is None:
            return
        else:
            pos = seek.getPlayPosition()
            # pos[0] is an error flag; fall back to 0 on failure.
            if pos[0]:
                return 0
            return pos[1]

    @cached
    def getLength(self):
        seek = self.getSeek()
        if seek is None:
            return
        else:
            length = seek.getLength()
            # length[0] is an error flag; fall back to 0 on failure.
            if length[0]:
                return 0
            return length[1]

    @cached
    def getCutlist(self):
        service = self.source.service
        cue = service and service.cueSheet()
        return cue and cue.getCutList()

    @cached
    def getText(self):
        # Positions are in PTS ticks (90000 per second).
        seek = self.getSeek()
        if seek is None:
            return ''
        else:
            if self.type == self.TYPE_LENGTH:
                l = self.length
            elif self.type == self.TYPE_POSITION:
                l = self.position
            elif self.type == self.TYPE_REMAINING:
                l = self.length - self.position
            elif self.type == self.TYPE_ENDTIME:
                # Project the wall-clock time at which playback will end.
                l = (self.length - self.position) / 90000
                t = time.time()
                t = time.localtime(t + l)
                if self.showNoSeconds:
                    return '%02d:%02d' % (t.tm_hour, t.tm_min)
                else:
                    return '%02d:%02d:%02d' % (t.tm_hour, t.tm_min, t.tm_sec)
            # NOTE(review): TYPE_GAUGE falls through with ``l`` unset and
            # would raise here; presumably gauge widgets only read ``value``.
            if not self.detailed:
                l /= 90000
            if self.negate:
                l = -l
            if l > 0:
                sign = ''
            else:
                l = -l
                sign = '-'
            if not self.detailed:
                if self.showHours:
                    if self.showNoSeconds:
                        return sign + '%d:%02d' % (l / 3600, l % 3600 / 60)
                    else:
                        return sign + '%d:%02d:%02d' % (l / 3600, l % 3600 / 60, l % 60)
                elif self.showNoSeconds:
                    return sign + '%d' % (l / 60)
                else:
                    return sign + '%d:%02d' % (l / 60, l % 60)
            else:
                # Detailed mode keeps PTS resolution and shows milliseconds.
                if self.showHours:
                    return sign + '%d:%02d:%02d:%03d' % (l / 3600 / 90000,
                     l / 90000 % 3600 / 60,
                     l / 90000 % 60,
                     l % 90000 / 90)
                return sign + '%d:%02d:%03d' % (l / 60 / 90000, l / 90000 % 60, l % 90000 / 90)
            return

    # Full scale of the gauge value produced by getValue().
    range = 10000

    @cached
    def getValue(self):
        pos = self.position
        total = self.length
        if pos is None or total is None or total <= 0:
            return
        else:
            return pos * 10000 / total

    position = property(getPosition)
    length = property(getLength)
    cutlist = property(getCutlist)
    text = property(getText)
    value = property(getValue)

    def changed(self, what):
        # Cutlist updates matter for gauges; time updates come from polling
        # or from cuesheet changes.
        cutlist_refresh = what[0] != self.CHANGED_SPECIFIC or what[1] in (iPlayableService.evCuesheetChanged,)
        time_refresh = what[0] == self.CHANGED_POLL or what[0] == self.CHANGED_SPECIFIC and what[1] in (iPlayableService.evCuesheetChanged,)
        if cutlist_refresh:
            if self.type == self.TYPE_GAUGE:
                self.downstream_elements.cutlist_changed()
        if time_refresh:
            self.downstream_elements.changed(what)
# This file is part of the Hotwire Shell user interface.
#
# Copyright (C) 2007 Colin Walters <walters@verbum.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import os,sys,subprocess
from distutils.core import setup
from distutils.command.install import install
# When run as a script from an uninstalled source checkout, put the checkout
# directory first on sys.path so the local packages are importable, and
# propagate that path to child processes via PYTHONPATH.
if __name__ == '__main__' and hasattr(sys.modules['__main__'], '__file__'):
    basedir = os.path.dirname(os.path.abspath(__file__))
    up_basedir = os.path.dirname(basedir)
    if os.path.basename(basedir) == 'hotwire-shell':
        print("Running uninstalled, extending path")
        sys.path.insert(0, basedir)
        # Subprocesses spawned below (svn-dist etc.) inherit this path.
        os.environ['PYTHONPATH'] = os.pathsep.join(sys.path)
from hotwire.version import __version__
def svn_info(wd):
    """Return the output of ``svn info <wd>`` parsed into a dict.

    Each "Key: value" line of the command's stdout becomes one entry;
    blank lines are skipped.
    """
    import subprocess
    stdout = subprocess.Popen(['svn', 'info', wd],
                              stdout=subprocess.PIPE).communicate()[0]
    # BUGFIX: communicate() returns bytes; the original wrapped the raw
    # bytes in io.StringIO, which raises TypeError. Decode first.
    tip = {}
    for line in stdout.decode(errors='replace').splitlines():
        line = line.strip()
        if not line:
            continue
        (k, v) = line.split(':', 1)
        tip[k.strip()] = v.strip()
    return tip
def svn_dist():
    """Export a pristine copy of the tree under dist/test, stamp it with the
    current svn revision info, and build a source zip from it."""
    import subprocess, tempfile
    import shutil

    export_dir = os.path.join('dist', 'test')
    try:
        os.mkdir('dist')
    except OSError:
        pass  # dist/ already exists
    if os.path.exists(export_dir):
        shutil.rmtree(export_dir)
    subprocess.call(['svn', 'export', '.', export_dir])

    previous_cwd = os.getcwd()
    os.chdir(export_dir)

    # Record the revision this build came from inside the exported tree.
    version_file = open(os.path.join('hotwire', 'version.py'), 'a')
    version_file.write('\n\n##AUTOGENERATED by setup.py##\nsvn_version_info = %s\n' % (repr(svn_info(previous_cwd)),))
    version_file.close()

    subprocess.call(['python', 'setup.py', 'sdist', '-k', '--format=zip'])
def svn_dist_test():
    """Build the source distribution, then run the test suite inside it."""
    import subprocess
    svn_dist()
    os.chdir('hotwire-' + __version__)
    subprocess.call(['python', os.path.join('ui', 'test-hotwire')])
# Handle the custom "svn-dist"/"svn-dist-test" pseudo-commands before
# distutils ever sees the argument list.
if 'svn-dist' in sys.argv:
    svn_dist()
    sys.exit(0)
elif 'svn-dist-test' in sys.argv:
    svn_dist_test()
    sys.exit(0)
# Extra keyword arguments for setup(), assembled per platform/build mode.
kwargs = {}
if 'py2exe' in sys.argv:
    import py2exe
    # Windows/py2exe build: wrap the UI entry point as a GUI executable.
    kwargs['windows'] = [{'script': 'ui/hotwire', #'icon_resources': [(1, 'hotwire.ico')]
                          }]
    kwargs['options'] = {'py2exe': {'packages': 'encodings',
                                    'includes': 'cairo, pango, pangocairo, atk, gobject'}
                         }
else:
    # Regular (POSIX) build: install launcher scripts and shared data files.
    kwargs['scripts'] = ['ui/hotwire', 'ui/hotwire-editor', 'ui/hotwire-runtty', 'ui/hotwire-gedit-blocking',
                         'hotapps/bin/hotwire-sudo']
    kwargs['data_files'] = [('share/applications', ['hotwire.desktop']),
                            ('share/icons/hicolor/24x24/apps', ['images/hotwire.png', 'images/hotwire-sudo.png']),
                            # FIXME #('share/icons/hicolor/22x22/apps', ['images/hotwire-22.png']),
                            ('share/hotwire/images', ['images/throbber.gif', 'images/throbber-done.gif',
                                                      'images/dfeet-method.png', 'images/dfeet-property.png', 'images/dfeet-object.png',
                                                      'images/perl.ico', 'images/python.ico', 'images/ruby.ico', 'images/unix.ico',
                                                      'images/external.png'])]
    # Wire in the DistUtilsExtra build commands (i18n, help, icons).
    from DistUtilsExtra.command import *
    kwargs['cmdclass'] = { "build_extra" : build_extra.build_extra,
                           "build_i18n" : build_i18n.build_i18n,
                           "build_help" : build_help.build_help,
                           "build_icons" : build_icons.build_icons }
class HotInstall(install):
    """``install`` command that refreshes the GTK icon cache afterwards."""

    def run(self):
        install.run(self)
        # Only refresh the cache for a real (non-staged) POSIX install;
        # when installing into a packaging root the packager handles it.
        if os.name == 'posix' and self.root is None:
            print("Running gtk-update-icon-cache")
            icon_dir = os.path.join(self.install_data, 'share', 'icons', 'hicolor')
            subprocess.call(['gtk-update-icon-cache', icon_dir])
setup(name='hotwire',
version=__version__,
description='Hotwire Shell',
author='Colin Walters',
author_email='walters@verbum.org',
url='http://hotwire-shell.org',
packages=['hotwire', 'hotwire_ui', 'hotwire_ui.renderers', 'hotwire_ui.adaptors',
'hotwire.builtins',
'hotwire.externals', 'hotwire.externals', 'hotwire.externals.dispatch',
'hotwire.sysdep', 'hotwire.sysdep.fs_impl',
'hotwire.sysdep.proc_impl',
'hotwire.sysdep.term_impl', 'hotwire.sysdep.ipc_impl',
'hotvte', 'hotapps', 'hotapps.hotssh', 'hotapps.hotsudo'],
**kwargs)
| SDX2000/hotwire | setup.py | Python | gpl-2.0 | 5,200 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2005 Insecure.Com LLC.
#
# Author: Adriano Monteiro Marques <py.adriano@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# This file contains the definitions of two main classes:
# NmapCommand represents and runs an Nmap command line. CommandConstructor
# builds a command line string from textual option descriptions.
import codecs
import locale
import sys
import os
import re
import tempfile
import unittest
import gtk
from types import StringTypes
try:
from subprocess import Popen, PIPE
except ImportError, e:
raise ImportError(str(e) + ".\n" + _("Python 2.4 or later is required."))
import zenmapCore.Paths
from zenmapCore.Paths import Path
from zenmapCore.NmapOptions import NmapOptions
from zenmapCore.UmitLogging import log
from zenmapCore.I18N import _
from zenmapCore.UmitConf import PathsConfig
from zenmapCore.Name import APP_NAME
# This variable is used in the call to Popen. It determines whether the
# subprocess invocation uses the shell or not. If it is False on Unix, the nmap
# process is started with execve and a list of arguments, which is what we want.
# (Indeed it fails when shell_state = True because it tries to exec
# ['sh', '-c', 'nmap', '-v', ...], which is wrong.) So normally we would want
# shell_state = False. But if shell_state = False on Windows, a big ugly black
# shell window opens whenever a scan is run, at least under py2exe. So we define
# shell_state = True on Windows only. Windows doesn't have exec, so it runs the
# command basically the same way regardless of shell_state.
shell_state = (sys.platform == "win32")
# The path to the nmap executable as used by Popen.
# Find the value from configuation file paths nmap_command_path
# to use for the location of the nmap executable.
nmap_paths = PathsConfig()
nmap_command_path = nmap_paths.nmap_command_path
log.debug(">>> Platform: %s" % sys.platform)
log.debug(">>> Nmap command path: %s" % nmap_command_path)
def split_quoted(s):
    """Like str.split, except that no splits occur inside quoted strings, and
    quoted strings are unquoted."""
    # Each match is a run of adjacent quoted and unquoted segments; the
    # quotes themselves are stripped afterwards. The pattern is a raw string
    # (the original used "\s" in a normal string, an invalid escape that
    # Python 3 warns about).
    return [x.replace("\"", "") for x in re.findall(r'((?:"[^"]*"|[^"\s]+)+)', s)]
def wrap_file_in_preferred_encoding(f):
    """Wrap an open file to automatically decode its contents when reading from
    the encoding given by locale.getpreferredencoding, or just return the file
    if that doesn't work.

    The nmap executable will write its output in whatever the system encoding
    is. Nmap's output is usually all ASCII, but the time zone it prints can be
    in a different encoding. If it is not decoded correctly it will be
    displayed as garbage characters. This function assists in reading the Nmap
    output. We don't know for sure what the encoding used is, but we take a
    best guess and decode the output into a proper unicode object so that the
    screen display and XML writer interpret it correctly."""
    try:
        encoding = locale.getpreferredencoding()
    except locale.Error:
        # A bogus LANG environment variable can make the lookup itself fail.
        encoding = None

    if encoding is not None:
        try:
            return codecs.getreader(encoding)(f, "replace")
        except LookupError:
            # The preferred encoding is unknown to Python (e.g.
            # "X-MAC-KOREAN" has been observed); fall through and return
            # the unwrapped file.
            log.debug("Unknown encoding \"%s\"." % encoding)
    return f
def escape_nmap_filename(filename):
    """Escape '%' characters so they are not interpreted as strftime format
    specifiers, which are not supported by Zenmap."""
    # Doubling each percent sign neutralizes it for printf/strftime-style
    # expansion performed by nmap on output file names.
    escaped = filename.replace("%", "%%")
    return escaped
class NmapCommand(object):
    """This class represents an Nmap command line. It is responsible for
    starting, stopping, and returning the results from a command-line scan. A
    command line is represented as a string but it is split into a list of
    arguments for execution."""

    def __init__(self, command):
        """Initialize an Nmap command. This creates temporary files for
        redirecting the various types of output and sets the backing
        command-line string."""
        self.command = command
        self.command_process = None
        self._stdout_file = None

        # Get the command as a list of options
        self.command_list = self._get_sanitized_command_list()

        # Go through the list and look for -oX or -oA, because that means the
        # user has specified an XML output file. When we find one, we escape '%'
        # characters to avoid strftime expansion and insert it back into the
        # command. We also escape the arguments to -oG, -oN, and -oS for
        # uniformity although we don't use the file names. If we find a -oX or
        # -oA option, set self.xml_is_temp to False and don't delete the file
        # after we're done. Otherwise, generate a random output file name and
        # delete it when the scan is finished.
        self.xml_is_temp = True
        self.xml_output_filename = None
        i = 0
        while i < len(self.command_list):
            if self.command_list[i] == "-oX":
                self.xml_is_temp = False
                if i == len(self.command_list) - 1:
                    break
                self.xml_output_filename = self.command_list[i + 1]
                escaped_xml_output_filename = escape_nmap_filename(self.xml_output_filename)
                self.command_list[i + 1] = escaped_xml_output_filename
                i += 1
            elif self.command_list[i] == "-oA":
                self.xml_is_temp = False
                if i == len(self.command_list) - 1:
                    break
                xml_output_prefix = self.command_list[i + 1]
                self.xml_output_filename = xml_output_prefix + ".xml"
                escaped_xml_output_prefix = escape_nmap_filename(xml_output_prefix)
                self.command_list[i + 1] = escaped_xml_output_prefix
                i += 1
            elif self.command_list[i] in ("-oG", "-oN", "-oS"):
                if i == len(self.command_list) - 1:
                    break
                escaped_filename = escape_nmap_filename(self.command_list[i + 1])
                self.command_list[i + 1] = escaped_filename
                i += 1
            # BUGFIX: advance past the current token unconditionally. In the
            # source as given, i was only incremented inside the -o* branches,
            # so the loop spun forever on the first non-matching token (the
            # executable name itself).
            i += 1

        if self.xml_is_temp:
            # No user-specified XML file: write to a temporary one and delete
            # it in close().
            self.xml_output_filename = tempfile.mktemp(prefix = APP_NAME + "-", suffix = ".xml")
            escaped_xml_output_filename = escape_nmap_filename(self.xml_output_filename)
            self.command_list.append("-oX")
            self.command_list.append("%s" % escaped_xml_output_filename)

        log.debug(">>> Temporary files:")
        log.debug(">>> XML OUTPUT: %s" % self.xml_output_filename)

    def _get_sanitized_command_list(self):
        """Remove comments from the command, add output options, and return the
        command split up into a list ready for execution."""
        command = self.command
        # Remove comments from command.
        command = re.sub('#.*', '', command)
        # Split back into individual options, honoring double quotes.
        command_list = split_quoted(command)
        # Replace the executable name with the value of nmap_command_path.
        command_list[0] = nmap_command_path
        return command_list

    def close(self):
        """Close and remove temporary output files used by the command."""
        self._stdout_file.close()
        if self.xml_is_temp:
            os.remove(self.xml_output_filename)

    def kill(self):
        """Kill the nmap subprocess."""
        log.debug(">>> Killing scan process %s" % self.command_process.pid)
        if sys.platform != "win32":
            try:
                from signal import SIGKILL
                os.kill(self.command_process.pid, SIGKILL)
            except:
                # Best effort: the process may already be gone.
                pass
        else:
            try:
                # Not sure if this works. Must research a bit more about this
                # subprocess's method to see how it works.
                # In the meantime, this should not raise any exception because
                # we don't care if it killed the process as it never killed it anyway.
                from subprocess import TerminateProcess
                TerminateProcess(self.command_process._handle, 0)
            except:
                pass

    def get_path(self):
        """Return a value for the PATH environment variable that is appropriate
        for the current platform. It will be the PATH from the environment plus
        possibly some platform-specific directories."""
        path_env = os.getenv("PATH")
        if path_env is None:
            search_paths = []
        else:
            search_paths = path_env.split(os.pathsep)
        for path in zenmapCore.Paths.get_extra_executable_search_paths():
            if path not in search_paths:
                search_paths.append(path)
        return os.pathsep.join(search_paths)

    def run_scan(self):
        """Run the command represented by this class."""
        # We don't need a file name for stdout output, just a handle. A
        # TemporaryFile is deleted as soon as it is closed, and in Unix is
        # unlinked immediately after creation so it's not even visible.
        f = tempfile.TemporaryFile(mode = "rb", prefix = APP_NAME + "-stdout-")
        self._stdout_file = wrap_file_in_preferred_encoding(f)

        search_paths = self.get_path()
        env = dict(os.environ)
        env["PATH"] = search_paths
        log.debug("PATH=%s" % env["PATH"])

        log.debug("Running command: %s" % repr(self.command_list))
        self.command_process = Popen(self.command_list, bufsize=1,
                                     stdin=PIPE,
                                     stdout=f.fileno(),
                                     stderr=f.fileno(),
                                     shell=shell_state,
                                     env=env)

    def scan_state(self):
        """Return the current state of a running scan. A return value of True
        means the scan is running and a return value of False means the scan
        subprocess completed successfully. If the subprocess terminated with an
        error an exception is raised. The scan must have been started with
        run_scan before calling this method."""
        if self.command_process is None:
            raise Exception("Scan is not running yet!")

        state = self.command_process.poll()

        if state is None:
            return True # True means that the process is still running
        elif state == 0:
            return False # False means that the process had a successful exit
        else:
            log.warning("An error occurred during the scan execution!")
            log.warning("Command that raised the exception: '%s'" %
                        " ".join(self.command_list))
            log.warning("Scan output:\n%s" % self.get_output())
            raise Exception("An error occurred during the scan execution!\n\n'%s'" % self.get_output())

    def get_output(self):
        """Return the stdout of the nmap subprocess."""
        self._stdout_file.seek(0)
        return self._stdout_file.read()

    def get_xml_output_filename(self):
        """Return the name of the XML (-oX) output file."""
        return self.xml_output_filename
class CommandConstructor:
    """This class builds a string representing an Nmap command line from textual
    option descriptions such as 'Aggressive Options' or 'UDP Scan'
    (corresponding to -A and -sU respectively). The name-to-option mapping is
    done by the NmapOptions class. Options are stored in a dict that maps the
    option name to a tuple containing its arguments and "level." The level is
    the degree of repetition for options like -v that can be given more than
    once."""
    def __init__(self, options = {}):
        """Initialize a command line using the given options. The options are
        given as a dict mapping option names to arguments."""
        # NOTE(review): the mutable default argument is shared between calls,
        # but it is only iterated here, never mutated, so it is harmless.
        self.options = {}
        self.option_profile = NmapOptions(Path.options)
        for k, v in options.items():
            self.add_option(k, v, False)
    def add_option(self, option_name, args=[], level=False):
        """Add an option to the command line. Only one of args and level can be
        defined. If both are defined, level takes precedence and args is
        ignored."""
        # NOTE(review): args defaults to a shared list; it is stored but the
        # stored default is never mutated by this class.
        self.options[option_name] = (args, level)
    def remove_option(self, option_name):
        """Remove an option from the command line. Unknown names are ignored."""
        if option_name in self.options.keys():
            del(self.options[option_name])
    def get_command(self, target):
        """Return the constructed command line as a plain string, ending with
        the given scan target."""
        splited = ['%s' % nmap_command_path]
        for option_name in self.options:
            # Look up the concrete flag syntax for this textual description.
            option = self.option_profile.get_option(option_name)
            args, level = self.options[option_name]
            # Normalize a lone string argument into a one-element list.
            if type(args) in StringTypes:
                args = [args]
            if level:
                # Repeat the flag `level` times (e.g. "-v -v ").
                splited.append((option['option']+' ')*level)
            elif args:
                # Substitute the first argument into the flag's template.
                args = tuple (args)
                splited.append(option['option'] % args[0])
            else:
                splited.append(option['option'])
        splited.append(target)
        return ' '.join(splited)
    def get_options(self):
        """Return the options used in the command line, as a dict mapping
        options names to arguments. The level, if any, is discarded."""
        return dict([(k, v[0]) for k, v in self.options.items()])
class SplitQuotedTest(unittest.TestCase):
    """A unittest class that tests the split_quoted function."""

    def test_split(self):
        # Plain whitespace splitting, no quoting involved.
        for text, expected in (('', []),
                               ('a', ['a']),
                               ('a b c', ['a', 'b', 'c'])):
            self.assertEqual(split_quoted(text), expected)

    def test_quotes(self):
        # Quoted regions keep their internal spaces and lose the quotes.
        cases = (
            ('a "b" c', ['a', 'b', 'c']),
            ('a "b c"', ['a', 'b c']),
            ('a "b c""d e"', ['a', 'b cd e']),
            ('a "b c"z"d e"', ['a', 'b czd e']),
        )
        for text, expected in cases:
            self.assertEqual(split_quoted(text), expected)
# Module test code.
if __name__ == '__main__':
    # Run the split_quoted unit tests directly.
    unittest.TextTestRunner().run(unittest.TestLoader().loadTestsFromTestCase(SplitQuotedTest))

    # This is an example of how CommandConstructor works. Nmap options are given
    # textual option descriptions. Note this demo constructs a real
    # CommandConstructor, which reads the options profile from disk.
    command = CommandConstructor()
    command.add_option('Aggressive')
    command.add_option('Version detection')
    command.add_option('UDP Scan')
    command.add_option('Idle Scan', ['10.0.0.138'])
    command.add_option('UDP Scan')
    command.add_option('ACK scan')
    command.remove_option('Idle Scan')
    print command.get_command('localhost')
#!/usr/bin/env python3
from urllib.request import urlopen
from bs4 import BeautifulSoup
import pandas as pd
import html5lib
import pdb
from collections import OrderedDict
import json
import csv
import contextlib
# Source page for the current season's ratings table.
url = "https://kenpom.com/index.php"
#url = "https://kenpom.com/index.php?y=2017" #past year testing override

print ("Scrape Statistics Tool")
print ("**************************")
print ("data is from {0}".format(url))
print ("**************************")

# Fetch and parse the page; closing() guarantees the connection is released.
with contextlib.closing(urlopen(url)) as page:
    soup = BeautifulSoup(page, "html5lib")
ratings_table=soup.find('table', id='ratings-table')

# One accumulator list per output column; filled row by row below.
IDX=[]
A=[]
B=[]
C=[]
D=[]
E=[]
F=[]
G=[]
H=[]
I=[]
J=[]
K=[]
L=[]
M=[]
index=0
for row in ratings_table.findAll("tr"):
    col=row.findAll('td')
    # Header/separator rows have no <td> cells and are skipped.
    if len(col)>0:
        index+=1
        IDX.append(index)
        A.append(col[0].find(text=True))
        B.append(col[1].find(text=True))
        C.append(col[2].find(text=True))
        D.append(col[3].find(text=True))
        E.append(col[4].find(text=True))
        F.append(col[5].find(text=True))
        # From here on every other cell is skipped (7, 9, 11, ...);
        # presumably the skipped cells are the small rank sub-columns
        # interleaved with each statistic on kenpom — TODO confirm.
        G.append(col[7].find(text=True))
        H.append(col[9].find(text=True))
        I.append(col[11].find(text=True))
        J.append(col[13].find(text=True))
        K.append(col[15].find(text=True))
        L.append(col[17].find(text=True))
        M.append(col[19].find(text=True))
# Assemble the DataFrame with one named column per accumulator.
df=pd.DataFrame(IDX,columns=['Index'])
df['Rank']=A
df['Team']=B
df['Conf']=C
df['W-L']=D
df['AdjEM']=E
df['AdjO']=F
df['AdjD']=G
df['AdjT']=H
df['Luck']=I
df['AdjEMSOS']=J
df['OppOSOS']=K
df['OppDSOS']=L
df['AdjEMNCSOS']=M
# Persist as JSON keyed by row index...
with open('stats.json', 'w') as f:
    f.write(df.to_json(orient='index'))
# ...then re-read it (OrderedDict keeps column order) and convert to CSV.
with open("stats.json") as stats_json:
    dict_stats = json.load(stats_json, object_pairs_hook=OrderedDict)
stats_sheet = open('stats.csv', 'w', newline='')
csvwriter = csv.writer(stats_sheet)
count = 0
for row in dict_stats.values():
    #pdb.set_trace()
    # Emit the header once, from the first row's keys.
    if (count == 0):
        header = row.keys()
        csvwriter.writerow(header)
    count += 1
    csvwriter.writerow(row.values())
stats_sheet.close()
print ("done.")
| meprogrammerguy/pyMadness | scrape_stats.py | Python | mit | 2,098 |
from setuptools import setup, find_packages

# Packaging metadata for the ``pyio`` distribution.
# NOTE(review): ``author_email`` ends in '@gmail' with no TLD -- looks
# truncated ('@gmail.com'?); confirm with the author before fixing.
setup(name='pyio',
    version='0.1',
    packages=find_packages(),
    author='Theo Julienne',
    author_email='theo.julienne+pyio@gmail',
    url='https://github.com/theojulienne/pyio',
    license='MIT',
    include_package_data=True,
    description='pyio',
    long_description='pyio',
    platforms=['any'],
    install_requires=[],
)
| theojulienne/pyio | setup.py | Python | mit | 409 |
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {
'status': ['preview'],
'supported_by': 'core',
'version': '1.0'
}
DOCUMENTATION = """
---
module: junos_command
version_added: "2.1"
author: "Peter Sprygada (@privateip)"
short_description: Run arbitrary commands on an Juniper junos device
description:
- Sends an arbitrary set of commands to an junos node and returns the results
read from the device. This module includes an
argument that will cause the module to wait for a specific condition
before returning or timing out if the condition is not met.
extends_documentation_fragment: junos
options:
commands:
description:
- The commands to send to the remote junos device over the
configured provider. The resulting output from the command
is returned. If the I(wait_for) argument is provided, the
module is not returned until the condition is satisfied or
the number of I(retries) has been exceeded.
required: true
wait_for:
description:
- Specifies what to evaluate from the output of the command
and what conditionals to apply. This argument will cause
the task to wait for a particular conditional to be true
before moving forward. If the conditional is not true
by the configured retries, the task fails. See examples.
required: false
default: null
aliases: ['waitfor']
version_added: "2.2"
match:
description:
- The I(match) argument is used in conjunction with the
I(wait_for) argument to specify the match policy. Valid
values are C(all) or C(any). If the value is set to C(all)
then all conditionals in the I(wait_for) must be satisfied. If
the value is set to C(any) then only one of the values must be
satisfied.
required: false
default: all
choices: ['any', 'all']
version_added: "2.2"
retries:
description:
- Specifies the number of retries a command should be tried
before it is considered failed. The command is run on the
target device every retry and evaluated against the I(wait_for)
conditionals.
required: false
default: 10
interval:
description:
- Configures the interval in seconds to wait between retries
of the command. If the command does not pass the specified
conditional, the interval indicates how to long to wait before
trying the command again.
required: false
default: 1
"""
EXAMPLES = """
# Note: examples below use the following provider dict to handle
# transport and authentication to the node.
---
- name: run show version on remote devices
junos_command:
commands: show version
- name: run show version and check to see if output contains Juniper
junos_command:
commands: show version
wait_for: result[0] contains Juniper
- name: run multiple commands on remote nodes
junos_command:
commands:
- show version
- show interfaces
- name: run multiple commands and evaluate the output
junos_command:
commands:
- show version
- show interfaces
wait_for:
- result[0] contains Juniper
- result[1] contains Loopback0
- name: run commands and specify the output format
junos_command:
commands:
- command: show version
output: json
"""
RETURN = """
failed_conditions:
description: the conditionals that failed
returned: failed
type: list
sample: ['...', '...']
"""
import time
from functools import partial
from xml.etree import ElementTree as etree
from ansible.module_utils.junos import run_commands
from ansible.module_utils.junos import junos_argument_spec
from ansible.module_utils.junos import check_args as junos_check_args
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import string_types
from ansible.module_utils.netcli import Conditional, FailedConditionalError
from ansible.module_utils.network_common import ComplexList
try:
import jxmlease
HAS_JXMLEASE = True
except ImportError:
HAS_JXMLEASE = False
def check_args(module, warnings):
    """Validate module arguments.

    Delegates the shared provider checks to the common junos helper, then
    hard-fails if the deprecated ``rpcs`` option was supplied (it was
    replaced by the dedicated junos_rpc module).
    """
    junos_check_args(module, warnings)
    if module.params['rpcs']:
        module.fail_json(msg='argument rpcs has been deprecated, please use '
                             'junos_rpc instead')
def to_lines(stdout):
    """Return *stdout* with every string entry split into a list of lines.

    Non-string entries (e.g. already-parsed json/xml structures) are
    passed through unchanged.
    """
    return [str(entry).split('\n') if isinstance(entry, string_types)
            else entry
            for entry in stdout]
def parse_commands(module, warnings):
    """Normalize the ``commands`` module parameter into command dicts.

    Each entry is expanded (via ComplexList) into a dict with keys
    ``command``, ``output``, ``prompt`` and ``answer``, then the command
    string is rewritten so its ``| display json/xml`` suffix agrees with
    the requested output format.  Appends check_mode warnings for any
    non-show command.
    """
    spec = dict(
        command=dict(key=True),
        # Per-command output defaults to the module-level 'display' param.
        output=dict(default=module.params['display'], choices=['text', 'json', 'xml']),
        prompt=dict(),
        answer=dict()
    )
    transform = ComplexList(spec, module)
    commands = transform(module.params['commands'])
    for index, item in enumerate(commands):
        if module.check_mode and not item['command'].startswith('show'):
            warnings.append(
                'Only show commands are supported when using check_mode, not '
                'executing %s' % item['command']
            )
        # NOTE(review): the appended suffix ('| display json') has no
        # leading space, while the stripping branch removes
        # ' | display json' (with a space) -- a round trip through both
        # branches would not fully undo the append.  Confirm intended.
        if item['output'] == 'json' and 'display json' not in item['command']:
            item['command'] += '| display json'
        elif item['output'] == 'xml' and 'display xml' not in item['command']:
            item['command'] += '| display xml'
        else:
            # Output is 'text' (or suffix already present): strip any
            # explicit display suffix so the device returns plain text.
            if '| display json' in item['command']:
                item['command'] = str(item['command']).replace(' | display json', '')
            elif '| display xml' in item['command']:
                item['command'] = str(item['command']).replace(' | display xml', '')
        commands[index] = item
    return commands
def main():
    """entry point for module execution

    Builds the argument spec, normalizes the requested commands, runs them
    on the remote device, and retries every ``interval`` seconds (up to
    ``retries`` times) until the ``wait_for`` conditionals are satisfied.
    """
    argument_spec = dict(
        commands=dict(type='list', required=True),
        display=dict(choices=['text', 'json', 'xml'], default='text', aliases=['format', 'output']),
        # deprecated (Ansible 2.3) - use junos_rpc
        rpcs=dict(type='list'),
        wait_for=dict(type='list', aliases=['waitfor']),
        match=dict(default='all', choices=['all', 'any']),
        retries=dict(default=10, type='int'),
        interval=dict(default=1, type='int')
    )
    argument_spec.update(junos_argument_spec)
    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)
    warnings = list()
    check_args(module, warnings)
    # parse_commands reads module.params['display'] itself; no local copy
    # of it is needed here (the original kept an unused local).
    commands = parse_commands(module, warnings)
    wait_for = module.params['wait_for'] or list()
    conditionals = [Conditional(c) for c in wait_for]
    retries = module.params['retries']
    interval = module.params['interval']
    match = module.params['match']
    while retries > 0:
        responses = run_commands(module, commands)
        # Convert xml replies into python structures so conditionals can
        # index into them.
        for index, (resp, cmd) in enumerate(zip(responses, commands)):
            if cmd['output'] == 'xml':
                if not HAS_JXMLEASE:
                    module.fail_json(msg='jxmlease is required but does not appear to '
                                         'be installed. It can be installed using `pip install jxmlease`')
                try:
                    responses[index] = jxmlease.parse(resp)
                except Exception:
                    # Narrowed from a bare except so KeyboardInterrupt /
                    # SystemExit are no longer converted into ValueError;
                    # any parse failure still surfaces the raw response.
                    raise ValueError(resp)
        # Evaluate the outstanding conditionals; with match=any a single
        # success clears the whole list.
        for item in list(conditionals):
            try:
                if item(responses):
                    if match == 'any':
                        conditionals = list()
                        break
                    conditionals.remove(item)
            except FailedConditionalError:
                pass
        if not conditionals:
            break
        time.sleep(interval)
        retries -= 1
    if conditionals:
        failed_conditions = [item.raw for item in conditionals]
        # (grammar fix: the original said 'have not be satisfied')
        msg = 'One or more conditional statements have not been satisfied'
        module.fail_json(msg=msg, failed_conditions=failed_conditions)
    result = {
        'changed': False,
        'warnings': warnings,
        'stdout': responses,
        'stdout_lines': to_lines(responses)
    }
    module.exit_json(**result)
if __name__ == '__main__':
main()
| halberom/ansible | lib/ansible/modules/network/junos/junos_command.py | Python | gpl-3.0 | 8,873 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):

    dependencies = [
        ('farms', '0022_add_validators'),
    ]

    # The same "source" choice set applies to all three altered fields;
    # declare it once instead of repeating the literal three times.
    _SOURCE_CHOICES = [
        (b'User', b'User Entry'),
        (b'UGA', b'UGA Database'),
        (b'Computed', b'Computed'),
        (b'Unknown', b'Unknown'),
    ]

    operations = [
        migrations.AlterField(
            model_name='probereading',
            name='source',
            field=models.CharField(default=b'User', max_length=8,
                                   choices=_SOURCE_CHOICES),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='waterhistory',
            name='source',
            field=models.CharField(default=b'User', max_length=8,
                                   choices=_SOURCE_CHOICES),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='waterregister',
            name='source',
            field=models.CharField(default=b'User', max_length=8,
                                   choices=_SOURCE_CHOICES),
            preserve_default=True,
        ),
    ]
| warnes/irrigatorpro | irrigator_pro/farms/migrations/0023_reorder_source_choices.py | Python | mit | 1,225 |
import socket
from math import sqrt

# Simple UDP calculator server.  Protocol (space-separated text):
#   "soma a b"        -> replies with a + b
#   "raiz_quadrada n" -> replies with sqrt(n)
#   "sair"            -> shuts the server down
Host = 'localhost'
Porta = 2002

s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind((Host, Porta))

while True:
    # Blocking receive: one datagram per request.
    mensagem, cliente = s.recvfrom(2048)
    mensagem = mensagem.decode("utf-8")
    mensagemsp = mensagem.split(" ")
    if mensagemsp[0] == "sair":
        break
    else:
        if mensagemsp[0] == "soma":
            teste = len(mensagemsp)
            # NOTE(review): "soma 2 3" splits into 3 tokens, yet this
            # branch requires exactly 4.  It only works if the client
            # sends a trailing separator ("soma 2 3 " -> ['soma','2','3','']).
            # Confirm against the client before changing.
            if teste == 4:
                soma = int(mensagemsp[1]) + int(mensagemsp[2])
                soma = str(soma)
                soma = soma.encode("utf-8")
                s.sendto(soma, cliente)
            else:
                erro="mensagem invalida"
                erro = erro.encode("utf-8")
                s.sendto(erro, cliente)
        elif mensagemsp[0] == "raiz_quadrada":
            teste = len(mensagemsp)
            # NOTE(review): same off-by-one suspicion as above --
            # "raiz_quadrada 4" yields 2 tokens, but 3 are required here.
            if teste == 3:
                raiz = sqrt(int(mensagemsp[1]))
                raiz = str(raiz)
                raiz = raiz.encode("utf-8")
                s.sendto(raiz, cliente)
            else:
                erro="mensagem invalida"
                erro = erro.encode("utf-8")
                s.sendto(erro, cliente)
        else:
            # Unknown command: report the error back to the sender.
            erro1 = "Mensagem invalida"
            erro1 = erro1.encode("utf-8")
            s.sendto(erro1, cliente)

# Reached only via "sair"; release the socket.
s.close()
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Client side of the consoleauth RPC API.
"""
from oslo.config import cfg
from oslo import messaging
from nova import rpc
CONF = cfg.CONF
rpcapi_cap_opt = cfg.StrOpt('consoleauth',
help='Set a version cap for messages sent to consoleauth services')
CONF.register_opt(rpcapi_cap_opt, 'upgrade_levels')
class ConsoleAuthAPI(object):
    '''Client side of the consoleauth rpc API.

    API version history:

        1.0 - Initial version.
        1.1 - Added get_backdoor_port()
        1.2 - Added instance_uuid to authorize_console, and
              delete_tokens_for_instance

        ... Grizzly and Havana support message version 1.2.  So, any changes
        to existing methods in 2.x after that point should be done such
        that they can handle the version_cap being set to 1.2.

        2.0 - Major API rev for Icehouse
    '''

    VERSION_ALIASES = {
        'grizzly': '1.2',
        'havana': '1.2',
    }

    def __init__(self):
        super(ConsoleAuthAPI, self).__init__()
        target = messaging.Target(topic=CONF.consoleauth_topic, version='2.0')
        version_cap = self.VERSION_ALIASES.get(CONF.upgrade_levels.consoleauth,
                                               CONF.upgrade_levels.consoleauth)
        self.client = rpc.get_client(target, version_cap=version_cap)

    def _client_context(self, compat_version):
        # Prefer the Icehouse (2.0) message version; when the configured
        # version cap rules it out, fall back to the given pre-Icehouse
        # version (NOTE(russellb) Havana compat).
        if self.client.can_send_version('2.0'):
            return self.client.prepare(version='2.0')
        return self.client.prepare(version=compat_version)

    def authorize_console(self, ctxt, token, console_type, host, port,
                          internal_access_path, instance_uuid):
        # The remote side doesn't return anything, but we want to block
        # until it completes.
        cctxt = self._client_context('1.2')
        return cctxt.call(ctxt,
                          'authorize_console',
                          token=token, console_type=console_type,
                          host=host, port=port,
                          internal_access_path=internal_access_path,
                          instance_uuid=instance_uuid)

    def check_token(self, ctxt, token):
        cctxt = self._client_context('1.0')
        return cctxt.call(ctxt, 'check_token', token=token)

    def delete_tokens_for_instance(self, ctxt, instance_uuid):
        cctxt = self._client_context('1.2')
        return cctxt.cast(ctxt,
                          'delete_tokens_for_instance',
                          instance_uuid=instance_uuid)
| ewindisch/nova | nova/consoleauth/rpcapi.py | Python | apache-2.0 | 3,389 |
'''
multi_lock.py - this file is part of S3QL.
Copyright © 2008 Nikolaus Rath <Nikolaus@rath.org>
This work can be distributed under the terms of the GNU GPLv3.
'''
import threading
import logging
from contextlib import contextmanager
__all__ = [ "MultiLock" ]
log = logging.getLogger(__name__)
class MultiLock:
    """Provides locking for multiple objects.

    This class provides locking for a dynamically changing set of objects: The
    `acquire` and `release` methods have an additional argument, the locking
    key. Only locks with the same key can actually see each other, so that
    several threads can hold locks with different locking keys at the same time.

    MultiLock instances can be used as context managers.

    Note that it is actually possible for one thread to release a lock that has
    been obtained by a different thread. This is not a bug but a feature.
    """

    def __init__(self):
        # Set of currently-held keys; guarded by *cond*'s underlying lock.
        self.locked_keys = set()
        self.cond = threading.Condition(threading.Lock())

    @contextmanager
    def __call__(self, *key):
        """Hold the lock for *key* for the duration of the ``with`` block."""
        self.acquire(*key)
        try:
            yield
        finally:
            self.release(*key)

    def acquire(self, *key, timeout=None):
        '''Acquire lock for given key

        If timeout is exceeded, return False. Otherwise return True.
        '''
        with self.cond:
            if not self.cond.wait_for(lambda: key not in self.locked_keys,
                                      timeout):
                return False
            self.locked_keys.add(key)
            # Bug fix: the docstring promises True on success, but the
            # original fell off the end and returned None (falsy).
            return True

    def release(self, *key, noerror=False):
        """Release lock on given key

        If *noerror* is True, do not raise an exception if *key* is
        not locked (the original docstring had this condition inverted).
        """
        with self.cond:
            if noerror:
                self.locked_keys.discard(key)
            else:
                # Raises KeyError if *key* was never acquired.
                self.locked_keys.remove(key)
            # notify_all() is the non-deprecated spelling of notifyAll().
            self.cond.notify_all()
| singleton7/main | src/s3ql/multi_lock.py | Python | gpl-3.0 | 1,885 |
# Copyright (c) 2014 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import xml.dom.minidom
from oslo_log import log
import six
from manila.i18n import _LW
LOG = log.getLogger(__name__)
def name(tt):
    # A tupletree node is a (name, attributes, children) triple; slot 0
    # is the XML element name.
    return tt[0]
def attrs(tt):
    # Slot 1 of a tupletree node holds the element's attribute dict.
    return tt[1]
def kids(tt):
    # Slot 2 holds the child list; filter_tuples drops PCDATA strings so
    # only child element tuples remain.
    return filter_tuples(tt[2])
def filter_tuples(l):
    """Return only the tuples in a list.

    In a tupletree, tuples correspond to XML elements. Useful for
    stripping out whitespace data in a child list.

    :param l: list of tupletree children, or None.
    :returns: the tuple entries of *l*, or an empty list when *l* is None.
    """
    if l is None:
        return []
    # isinstance is the idiomatic type test; the original used
    # ``type(x) == tuple``, which also rejected tuple subclasses.
    return [x for x in l if isinstance(x, tuple)]
def parse_xml_api(tt):
    # Root of every XML-API reply: a <ResponsePacket> wrapping either a
    # normal <Response> or a <PacketFault>.  Returns the parsed child.
    check_node(tt, 'ResponsePacket', ['xmlns'])
    child = optional_child(tt, ['Response', 'PacketFault'])
    return child
def parse_response(tt):
    """Parse a <Response> element.

    A Response may contain any mix of the result element types below;
    each child is dispatched to its dedicated parse_* function and the
    list of parsed results is returned.
    """
    check_node(tt, 'Response')
    # Element names acceptable directly under <Response>.
    list_child = [
        'QueryStatus',
        'FileSystem',
        'FileSystemCapabilities',
        'FileSystemCapacityInfo',
        'Mount',
        'CifsShare',
        'CifsServer',
        'Volume',
        'StoragePool',
        'Fault',
        'TaskResponse',
        'Checkpoint',
        'NfsExport',
        'Mover',
        'MoverStatus',
        'MoverDnsDomain',
        'MoverInterface',
        'MoverRoute',
        'LogicalNetworkDevice',
        'MoverDeduplicationSettings',
        'Vdm',
    ]
    return list_of_various(tt, list_child)
def parse_querystatus(tt):
    """Parse a <QueryStatus> element.

    Returns (name, attrs, problems) when Problem children are present,
    otherwise just (name, attrs).
    """
    check_node(tt, 'QueryStatus', ['maxSeverity'])
    problems = list_of_various(tt, ['Problem'])
    if problems:
        return name(tt), attrs(tt), problems
    return name(tt), attrs(tt)
def parse_filesystem(tt):
    """Parse a <FileSystem> element into (name, attrs).

    Selected values from the child elements (host movers, auto-extend
    settings, worm/replication state, migration state) are promoted into
    the FileSystem node's own attribute dict so callers can read a single
    flat mapping.
    """
    required_attrs = ['fileSystem', 'name', 'type', 'storages', 'volume']
    optional_attrs = [
        'containsSlices',
        'flrClock',
        'internalUse',
        'maxFlrRetentionPeriod',
        'storagePools',
        'virtualProvisioning',
        'dataServicePolicies',
    ]
    check_node(tt, 'FileSystem', required_attrs, optional_attrs)
    list_child = [
        'RwFileSystemHosts',
        'RoFileSystemHosts',
        'FileSystemAutoExtInfo',
        'ProductionFileSystemData',
        'MigrationFileSystemData',
    ]
    child = list_of_various(tt, list_child)
    if len(child) > 0:
        for item in child:
            # Each parsed child is a (name, attrs) pair; copy the fields
            # of interest up onto this node's attrs.
            if (item[0] == 'RwFileSystemHosts' or
                    item[0] == 'RoFileSystemHosts'):
                if 'mover' in item[1].keys():
                    attrs(tt)['mover'] = item[1]['mover']
                if 'moverIdIsVdm' in item[1].keys():
                    attrs(tt)['moverIdIsVdm'] = item[1]['moverIdIsVdm']
            elif item[0] == 'FileSystemAutoExtInfo':
                if 'autoExtEnabled' in item[1].keys():
                    attrs(tt)['autoExtEnabled'] = item[1]['autoExtEnabled']
                if 'autoExtensionMaxSize' in item[1].keys():
                    attrs(tt)['autoExtensionMaxSize'] = (
                        item[1]['autoExtensionMaxSize'])
                if 'highWaterMark' in item[1].keys():
                    attrs(tt)['highWaterMark'] = item[1]['highWaterMark']
            elif item[0] == 'ProductionFileSystemData':
                if 'cwormState' in item[1].keys():
                    attrs(tt)['cwormState'] = item[1]['cwormState']
                if 'replicationRole' in item[1].keys():
                    attrs(tt)['replicationRole'] = item[1]['replicationRole']
            elif item[0] == 'MigrationFileSystemData':
                if 'state' in item[1].keys():
                    attrs(tt)['state'] = item[1]['state']
    return name(tt), attrs(tt)
def parse_rwfilesystemhosts_filesystem(tt):
check_node(tt, 'RwFileSystemHosts', ['mover'], ['moverIdIsVdm'])
return name(tt), attrs(tt)
def parse_rofilesystemhosts_filesystem(tt):
check_node(tt, 'RoFileSystemHosts', ['mover'], ['moverIdIsVdm'])
return name(tt), attrs(tt)
def parse_rwfilesystemhosts_ckpt(tt):
check_node(tt, 'rwFileSystemHosts', ['mover'], ['moverIdIsVdm'])
return name(tt), attrs(tt)
def parse_rofilesystemhosts_ckpt(tt):
check_node(tt, 'roFileSystemHosts', ['mover'], ['moverIdIsVdm'])
return name(tt), attrs(tt)
def parse_filesystemautoextinfo(tt):
required_attrs = []
optional_attrs = [
'autoExtEnabled',
'autoExtensionMaxSize',
'highWaterMark',
]
check_node(tt, 'FileSystemAutoExtInfo', required_attrs, optional_attrs)
return name(tt), attrs(tt)
def parse_productionfilesystemdata(tt):
required_attrs = []
optional_attrs = ['cwormState', 'replicationRole']
check_node(tt, 'ProductionFileSystemData', required_attrs, optional_attrs)
return name(tt), attrs(tt)
def parse_migrationfilesystemdata(tt):
check_node(tt, 'MigrationFileSystemData', [], ['state'])
return name(tt), attrs(tt)
def parse_filesystemcapabilities(tt):
check_node(tt, 'FileSystemCapabilities', ['fileSystem'], [])
child = list_of_various(tt, ['StoragePoolBased', 'DiskVolumeBased'])
if len(child) > 0:
for item in child:
if item[0] == 'StoragePoolBased':
if 'recommendedPool' in item[1].keys():
attrs(tt)['recommendedPool'] = item[1]['recommendedPool']
if 'validPools' in item[1].keys():
attrs(tt)['validPools'] = item[1]['validPools']
return name(tt), attrs(tt)
def parse_storagepoolbased(tt):
check_node(tt, 'StoragePoolBased', [], ['recommendedPool', 'validPools'])
return name(tt), attrs(tt)
def parse_diskvolumebased(tt):
required_attrs = []
optional_attrs = ['recommendedStorage', 'validStorages']
check_node(tt, 'DiskVolumeBased', required_attrs, optional_attrs)
return name(tt), attrs(tt)
def parse_filesystemcapacityinfo(tt):
check_node(tt, 'FileSystemCapacityInfo', ['fileSystem', 'volumeSize'], [])
child = optional_child(tt, ['ResourceUsage'])
if child is not None:
if child[0] == 'ResourceUsage':
if 'spaceTotal' in child[1].keys():
attrs(tt)['spaceTotal'] = child[1]['spaceTotal']
if 'filesUsed' in child[1].keys():
attrs(tt)['filesUsed'] = child[1]['filesUsed']
if 'spaceUsed' in child[1].keys():
attrs(tt)['spaceUsed'] = child[1]['spaceUsed']
if 'filesTotal' in child[1].keys():
attrs(tt)['filesTotal'] = child[1]['filesTotal']
return name(tt), attrs(tt)
def parse_resourceusage(tt):
required_attrs = ['filesTotal', 'filesUsed', 'spaceTotal', 'spaceUsed']
check_node(tt, 'ResourceUsage', required_attrs)
return name(tt), attrs(tt)
def parse_mount(tt):
required_attrs = ['fileSystem', 'mover', 'path']
optional_attrs = ['disabled', 'ntCredential', 'moverIdIsVdm']
check_node(tt, 'Mount', required_attrs, optional_attrs)
child = list_of_various(tt, ['NfsOptions', 'CifsOptions'])
if child is not None:
for item in child:
if item[0] == 'NfsOptions':
if 'ro' in item[1].keys():
attrs(tt)['ro'] = item[1]['ro']
if item[0] == 'CifsOptions':
if 'cifsSyncwrite' in item[1].keys():
attrs(tt)['cifsSyncwrite'] = item[1]['cifsSyncwrite']
return name(tt), attrs(tt)
def parse_nfsoptions(tt):
required_attrs = []
optional_attrs = ['ro', 'prefetch', 'uncached', 'virusScan']
check_node(tt, 'NfsOptions', required_attrs, optional_attrs)
return name(tt), attrs(tt)
def parse_cifsoptions(tt):
required_attrs = []
optional_attrs = [
'cifsSyncwrite',
'accessPolicy',
'lockingPolicy',
'notify',
'notifyOnAccess',
'notifyOnWrite',
'oplock',
'triggerLevel',
]
check_node(tt, 'CifsOptions', required_attrs, optional_attrs)
return name(tt), attrs(tt)
def parse_cifsshare(tt):
required_attrs = ['mover', 'name', 'path']
optional_attrs = ['comment', 'fileSystem', 'maxUsers', 'moverIdIsVdm']
check_node(tt, 'CifsShare', required_attrs, optional_attrs)
child = one_child(tt, ['CifsServers'])
if child is not None:
attrs(tt)['CifsServers'] = child[1]
return name(tt), attrs(tt)
def parse_cifsservers(tt):
check_node(tt, 'CifsServers')
child = list_of_various(tt, ['li'])
if len(child) > 0 and child[0] is not None:
return 'CifsServers', child
def parse_li(tt):
    # <li> leaves carry PCDATA only (allow_pcdata=True, no attrs or
    # children); concatenate the text fragments into one string.
    check_node(tt, 'li', [], [], [], True)
    return ''.join(tt[2])
def parse_cifsserver(tt):
required_attrs = ['mover', 'name', 'type']
optional_attrs = ['localUsers', 'interfaces', 'moverIdIsVdm']
check_node(tt, 'CifsServer', required_attrs, optional_attrs)
list_child = [
'Aliases',
'StandaloneServerData',
'NT40ServerData',
'W2KServerData',
]
child = list_of_various(tt, list_child)
if len(child) > 0:
for item in child:
if item[0] == 'Aliases':
attrs(tt)['aliases'] = item[1]
elif item[0] == 'W2KServerData':
if 'domain' in item[1].keys():
attrs(tt)['domain'] = item[1]['domain']
if 'domainJoined' in item[1].keys():
attrs(tt)['domainJoined'] = item[1]['domainJoined']
if 'compName' in item[1].keys():
attrs(tt)['compName'] = item[1]['compName']
elif item[0] == 'NT40ServerData':
if 'domain' in item[1].keys():
attrs(tt)['domain'] = item[1]['domain']
return name(tt), attrs(tt)
def parse_aliases(tt):
check_node(tt, 'Aliases')
child = list_of_various(tt, ['li'])
if len(child) > 0:
return 'Aliases', child
def parse_standaloneserverdata(tt):
check_node(tt, 'StandaloneServerData', ['workgroup'])
return name(tt), attrs(tt)
def parse_nt40serverdata(tt):
check_node(tt, 'NT40ServerData', ['domain'])
return name(tt), attrs(tt)
def parse_w2kserverdata(tt):
check_node(tt, 'W2KServerData', ['compName', 'domain'], ['domainJoined'])
return name(tt), attrs(tt)
def parse_volume(tt):
required_attrs = ['name', 'size', 'type', 'virtualProvisioning', 'volume']
optional_attrs = ['clientVolumes']
check_node(tt, 'Volume', required_attrs, optional_attrs)
list_child = [
'MetaVolumeData',
'SliceVolumeData',
'StripeVolumeData',
'DiskVolumeData',
'PoolVolumeData',
'FreeSpace',
]
child = list_of_various(tt, list_child)
if len(child) > 0:
for item in child:
if item[0] == 'MetaVolumeData':
if 'memberVolumes' in item[1].keys():
attrs(tt)['memberVolumes'] = item[1]['memberVolumes']
if 'clientFileSystems' in item[1].keys():
attrs(tt)['clientFileSystems'] = (
item[1]['clientFileSystems'])
return name(tt), attrs(tt)
def parse_slicevolumedata(tt):
pass
def parse_stripevolumedata(tt):
pass
def parse_diskvolumedata(tt):
pass
def parse_poolvolumedata(tt):
pass
def parse_freespace(tt):
pass
def parse_metavolumedata(tt):
check_node(tt, 'MetaVolumeData', ['memberVolumes'], ['clientFileSystems'])
return name(tt), attrs(tt)
def parse_storagepool(tt):
required_attrs = [
'autoSize',
'diskType',
'memberVolumes',
'movers',
'name',
'pool',
'size',
'storageSystems',
'usedSize',
]
optional_attrs = [
'description',
'mayContainSlicesDefault',
'stripeCount',
'stripeSize',
'templatePool',
'virtualProvisioning',
'dataServicePolicies',
'isHomogeneous',
]
check_node(tt, 'StoragePool', required_attrs, optional_attrs)
list_child = ['SystemStoragePoolData', 'UserStoragePoolData']
child = list_of_various(tt, list_child)
if len(child) > 0:
for item in child:
if item[0] == 'SystemStoragePoolData':
if 'greedy' in item[1].keys():
attrs(tt)['greedy'] = item[1]['greedy']
if 'isBackendPool' in item[1].keys():
attrs(tt)['isBackendPool'] = item[1]['isBackendPool']
return name(tt), attrs(tt)
def parse_systemstoragepooldata(tt):
required_attrs = ['potentialAdditionalSize']
optional_attrs = [
'greedy',
'dynamic',
'isBackendPool',
'usedSize',
'size',
]
check_node(tt, 'SystemStoragePoolData', required_attrs, optional_attrs)
return name(tt), attrs(tt)
def parse_userstoragepooldata(tt):
pass
def parse_fault(tt):
check_node(tt, 'Fault', ['maxSeverity'])
child = list_of_various(tt, ['Problem'])
if len(child) != 0:
return name(tt), attrs(tt), child
else:
return name(tt), attrs(tt)
def parse_packetfault(tt):
check_node(tt, 'PacketFault', ['maxSeverity'])
child = list_of_various(tt, ['Problem'])
if len(child) != 0:
return name(tt), attrs(tt), child
else:
return name(tt), attrs(tt)
def parse_problem(tt):
    """Parse a <Problem> element into (name, attrs).

    Description/Diagnostics children are parsed into single-key dicts
    and merged into the Problem's attribute dict.  parse_action returns
    None, which is why the None check below is needed.
    """
    required_attrs = ['component', 'messageCode', 'severity']
    optional_attrs = ['facility', 'message']
    check_node(tt, 'Problem', required_attrs, optional_attrs)
    child = list_of_various(tt, ['Description', 'Action', 'Diagnostics'])
    if 0 != len(child):
        for item in child:
            if item is not None:
                # Note the case difference is intentional in the original:
                # 'description'/'action' are stored lowercase,
                # 'Diagnostics' keeps its capitalized key.
                if 'Description' in item.keys():
                    attrs(tt)['description'] = item['Description']
                if 'Action' in item.keys():
                    attrs(tt)['action'] = item['Action']
                if 'Diagnostics' in item.keys():
                    attrs(tt)['Diagnostics'] = item['Diagnostics']
    return name(tt), attrs(tt)
def parse_description(tt):
check_node(tt, 'Description', [], [], [], True)
if tt[2] is not None:
return {name(tt): ''.join(tt[2])}
def parse_action(tt):
pass
def parse_diagnostics(tt):
check_node(tt, 'Diagnostics', [], [], [], True)
return {name(tt): ''.join(tt[2])}
def parse_taskresponse(tt):
check_node(tt, 'TaskResponse', ['taskId'])
child = one_child(tt, ['Status'])
if 'maxSeverity' in child[1].keys():
attrs(tt)['maxSeverity'] = child[1]['maxSeverity']
if len(child) == 2:
return name(tt), attrs(tt)
else:
return name(tt), attrs(tt), child[2]
def parse_status(tt):
check_node(tt, 'Status', ['maxSeverity'])
child = list_of_various(tt, ['Problem'])
if child:
return name(tt), attrs(tt), child
else:
return name(tt), attrs(tt)
def parse_checkpoint(tt):
required_attrs = ['checkpoint', 'name', 'state', 'time']
optional_attrs = [
'baseline',
'checkpointOf',
'fileSystemSize',
'writeable',
]
check_node(tt, 'Checkpoint', required_attrs, optional_attrs)
child = list_of_various(tt, ['rwFileSystemHosts', 'roFileSystemHosts'])
for item in child:
if item[0] == 'rwFileSystemHosts' or item[0] == 'roFileSystemHosts':
if 'mover' in item[1].keys():
attrs(tt)['mover'] = item[1]['mover']
if 'moverIdIsVdm' in item[1].keys():
attrs(tt)['moverIdIsVdm'] = item[1]['moverIdIsVdm']
if item[0] == 'roFileSystemHosts':
attrs(tt)['readOnly'] = True
else:
attrs(tt)['readOnly'] = False
return name(tt), attrs(tt)
def parse_nfsexport(tt):
required_attrs = ['mover', 'path']
optional_attrs = ['anonUser', 'fileSystem', 'readOnly']
check_node(tt, 'NfsExport', required_attrs, optional_attrs)
list_child = ['AccessHosts', 'RwHosts', 'RoHosts', 'RootHosts']
child = list_of_various(tt, list_child)
for item in child:
if 'AccessHosts' in item.keys():
attrs(tt)['AccessHosts'] = item['AccessHosts']
if 'RwHosts' in item.keys():
attrs(tt)['RwHosts'] = item['RwHosts']
if 'RoHosts' in item.keys():
attrs(tt)['RoHosts'] = item['RoHosts']
if 'RootHosts' in item.keys():
attrs(tt)['RootHosts'] = item['RootHosts']
return name(tt), attrs(tt)
def parse_accesshosts(tt):
check_node(tt, 'AccessHosts')
access_hosts = []
child = list_of_various(tt, ['li'])
for item in child:
if item != '':
access_hosts.append(item)
return {'AccessHosts': access_hosts}
def parse_rwhosts(tt):
check_node(tt, 'RwHosts')
rw_hosts = []
child = list_of_various(tt, ['li'])
for item in child:
if item != '':
rw_hosts.append(item)
return {'RwHosts': rw_hosts}
def parse_rohosts(tt):
check_node(tt, 'RoHosts')
ro_hosts = []
child = list_of_various(tt, ['li'])
for item in child:
if item != '':
ro_hosts.append(item)
return {'RoHosts': ro_hosts}
def parse_roothosts(tt):
check_node(tt, 'RootHosts')
root_hosts = []
child = list_of_various(tt, ['li'])
for item in child:
if item != '':
root_hosts.append(item)
return {'RootHosts': root_hosts}
def parse_mover(tt):
required_attrs = ['host', 'mover', 'name']
optional_attrs = [
'failoverPolicy',
'i18NMode',
'ntpServers',
'role',
'standbyFors',
'standbys',
'targetState',
]
check_node(tt, 'Mover', required_attrs, optional_attrs)
return name(tt), attrs(tt)
def parse_moverstatus(tt):
required_attrs = ['csTime', 'mover', 'uptime']
optional_attrs = ['clock', 'timezone', 'version']
check_node(tt, 'MoverStatus', required_attrs, optional_attrs)
child = one_child(tt, ['Status'])
if len(child) >= 2:
attrs(tt)['Status'] = child[1]['maxSeverity']
if len(child) >= 3:
attrs(tt)['Problem'] = child[2]
return name(tt), attrs(tt)
def parse_moverdnsdomain(tt):
required_attrs = ['mover', 'name', 'servers']
optional_attrs = ['protocol']
check_node(tt, 'MoverDnsDomain', required_attrs, optional_attrs)
return name(tt), attrs(tt)
def parse_moverinterface(tt):
required_attrs = ['device', 'ipAddress', 'macAddr', 'mover', 'name']
optional_attrs = [
'broadcastAddr',
'ipVersion',
'mtu',
'netMask',
'up',
'vlanid',
]
check_node(tt, 'MoverInterface', required_attrs, optional_attrs)
return name(tt), attrs(tt)
def parse_moverroute(tt):
required_attrs = ['mover']
optional_attrs = [
'destination',
'interface',
'ipVersion',
'netMask',
'gateway',
]
check_node(tt, 'MoverRoute', required_attrs, optional_attrs)
return name(tt), attrs(tt)
def parse_logicalnetworkdevice(tt):
required_attrs = ['mover', 'name', 'speed', 'type']
optional_attrs = ['interfaces']
check_node(tt, 'LogicalNetworkDevice', required_attrs, optional_attrs)
return name(tt), attrs(tt)
def parse_moverdeduplicationsettings(tt):
required_attrs = ['mover']
optional_attrs = [
'accessTime',
'modificationTime',
'maximumSize',
'minimumSize',
'caseSensitive',
'duplicateDetectionMethod',
'minimumScanInterval',
'fileExtensionExcludeList',
'savVolHighWatermark',
'backupDataHighWatermark',
'CPULowWatermark',
'CPUHighWatermark',
'cifsCompressionEnabled',
]
check_node(tt,
'MoverDeduplicationSettings',
required_attrs,
optional_attrs)
return name(tt), attrs(tt)
def parse_vdm(tt):
required_attrs = ['name', 'state', 'vdm']
optional_attrs = ['mover', 'rootFileSystem']
check_node(tt, 'Vdm', required_attrs, optional_attrs)
child = list_of_various(tt, ['Status', 'Interfaces'])
if len(child) > 0:
for item in child:
if 'Interfaces' == item[0]:
attrs(tt)['Interfaces'] = item[1]
return name(tt), attrs(tt)
def parse_interfaces(tt):
check_node(tt, 'Interfaces')
interfaces = []
child = list_of_various(tt, ['li'])
for item in child:
if item != '':
interfaces.append(item)
if interfaces:
return 'Interfaces', interfaces
def one_child(tt, acceptable):
    """Parse the children of a node expected to have exactly one child.

    PCData is ignored.  A wrong child count or an unexpected child type
    only produces a warning; the first child is still parsed.  (An empty
    child list raises IndexError, matching the original behaviour.)
    """
    children = kids(tt)
    if len(children) != 1:
        LOG.warn(_LW('Expected just one %(item)s, got %(more)s.'),
                 {'item': acceptable,
                  'more': " ".join([t[0] for t in children])})
    first = children[0]
    if name(first) not in acceptable:
        LOG.warn(_LW('Expected one of %(item)s, got %(child)s '
                     'under %(parent)s.'),
                 {'item': acceptable,
                  'child': name(first),
                  'parent': name(tt)})
    return parse_any(first)
def parse_any(tt):
    """Parse any XML fragment by dispatching to a parse_<nodename> function.

    Returns the parser result, or None (with a warning) when no parser
    exists for the node type.
    """
    node_name = name(tt).replace('.', '_')
    # File-system and checkpoint host lists differ only in case, so they
    # are disambiguated here before the parser name is built.
    if node_name in ('RwFileSystemHosts', 'RoFileSystemHosts'):
        node_name += '_filesystem'
    elif node_name in ('rwFileSystemHosts', 'roFileSystemHosts'):
        node_name += '_ckpt'
    parser = globals().get('parse_' + node_name.lower())
    if parser is not None:
        return parser(tt)
    LOG.warn(_LW('No parser for node type %s.'), name(tt))
def check_node(tt, nodename, required_attrs=None, optional_attrs=None,
               allowed_children=None, allow_pcdata=False):
    """Check static local constraints on a single node.

    The node must have the given name. The required attrs must be
    present, and the optional attrs may be.

    If allowed_children is not None, the node may have children of the
    given types. It can be [] for nodes that may not have any
    children. If it's None, it is assumed the children are validated
    in some other way.

    If allow_pcdata is true, then non-whitespace text children are allowed.
    (Whitespace text nodes are always allowed.)

    All violations are reported via LOG.warn only; this function never
    raises, so parsing always continues.
    """
    if not optional_attrs:
        optional_attrs = []
    if not required_attrs:
        required_attrs = []
    if name(tt) != nodename:
        LOG.warn(_LW('Expected node type %(expected)s, not %(actual)s.'),
                 {'expected': nodename, 'actual': name(tt)})
    # Check we have all the required attributes, and no unexpected ones.
    # Work on a copy so the tree's own attribute dict is not mutated.
    tt_attrs = {}
    if attrs(tt) is not None:
        tt_attrs = attrs(tt).copy()
    for attr in required_attrs:
        if attr not in tt_attrs:
            LOG.warn(_LW('Expected %(attr)s attribute on %(node)s node,'
                         ' but only have %(attrs)s.'),
                     {'attr': attr,
                      'node': name(tt),
                      'attrs': attrs(tt).keys()})
        else:
            del tt_attrs[attr]
    for attr in optional_attrs:
        if attr in tt_attrs:
            del tt_attrs[attr]
    # Anything still left in the copy was neither required nor optional.
    if len(tt_attrs.keys()) > 0:
        LOG.warn(_LW('Invalid extra attributes %s.'), tt_attrs.keys())
    if allowed_children is not None:
        for c in kids(tt):
            if name(c) not in allowed_children:
                LOG.warn(_LW('Unexpected node %(node)s under %(parent)s;'
                             ' wanted %(expected)s.'),
                         {'node': name(c),
                          'parent': name(tt),
                          'expected': allowed_children})
    if not allow_pcdata:
        # tt[2] holds the node's contents; string entries are text nodes.
        for c in tt[2]:
            if isinstance(c, six.string_types):
                if c.lstrip(' \t\n') != '':
                    LOG.warn(_LW('Unexpected non-blank pcdata node %(node)s'
                                 ' under %(parent)s.'),
                             {'node': repr(c),
                              'parent': name(tt)})
def optional_child(tt, allowed):
    """Parse zero or one of a list of elements from the child nodes.

    Returns the parsed child when exactly one exists, otherwise None
    (with a warning when there is more than one child).
    """
    child_count = len(kids(tt))
    if child_count == 1:
        return one_child(tt, allowed)
    if child_count > 1:
        LOG.warn(_LW('Expected either zero or one of %(node)s '
                     'under %(parent)s.'), {'node': allowed,
                                            'parent': tt})
    return None
def list_of_various(tt, acceptable):
    """Parse zero or more child elements, each of any acceptable type.

    Unexpected child types are warned about but still parsed; parse
    results of None are dropped from the returned list.
    """
    parsed = []
    for child in kids(tt):
        if name(child) not in acceptable:
            LOG.warn(_LW('Expected one of %(expected)s under'
                         ' %(parent)s, got %(actual)s.'),
                     {'expected': acceptable,
                      'parent': name(tt),
                      'actual': repr(name(child))})
        outcome = parse_any(child)
        if outcome is not None:
            parsed.append(outcome)
    return parsed
def dom_to_tupletree(node):
    """Convert a DOM object to a pyRXP-style tuple tree.

    Each element is a 4-tuple of (NAME, ATTRS, CONTENTS, None), where
    CONTENTS mixes child tuples and text strings.  Very nice for
    processing complex nested trees.
    """
    if node.nodeType == node.DOCUMENT_NODE:
        # boring; pop down one level to the document element
        return dom_to_tupletree(node.firstChild)
    assert node.nodeType == node.ELEMENT_NODE

    node_name = node.nodeName
    attributes = {}
    contents = []

    for child in node.childNodes:
        if child.nodeType == child.ELEMENT_NODE:
            contents.append(dom_to_tupletree(child))
        elif child.nodeType == child.TEXT_NODE:
            msg = "text node %s is not a string" % repr(child)
            assert isinstance(child.nodeValue, six.string_types), msg
            contents.append(child.nodeValue)
        else:
            # comments, CDATA, processing instructions etc. are rejected
            raise RuntimeError("can't handle %s" % child)

    for i in range(node.attributes.length):
        attr_node = node.attributes.item(i)
        attributes[attr_node.nodeName] = attr_node.nodeValue

    return node_name, attributes, contents, None
def xml_to_tupletree(xml_string):
    """Parse an XML document string straight into a tupletree."""
    return dom_to_tupletree(xml.dom.minidom.parseString(xml_string))
| jcsp/manila | manila/share/drivers/emc/plugins/vnx/xml_api_parser.py | Python | apache-2.0 | 26,983 |
from __future__ import print_function, division, absolute_import
from timeit import default_timer as timer
import numpy as np
from numba import unittest_support as unittest
from numba import hsa, float32
class TestMatMul(unittest.TestCase):
    """HSA matrix-multiply kernels checked against np.dot."""

    def test_matmul_naive(self):
        """One output element per work-item; each recomputes a full dot
        product from global memory."""
        @hsa.jit
        def matmul(A, B, C):
            i = hsa.get_global_id(0)
            j = hsa.get_global_id(1)

            # Guard against work-items outside the output matrix.
            if i >= C.shape[0] or j >= C.shape[1]:
                return

            tmp = 0

            for k in range(A.shape[1]):
                tmp += A[i, k] * B[k, j]

            C[i, j] = tmp

        N = 256
        A = np.random.random((N, N)).astype(np.float32)
        B = np.random.random((N, N)).astype(np.float32)
        C = np.zeros_like(A)

        # Launch twice: the first call includes JIT compilation time,
        # the second measures a warm run.
        with hsa.register(A, B, C):
            ts = timer()
            matmul[(N // 16, N // 16), (16, 16)](A, B, C)
            te = timer()
            print("1st GPU time:", te - ts)

        with hsa.register(A, B, C):
            ts = timer()
            matmul[(N // 16, N // 16), (16, 16)](A, B, C)
            te = timer()
            print("2nd GPU time:", te - ts)

        ts = timer()
        ans = np.dot(A, B)
        te = timer()
        print("CPU time:", te - ts)
        np.testing.assert_allclose(ans, C, rtol=1e-5)

    def test_matmul_fast(self):
        """Tiled variant: work-groups stage blocksize x blocksize tiles of A
        and B in shared memory before accumulating."""
        blocksize = 20
        gridsize = 20

        @hsa.jit
        def matmulfast(A, B, C):
            x = hsa.get_global_id(0)
            y = hsa.get_global_id(1)

            tx = hsa.get_local_id(0)
            ty = hsa.get_local_id(1)

            sA = hsa.shared.array(shape=(blocksize, blocksize), dtype=float32)
            sB = hsa.shared.array(shape=(blocksize, blocksize), dtype=float32)

            if x >= C.shape[0] or y >= C.shape[1]:
                return

            tmp = 0

            for i in range(gridsize):
                # preload one tile of A and B into shared memory
                sA[tx, ty] = A[x, ty + i * blocksize]
                sB[tx, ty] = B[tx + i * blocksize, y]

                # wait for preload to end
                # NOTE(review): barrier flag 1 — presumably work-group scope;
                # confirm against the numba.hsa barrier documentation.
                hsa.barrier(1)

                # compute loop over the staged tile
                for j in range(blocksize):
                    tmp += sA[tx, j] * sB[j, ty]

                # wait for compute to end before the tile is overwritten
                hsa.barrier(1)

            C[x, y] = tmp

        N = gridsize * blocksize
        A = np.random.random((N, N)).astype(np.float32)
        B = np.random.random((N, N)).astype(np.float32)
        C = np.zeros_like(A)

        griddim = gridsize, gridsize
        blockdim = blocksize, blocksize

        # First launch includes JIT compile; second is the warm timing.
        with hsa.register(A, B, C):
            ts = timer()
            matmulfast[griddim, blockdim](A, B, C)
            te = timer()
            print("1st GPU time:", te - ts)

        with hsa.register(A, B, C):
            ts = timer()
            matmulfast[griddim, blockdim](A, B, C)
            te = timer()
            print("2nd GPU time:", te - ts)

        ts = timer()
        ans = np.dot(A, B)
        te = timer()
        print("CPU time:", te - ts)
        np.testing.assert_allclose(ans, C, rtol=1e-5)
# Allow running this test module directly, outside a test runner.
if __name__ == '__main__':
    unittest.main()
| gdementen/numba | numba/hsa/tests/hsapy/test_matmul.py | Python | bsd-2-clause | 3,127 |
import os
class ListadoCartas(object):
    """Listing of the cards a player has not yet seen.

    Keeps track of which cards were already shown, so the player knows
    which ones are still worth asking about.
    """

    def __init__(self, personajes_inicial, armas_inicial, lugares_inicial):
        """Receives sequences with the initial character, weapon and
        location cards; each is copied so the caller's list is untouched."""
        self.personajes = personajes_inicial[:]
        self.armas = armas_inicial[:]
        self.lugares = lugares_inicial[:]

    def agregar(self, cadena, cartas, nombre):
        """Append a '<nombre>:' section listing *cartas* (plus a separator)
        to *cadena* and return the result."""
        lineas = [nombre + ':']
        lineas.extend(cartas)
        lineas.append('------')
        return cadena + os.linesep.join(lineas) + os.linesep

    def __str__(self):
        """Render the listing as one string with one section per card type."""
        texto = self.agregar('', self.personajes, 'Personajes')
        texto = self.agregar(texto, self.armas, 'Armas')
        return self.agregar(texto, self.lugares, 'Lugares')

    def sacar_de(self, tipo, carta):
        """Remove *carta* from the list *tipo* if present (no-op otherwise)."""
        try:
            tipo.remove(carta)
        except ValueError:
            pass

    def sacar_carta(self, carta):
        """Mark a card as seen: drop it from all three listings."""
        self.sacar_de(self.personajes, carta)
        self.sacar_de(self.armas, carta)
        self.sacar_de(self.lugares, carta)
| fbarrios/fiuba7540tp31c2015 | src/listado_cartas.py | Python | gpl-2.0 | 1,333 |
from inspect import signature
from functools import wraps
def typeassert(*ty_args, **ty_kwargs):
    """Decorator factory that enforces argument types at call time.

    Expected types are given positionally or by keyword and matched
    against the decorated function's signature.  Checking is skipped
    entirely when Python runs in optimized mode (-O), mirroring assert.
    """
    def decorate(func):
        # With -O the checks would be dead weight, so return func as-is.
        if not __debug__:
            return func

        sig = signature(func)
        expected = sig.bind_partial(*ty_args, **ty_kwargs).arguments

        @wraps(func)
        def wrapper(*args, **kwargs):
            bound = sig.bind(*args, **kwargs)
            for arg_name, arg_value in bound.arguments.items():
                if arg_name in expected and not isinstance(arg_value, expected[arg_name]):
                    raise TypeError('Argument {} must be {}'.format(arg_name, expected[arg_name]))
            return func(*args, **kwargs)
        return wrapper
    return decorate
# Demo: both arguments to add() must be ints, enforced by @typeassert.
@typeassert(int, int)
def add(x, y):
    """Return the sum of two ints (types checked by the decorator)."""
    return x + y
add(2, 3)  # OK: both arguments are ints.
add(2, 'hello') | likeleon/Python | cookbook/9.7 데코레이터를 사용해서 함수에서 타입 확인 강제.py | Python | gpl-2.0 | 819 |
# -*- coding: utf-8 -*-
import xbmc, xbmcgui, xbmcplugin
import urllib2,urllib,cgi, re
import HTMLParser
import xbmcaddon
import json
import traceback
import os
from BeautifulSoup import BeautifulStoneSoup, BeautifulSoup, BeautifulSOAP
import time
import sys
import CustomPlayer
import base64
# Handle on the currently running addon; used for notification text/icon.
__addon__ = xbmcaddon.Addon()
__addonname__ = __addon__.getAddonInfo('name')
__icon__ = __addon__.getAddonInfo('icon')
# Explicit addon id so settings resolve even when invoked indirectly.
addon_id = 'plugin.video.shahidmbcnet'
selfAddon = xbmcaddon.Addon(id=addon_id)
addonPath = xbmcaddon.Addon().getAddonInfo("path")
addonArt = os.path.join(addonPath,'resources/images')
#communityStreamPath = os.path.join(addonPath,'resources/community')
# Community stream definitions live under <addon>/resources/community.
communityStreamPath = os.path.join(addonPath,'resources')
communityStreamPath =os.path.join(communityStreamPath,'community')
def PlayStream(sourceEtree, urlSoup, name, url):
    """Resolve a channel entry to a playable URL and start playback.

    sourceEtree: ElementTree of the source definition (provides 'sname').
    urlSoup: BeautifulSoup of the channel <item> (link/title/regex nodes).
    Returns True when playback started and ran for >= 3 seconds (or was
    handed off to the f4mTester plugin), False on any failure.
    """
    try:
        #url = urlSoup.url.text
        pDialog = xbmcgui.DialogProgress()
        pDialog.create('XBMC', 'Parsing the xml file')
        pDialog.update(10, 'fetching channel info')
        title=''
        link=''
        sc=''
        # Best effort: any of these nodes may be missing from the XML.
        try:
            link=urlSoup.item.link.text
            sc=sourceEtree.findtext('sname')
            title=urlSoup.item.title.text
        except: pass
        if link=='':
            timeD = 2000 #in miliseconds
            line1="couldn't read title and link"
            xbmc.executebuiltin('Notification(%s, %s, %d, %s)'%(__addonname__,line1, timeD, __icon__))
            return False
        regexs = urlSoup.find('regex')
        pDialog.update(20, 'Parsing info')
        # When <regex> nodes are present the link contains $doregex[...]
        # placeholders that must be resolved by getRegexParsed().
        if (not regexs==None) and len(regexs)>0:
            liveLink= getRegexParsed(urlSoup,link)
        else:
            liveLink= link
        if len(liveLink)==0:
            timeD = 2000 #in miliseconds
            line1="couldn't read title and link"
            xbmc.executebuiltin('Notification(%s, %s, %d, %s)'%(__addonname__,line1, timeD, __icon__))
            return False
        timeD = 2000 #in miliseconds
        line1="Resource found,playing now."
        pDialog.update(30, line1)
        # Expand $setting_xxx$ placeholders from the addon settings.
        liveLink=replaceSettingsVariables(liveLink)
        name+='-'+sc+':'+title
        # GLArab/Local sources need an authenticated session baked into
        # the URL via the $GL-...$ placeholders.
        if (sc=='GLArab' or sc=='Local') and '$GL-' in liveLink :
            gcid=None
            try:
                gcid = urlSoup.item.glchannelid.text
                if gcid and len(gcid)==0: gcid=None
            except: pass
            liveLink=replaceGLArabVariables(liveLink,pDialog,gcid, title)
            if liveLink=="": return False
        print 'liveLink',liveLink
        pDialog.close()
        listitem = xbmcgui.ListItem( label = str(name), iconImage = "DefaultVideo.png", thumbnailImage = xbmc.getInfoImage( "ListItem.Thumb" ), path=liveLink )
        if not 'plugin.video.f4mTester' in liveLink:
            player = CustomPlayer.MyXBMCPlayer()
            start = time.time()
            #xbmc.Player().play( liveLink,listitem)
            player.play( liveLink,listitem)
            xbmc.sleep(2000)
            # Block until the custom player reports playback finished.
            while player.is_active:
                xbmc.sleep(200)
            #return player.urlplayed
            done = time.time()
            elapsed = done - start
            # Treat anything shorter than 3 seconds as a failed stream.
            if player.urlplayed and elapsed>=3:
                return True
            else:
                return False
        else:
            # f4m streams are delegated to the f4mTester plugin instead.
            xbmc.executebuiltin('XBMC.RunPlugin('+liveLink+')')
            return True
    except:
        traceback.print_exc(file=sys.stdout)
        return False
def getRegexParsed(regexs, url,cookieJar=None,forCookieJarOnly=False,recursiveCall=False,cachedPages={}, rawPost=False):#0,1,2 = URL, regexOnly, CookieJarOnly
    """Resolve every $doregex[name] placeholder in *url*.

    For each placeholder a matching <regex name=...> node in *regexs*
    (a BeautifulSoup tree) describes a page to fetch, optional POST data,
    and an expression to extract the replacement value.  Placeholders may
    nest (pages/posts/expressions can themselves contain $doregex), which
    is handled by recursive calls.  Also expands the special tokens
    $epoctime$, $GUID$ and $get_cookies$.

    WARNING: '$pyFunction:' expressions are executed via doEval(), i.e.
    code supplied by the channel XML is run with exec/eval.

    NOTE(review): *cachedPages* is a mutable default argument, so the page
    cache is deliberately(?) shared across top-level calls in a session.
    """
    # cachedPages = {}
    #print 'url',url
    doRegexs = re.compile('\$doregex\[([^\]]*)\]').findall(url)
    print 'doRegexs',doRegexs,regexs
    for rege in doRegexs:
        k=regexs.find("regex",{"name":rege})
        if not k==None:
            # A <cookiejar> child means this step needs (or creates) a
            # cookie session shared with the following requests.
            cookieJarParam=False
            if k.cookiejar:
                cookieJarParam=k.cookiejar.text;
                if '$doregex' in cookieJarParam:
                    cookieJar=getRegexParsed(regexs, cookieJarParam,cookieJar,True, True,cachedPages)
                    cookieJarParam=True
                else:
                    cookieJarParam=True
            if cookieJarParam:
                if cookieJar==None:
                    #print 'create cookie jar'
                    import cookielib
                    cookieJar = cookielib.LWPCookieJar()
                    #print 'cookieJar new',cookieJar
            page=''
            try:
                page = k.page.text
            except: pass
            if '$doregex' in page:
                page=getRegexParsed(regexs, page,cookieJar,recursiveCall=True,cachedPages=cachedPages)
            print 'page',page
            postInput=None
            if k.post:
                postInput = k.post.text
                if '$doregex' in postInput:
                    postInput=getRegexParsed(regexs, postInput,cookieJar,recursiveCall=True,cachedPages=cachedPages)
                print 'post is now',postInput
            # <rawpost> overrides <post>: the body is sent verbatim.
            if k.rawpost:
                postInput = k.rawpost.text
                if '$doregex' in postInput:
                    postInput=getRegexParsed(regexs, postInput,cookieJar,recursiveCall=True,cachedPages=cachedPages,rawPost=True)
                print 'rawpost is now',postInput
            link=''
            if not page=='' and page in cachedPages and forCookieJarOnly==False :
                link = cachedPages[page]
            else:
                if page.startswith('http'):
                    print 'Ingoring Cache',page
                    if '$epoctime$' in page:
                        page=page.replace('$epoctime$',getEpocTime())
                    req = urllib2.Request(page)
                    req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; rv:14.0) Gecko/20100101 Firefox/14.0.1')
                    if k.referer:
                        req.add_header('Referer', k.referer.text)
                    if k.agent:
                        req.add_header('User-agent', k.agent.text)
                    if not cookieJar==None:
                        #print 'cookieJarVal',cookieJar
                        cookie_handler = urllib2.HTTPCookieProcessor(cookieJar)
                        opener = urllib2.build_opener(cookie_handler, urllib2.HTTPBasicAuthHandler(), urllib2.HTTPHandler())
                        opener = urllib2.install_opener(opener)
                        #print 'after cookie jar'
                    post=None
                    # <post> text is 'name:value,name:value' pairs that get
                    # url-encoded; <rawpost> is sent as-is.
                    if postInput and not k.rawpost:
                        postData=postInput
                        splitpost=postData.split(',');
                        post={}
                        for p in splitpost:
                            n=p.split(':')[0];
                            v=p.split(':')[1];
                            post[n]=v
                        post = urllib.urlencode(post)
                    if postInput and k.rawpost:
                        post=postInput
                    if post:
                        response = urllib2.urlopen(req,post)
                    else:
                        response = urllib2.urlopen(req)
                    link = response.read()
                    link=javascriptUnEscape(link)
                    response.close()
                    cachedPages[page] = link
                    if forCookieJarOnly:
                        return cookieJar# do nothing
                elif not page.startswith('http'):
                    if page.startswith('$pyFunction:'):
                        val=doEval(page.split('$pyFunction:')[1],'',cookieJar )
                        if forCookieJarOnly:
                            return cookieJar# do nothing
                        link=val
                    else:
                        link=page
            expres=k.expres.text
            if '$doregex' in expres:
                expres=getRegexParsed(regexs, expres,cookieJar,recursiveCall=True,cachedPages=cachedPages)
            print 'link',link
            print expres
            if not expres=='':
                if expres.startswith('$pyFunction:'):
                    val=doEval(expres.split('$pyFunction:')[1],link, cookieJar)
                    url = url.replace("$doregex[" + rege + "]", val)
                else:
                    # Plain regex: first capture group of expres over the
                    # fetched page; with no page, expres itself is the value.
                    if not link=='':
                        reg = re.compile(expres).search(link)
                        val=reg.group(1).strip()
                    else:
                        val=expres
                    if k.rawpost:
                        print 'rawpost'
                        val=urllib.quote_plus(val)
                    if k.htmlunescape:
                        #val=urllib.unquote_plus(val)
                        import HTMLParser
                        val=HTMLParser.HTMLParser().unescape(val)
                    url = url.replace("$doregex[" + rege + "]",val )
        else:
            # Unknown placeholder names are simply stripped.
            url = url.replace("$doregex[" + rege + "]", '')
    if '$epoctime$' in url:
        url=url.replace('$epoctime$',getEpocTime())
    if '$GUID$' in url:
        import uuid
        url=url.replace('$GUID$',str(uuid.uuid1()).upper())
    if '$get_cookies$' in url:
        url=url.replace('$get_cookies$',getCookiesString(cookieJar))
    if recursiveCall: return url
    print 'final url',url
    return url
def getCookiesString(cookieJar):
try:
cookieString=""
for index, cookie in enumerate(cookieJar):
cookieString+=cookie.name + "=" + cookie.value +";"
except: pass
print 'cookieString',cookieString
return cookieString
def getEpocTime():
    """Return the current UNIX epoch time in whole milliseconds, as a string."""
    import time
    now_ms = time.time() * 1000
    return str(int(now_ms))
def javascriptUnEscape(str):
js=re.findall('unescape\(\'(.*?)\'',str)
print 'js',js
if (not js==None) and len(js)>0:
for j in js:
#print urllib.unquote(j)
str=str.replace(j ,urllib.unquote(j))
return str
def doEval(fun_call,page_data=None,Cookie_Jar=None):
    """Import the module named in *fun_call* and exec the call, returning
    its result as a string.

    SECURITY: *fun_call* comes from the channel XML, so this executes
    attacker-controllable code via exec().  Do not feed untrusted XML.
    *page_data* and *Cookie_Jar* are unused here but are in scope for the
    exec'd expression to reference.
    """
    ret_val=''
    #if profile not in sys.path:
    #    sys.path.append(profile)
    print fun_call
    # Best effort import of 'module' from 'module.func(...)'.
    try:
        py_file='import '+fun_call.split('.')[0]
        #print py_file
        exec( py_file)
    except: pass
    exec ('ret_val='+fun_call)
    #exec('ret_val=1+1')
    print 'ret_val',ret_val
    return str(ret_val)
def replaceSettingsVariables(str):
    """Expand every $setting_xxx$ placeholder in *str* with the value of
    the matching addon setting and return the expanded string."""
    result = str
    if '$setting' in str:
        for key in re.findall('\$(setting_.*?)\$', str):
            result = result.replace('$' + key + '$', selfAddon.getSetting(key))
    return result
def send_web_socket(Cookie_Jar,url_to_call):
    """Perform a best-effort websocket upgrade handshake on *url_to_call*,
    presenting the session cookies from *Cookie_Jar*.

    Used as a fire-and-forget keep-alive: every failure is printed and
    swallowed, and '' is always returned.
    """
    try:
        import urllib2
        import base64
        import uuid
        req = urllib2.Request(url_to_call)
        # Handshake key: a random GUID, base64 encoded (RFC 6455 style).
        str_guid=str(uuid.uuid1()).upper()
        str_guid=base64.b64encode(str_guid)
        req.add_header('Connection', 'Upgrade')
        req.add_header('Upgrade', 'websocket')
        req.add_header('Sec-WebSocket-Key', str_guid)
        req.add_header('Origin','http://www.streamafrik.com')
        req.add_header('Pragma','no-cache')
        req.add_header('Cache-Control','no-cache')
        req.add_header('Sec-WebSocket-Version', '13')
        req.add_header('Sec-WebSocket-Extensions', 'permessage-deflate; client_max_window_bits, x-webkit-deflate-frame')
        req.add_header('User-Agent','Mozilla/5.0 (iPhone; CPU iPhone OS 7_0_4 like Mac OS X) AppleWebKit/537.51.1 (KHTML, like Gecko) Version/7.0 Mobile/11B554a Safari/9537.53')
        cookie_handler = urllib2.HTTPCookieProcessor(Cookie_Jar)
        opener = urllib2.build_opener(cookie_handler, urllib2.HTTPBasicAuthHandler(), urllib2.HTTPHandler())
        opener = urllib2.install_opener(opener)
        # NOTE(review): the third-party 'keepalive' handler installed here
        # replaces the cookie-aware opener built just above -- confirm
        # that is intended before changing.
        from keepalive import HTTPHandler
        keepalive_handler = HTTPHandler()
        opener = urllib2.build_opener(keepalive_handler)
        urllib2.install_opener(opener)
        # BUG FIX: the response was never assigned, so response.close()
        # raised NameError (silently swallowed by the bare except) and the
        # socket leaked until garbage collection.
        response = urllib2.urlopen(req)
        response.close()
        return ''
    except: traceback.print_exc(file=sys.stdout)
    return ''
def get_dag_url(page_data):
    """Resolve a dag.total-stream.net redirector page (or its ASX body in
    *page_data*) to the final playable URL."""
    print 'get_dag_url',page_data
    if page_data.startswith('http://dag.total-stream.net'):
        # The redirector only answers to this set-top-box user agent.
        headers=[('User-Agent','Verismo-BlackUI_(2.4.7.5.8.0.34)')]
        page_data=getUrl(page_data,headers=headers);
    if '127.0.0.1' in page_data:
        # Loopback entries need the alternate rtmp reconstruction.
        return revist_dag(page_data)
    elif re_me(page_data, 'wmsAuthSign%3D([^%&]+)') != '':
        final_url = re_me(page_data, '&ver_t=([^&]+)&') + '?wmsAuthSign=' + re_me(page_data, 'wmsAuthSign%3D([^%&]+)') + '==/mp4:' + re_me(page_data, '\\?y=([^&]+)&')
    else:
        final_url = re_me(page_data, 'href="([^"]+)"[^"]+$')
    # Fall back to the raw page text if nothing could be extracted.
    if len(final_url)==0:
        final_url=page_data
    final_url = final_url.replace(' ', '%20')
    return final_url
def re_me(data, re_patten):
    """Return the first capture group of *re_patten* found in *data*,
    or '' when the pattern does not match."""
    found = re.search(re_patten, data)
    if found is None:
        return ''
    return found.group(1)
def revist_dag(page_data):
    """Build the final stream URL from a dag redirector response body.

    Handles three page shapes: loopback (127.0.0.1) rtmp parameters,
    wmsAuthSign-protected mp4 paths, and a plain HREF redirect (which may
    itself point to another dag1.asx to be resolved by get_dag_url).
    """
    final_url = ''
    if '127.0.0.1' in page_data:
        final_url = re_me(page_data, '&ver_t=([^&]+)&') + ' live=true timeout=15 playpath=' + re_me(page_data, '\\?y=([a-zA-Z0-9-_\\.@]+)')
        if re_me(page_data, 'token=([^&]+)&') != '':
            final_url = final_url + '?token=' + re_me(page_data, 'token=([^&]+)&')
    elif re_me(page_data, 'wmsAuthSign%3D([^%&]+)') != '':
        final_url = re_me(page_data, '&ver_t=([^&]+)&') + '?wmsAuthSign=' + re_me(page_data, 'wmsAuthSign%3D([^%&]+)') + '==/mp4:' + re_me(page_data, '\\?y=([^&]+)&')
    else:
        final_url = re_me(page_data, 'HREF="([^"]+)"')
        if 'dag1.asx' in final_url:
            return get_dag_url(final_url)
    # 'devinlive' hosts are rewritten except for the fs variant.
    if 'devinlivefs.fplive.net' not in final_url:
        final_url = final_url.replace('devinlive', 'flive')
    return final_url
def get_unwise( str_eval):
    """Decode a 'wise' obfuscated blob.

    *str_eval* is the four comma-separated string arguments of the
    javascript function(w,i,s,e) packer; they are bound via exec and fed
    to unwise_func.  Returns '' on any failure.
    SECURITY: exec on text scraped from remote pages.
    """
    page_value=""
    try:
        ss="w,i,s,e=("+str_eval+')'
        exec (ss)
        page_value=unwise_func(w,i,s,e)
    except: traceback.print_exc(file=sys.stdout)
    #print 'unpacked',page_value
    return page_value
def unwise_func( w, i, s, e):
    """Port of the javascript 'wise' deobfuscator.

    Interleaves the first five chars of w/i/s into a key (l1lI) and the
    remainders into a payload (ll1l), then decodes the payload two base-36
    digits at a time, offset +/-1 by the parity of the rotating key char.
    Recurses while the output still contains a nested wise packer.
    Variable names mirror the original obfuscated javascript.
    """
    lIll = 0;
    ll1I = 0;
    Il1l = 0;
    ll1l = [];
    l1lI = [];
    while True:
        if (lIll < 5):
            l1lI.append(w[lIll])
        elif (lIll < len(w)):
            ll1l.append(w[lIll]);
        lIll+=1;
        if (ll1I < 5):
            l1lI.append(i[ll1I])
        elif (ll1I < len(i)):
            ll1l.append(i[ll1I])
        ll1I+=1;
        if (Il1l < 5):
            l1lI.append(s[Il1l])
        elif (Il1l < len(s)):
            ll1l.append(s[Il1l]);
        Il1l+=1;
        # Stop once every character of w, i and s has been consumed.
        if (len(w) + len(i) + len(s) + len(e) == len(ll1l) + len(l1lI) + len(e)):
            break;
    lI1l = ''.join(ll1l)#.join('');
    I1lI = ''.join(l1lI)#.join('');
    ll1I = 0;
    l1ll = [];
    for lIll in range(0,len(ll1l),2):
        #print 'array i',lIll,len(ll1l)
        ll11 = -1;
        # Odd key characters flip the offset from -1 to +1.
        if ( ord(I1lI[ll1I]) % 2):
            ll11 = 1;
        #print 'val is ', lI1l[lIll: lIll+2]
        l1ll.append(chr( int(lI1l[lIll: lIll+2], 36) - ll11));
        ll1I+=1;
        if (ll1I >= len(l1lI)):
            ll1I = 0;
    ret=''.join(l1ll)
    if 'eval(function(w,i,s,e)' in ret:
        print 'STILL GOing'
        ret=re.compile('eval\(function\(w,i,s,e\).*}\((.*?)\)').findall(ret)[0]
        return get_unwise(ret)
    else:
        print 'FINISHED'
        return ret
def get_unpacked( page_value, regex_for_text='', iterations=1, total_iteration=1):
    """Fetch (if *page_value* is a URL) and unpack Dean Edwards p.a.c.k.e.r
    javascript, optionally narrowing to *regex_for_text* first.

    Best effort: on error the value processed so far is returned.
    """
    try:
        if page_value.startswith("http"):
            page_value= getUrl(page_value)
        print 'page_value',page_value
        if regex_for_text and len(regex_for_text)>0:
            page_value=re.compile(regex_for_text).findall(page_value)[0] #get the js variable
        page_value=unpack(page_value,iterations,total_iteration)
    except: traceback.print_exc(file=sys.stdout)
    print 'unpacked',page_value
    return page_value
def unpack(sJavascript,iteration=1, totaliterations=2 ):
    """Extract the (p, a, c, k) arguments of a p.a.c.k.e.r payload from
    *sJavascript* and decode it, recursing up to *totaliterations* times
    for nested packers.

    SECURITY: the argument tuple is bound via exec on scraped page text.
    """
    print 'iteration',iteration
    if sJavascript.startswith('var _0xcb8a='):
        # Variant packer: arguments live in a js array literal.
        aSplit=sJavascript.split('var _0xcb8a=')
        ss="myarray="+aSplit[1].split("eval(")[0]
        exec(ss)
        a1=62
        c1=int(aSplit[1].split(",62,")[1].split(',')[0])
        p1=myarray[0]
        k1=myarray[3]
        # Debug artifact: dumps the symbol table next to the cwd.
        with open('temp file'+str(iteration)+'.js', "wb") as filewriter:
            filewriter.write(str(k1))
        #aa=1/0
    else:
        # Standard packer: split at "return p}('" to reach the arguments.
        aSplit = sJavascript.split("rn p}('")
        print aSplit
        p1,a1,c1,k1=('','0','0','')
        ss="p1,a1,c1,k1=('"+aSplit[1].split(".spli")[0]+')'
        exec(ss)
        k1=k1.split('|')
        aSplit = aSplit[1].split("))'")
    # print ' p array is ',len(aSplit)
    # print len(aSplit )
    #p=str(aSplit[0]+'))')#.replace("\\","")#.replace('\\\\','\\')
    #print aSplit[1]
    #aSplit = aSplit[1].split(",")
    #print aSplit[0]
    #a = int(aSplit[1])
    #c = int(aSplit[2])
    #k = aSplit[3].split(".")[0].replace("'", '').split('|')
    #a=int(a)
    #c=int(c)
    #p=p.replace('\\', '')
    # print 'p val is ',p[0:100],'............',p[-100:],len(p)
    # print 'p1 val is ',p1[0:100],'............',p1[-100:],len(p1)
    #print a,a1
    #print c,a1
    #print 'k val is ',k[-10:],len(k)
    # print 'k1 val is ',k1[-10:],len(k1)
    e = ''
    d = ''#32823
    #sUnpacked = str(__unpack(p, a, c, k, e, d))
    sUnpacked1 = str(__unpack(p1, a1, c1, k1, e, d,iteration))
    #print sUnpacked[:200]+'....'+sUnpacked[-100:], len(sUnpacked)
    # print sUnpacked1[:200]+'....'+sUnpacked1[-100:], len(sUnpacked1)
    #exec('sUnpacked1="'+sUnpacked1+'"')
    if iteration>=totaliterations:
        # print 'final res',sUnpacked1[:200]+'....'+sUnpacked1[-100:], len(sUnpacked1)
        return sUnpacked1#.replace('\\\\', '\\')
    else:
        # print 'final res for this iteration is',iteration
        return unpack(sUnpacked1,iteration+1)#.replace('\\', ''),iteration)#.replace('\\', '');#unpack(sUnpacked.replace('\\', ''))
def __unpack(p, a, c, k, e, d, iteration,v=1):
    """Core p.a.c.k.e.r substitution: replace each base-*a* encoded symbol
    in payload *p* with its dictionary entry k[c], highest index first.

    v=1 uses re.sub with word boundaries (slow but simple); any other
    value uses the hand-rolled findAndReplaceWord equivalent.
    """
    #with open('before file'+str(iteration)+'.js', "wb") as filewriter:
    #    filewriter.write(str(p))
    while (c >= 1):
        c = c -1
        if (k[c]):
            aa=str(__itoaNew(c, a))
            #re.sub('\\b' + aa +'\\b', k[c], p) THIS IS Bloody slow!
            if v==1:
                p=re.sub('\\b' + aa +'\\b', k[c], p)# THIS IS Bloody slow!
            else:
                p=findAndReplaceWord(p,aa,k[c])
            #p=findAndReplaceWord(p,aa,k[c])
    #with open('after file'+str(iteration)+'.js', "wb") as filewriter:
    #    filewriter.write(str(p))
    return p
#
#function equalavent to re.sub('\\b' + aa +'\\b', k[c], p)
def findAndReplaceWord(source_str, word_to_find,replace_with):
    """Whole-word replace: substitute *replace_with* for *word_to_find* in
    *source_str* only where the match is not flanked by identifier
    characters ([a-z0-9_]).  Hand-rolled equivalent of
    re.sub('\\b'+word+'\\b', ...), written for speed on huge payloads.
    """
    splits=None
    splits=source_str.split(word_to_find)
    if len(splits)>1:
        new_string=[]
        current_index=0
        for current_split in splits:
            #print 'here',i
            new_string.append(current_split)
            val=word_to_find#by default assume it was wrong to split
            #if its first one and item is blank then check next item is valid or not
            if current_index==len(splits)-1:
                val='' # last one nothing to append normally
            else:
                if len(current_split)==0: #if blank check next one with current split value
                    if ( len(splits[current_index+1])==0 and word_to_find[0].lower() not in 'abcdefghijklmnopqrstuvwxyz1234567890_') or (len(splits[current_index+1])>0 and splits[current_index+1][0].lower() not in 'abcdefghijklmnopqrstuvwxyz1234567890_'):# first just just check next
                        val=replace_with
                    #not blank, then check current endvalue and next first value
                else:
                    if (splits[current_index][-1].lower() not in 'abcdefghijklmnopqrstuvwxyz1234567890_') and (( len(splits[current_index+1])==0 and word_to_find[0].lower() not in 'abcdefghijklmnopqrstuvwxyz1234567890_') or (len(splits[current_index+1])>0 and splits[current_index+1][0].lower() not in 'abcdefghijklmnopqrstuvwxyz1234567890_')):# first just just check next
                        val=replace_with
            new_string.append(val)
            current_index+=1
        #aaaa=1/0
        source_str=''.join(new_string)
    return source_str
def __itoa(num, radix):
    """Convert a non-negative integer to its string form in the given
    radix using the digit alphabet 0-9a-z (radix <= 36), mirroring
    javascript's Number.toString(radix)."""
    result = ""
    if num == 0:
        return '0'
    while num > 0:
        result = "0123456789abcdefghijklmnopqrstuvwxyz"[num % radix] + result
        # BUG FIX: was 'num /= radix', which is float division on Python 3
        # (infinite loop / wrong digits); floor division is identical under
        # Python 2 for ints and correct under Python 3.
        num //= radix
    return result
def __itoaNew(cc, a):
    """Encode packer symbol index *cc* in base *a* using the packer's
    digit scheme: 0-9a-z for digits <= 35, high ASCII (chr(d + 29))
    for anything larger."""
    prefix = "" if cc < a else __itoaNew(int(cc / a), a)
    remainder = cc % a
    digit = chr(remainder + 29) if remainder > 35 else str(__itoa(remainder, 36))
    return prefix + digit
def getCookiesString(cookieJar):
try:
cookieString=""
for index, cookie in enumerate(cookieJar):
cookieString+=cookie.name + "=" + cookie.value +";"
except: pass
print 'cookieString',cookieString
return cookieString
def getUrl(url, cookieJar=None,post=None, timeout=20, headers=None, returnResponse=False):
    """Fetch *url* with a desktop-Chrome user agent.

    cookieJar: optional cookielib jar attached to the request.
    post: optional urlencoded body (switches the request to POST).
    headers: optional iterable of (name, value) pairs to add.
    returnResponse: when True, the open (unread, unclosed) response
    object is returned instead of the body text.
    """
    cookie_handler = urllib2.HTTPCookieProcessor(cookieJar)
    opener = urllib2.build_opener(cookie_handler, urllib2.HTTPBasicAuthHandler(), urllib2.HTTPHandler())
    #opener = urllib2.install_opener(opener)
    req = urllib2.Request(url)
    req.add_header('User-Agent','Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.154 Safari/537.36')
    if headers:
        for h,hv in headers:
            req.add_header(h,hv)
    response = opener.open(req,post,timeout=timeout)
    # Caller owns closing the response when returnResponse is used.
    if returnResponse: return response
    link=response.read()
    response.close()
    return link;
#copies from lamda's implementation
def get_ustream(url):
    """Poll a ustream m3u8 playlist until it lists variant streams.

    Retries up to 50 times (2s apart).  Returns *url* once an
    EXT-X-STREAM-INF entry appears, or None when the playlist is not an
    m3u8 at all, the retries run out, or any error occurs.
    """
    try:
        for i in range(1, 51):
            result = getUrl(url)
            if "EXT-X-STREAM-INF" in result: return url
            if not "EXTM3U" in result: return
            xbmc.sleep(2000)
        return
    except:
        return
def get_saw_rtmp(page_value, referer=None):
    """Extract an rtmp command line from a 'saw'-style player page.

    *page_value* may be a URL (fetched with optional Referer) or page
    text.  Packed javascript on the page is unpacked to find the iframe
    src, which is fetched and mined for the streamer/file values.
    Returns '<rtmp> playpath=<path> pageUrl=<url>'.
    """
    if referer:
        referer=[('Referer',referer)]
    if page_value.startswith("http"):
        page_url=page_value
        page_value= getUrl(page_value,headers=referer)
    str_pattern="(eval\(function\(p,a,c,k,e,d.*)"
    reg_res=re.compile(str_pattern).findall(page_value)
    r=""
    if reg_res and len(reg_res)>0:
        # Unpack every packed blob; unescape any that embed unescape().
        for v in reg_res:
            r1=get_unpacked(v)
            r2=re_me(r1,'\'(.*?)\'')
            if 'unescape' in r1:
                r1=urllib.unquote(r2)
            r+=r1+'\n'
        print 'final value is ',r
        page_url=re_me(r,'src="(.*?)"')
        page_value= getUrl(page_url,headers=referer)
    print page_value
    rtmp=re_me(page_value,'streamer\'.*?\'(.*?)\'\)')
    playpath=re_me(page_value,'file\',\s\'(.*?)\'')
    return rtmp+' playpath='+playpath +' pageUrl='+page_url
def get_leton_rtmp(page_value, referer=None):
    """Reconstruct a leton rtmp URL from the page's obfuscated javascript.

    The page defines a..d (each the real IP octet multiplied by f) plus a
    path fragment v_part; dividing by f recovers the octets, which are
    joined into 'rtmp://a.b.c.d' + v_part.
    """
    if referer:
        referer=[('Referer',referer)]
    if page_value.startswith("http"):
        page_value= getUrl(page_value,headers=referer)
    str_pattern="var a = (.*?);\s*var b = (.*?);\s*var c = (.*?);\s*var d = (.*?);\s*var f = (.*?);\s*var v_part = '(.*?)';"
    reg_res=re.compile(str_pattern).findall(page_value)[0]
    a,b,c,d,f,v=(reg_res)
    f=int(f)
    # NOTE(review): py2 integer division; octets are exact multiples of f.
    a=int(a)/f
    b=int(b)/f
    c=int(c)/f
    d=int(d)/f
    ret= 'rtmp://' + str(a) + '.' + str(b) + '.' + str(c) + '.' + str(d) + v;
    return ret
def get_packed_iphonetv_url(page_data):
    """Peel the iphonetv obfuscation: while the text still contains a
    'geh(' wrapper, strip the 'lol(' call, pull the quoted payload, then
    base64- and URL-decode it.  Returns the fully decoded string."""
    import re,base64,urllib;
    s=page_data
    while 'geh(' in s:
        if s.startswith('lol('): s=s[5:-1]
        # print 's is ',s
        s=re.compile('"(.*?)"').findall(s)[0];
        s= base64.b64decode(s);
        s=urllib.unquote(s);
        print s
    return s
def decrypt_vaughnlive(encrypted):
    """Decode vaughnlive's obfuscated string.

    Each ':'-separated token is '0m0' followed by ord(char) * 84 * 5;
    strip the marker, undo the multiplication and map back to the char.
    """
    retVal = ""
    for val in encrypted.split(':'):
        # BUG FIX: '/84/5' is float division on Python 3, making chr()
        # raise TypeError; floor division is identical for these ints
        # under Python 2 and correct under Python 3.
        retVal += chr(int(val.replace("0m0", "")) // 84 // 5)
    return retVal
def replaceGLArabVariables(link, d,gcid, title):
try:
GLArabUserName=selfAddon.getSetting( "GLArabUserName" )
GLArabUserPwd=selfAddon.getSetting( "GLArabUserPwd" )
GLArabServerLOW=selfAddon.getSetting( "GLArabServerLOW" )
GLArabServerHD=selfAddon.getSetting( "GLArabServerHD" )
GLArabServerMED=selfAddon.getSetting( "GLArabServerMED" )
GLArabServerLR=selfAddon.getSetting( "GLArabServerLR" )
GLArabServerLOWNP=selfAddon.getSetting( "GLArabServerLOWNP" )
glLocalProxy=selfAddon.getSetting( "isGLProxyEnabled" )=="true" and 'Proxy' in title
glproxyCommon=selfAddon.getSetting( "isGLCommonProxyEnabled" )=="true" and 'Proxy' in title
glProxyAddress=selfAddon.getSetting( "GLproxyName" )
if glProxyAddress=="": glProxyAddress="127.0.0.1"
pattern='channel=(.*?)\&'
link=link.replace('$GLProxyIP$',glProxyAddress)
ProxyCall=True
if 'Proxy' not in title:
print 'Not a proxy'
glLocalProxy=False
glproxyCommon=False
pattern='7777\/(.*?)\.m3u8'
GLArabServerLOW=GLArabServerLOWNP
print 'low nonproxy',GLArabServerLOW
ProxyCall=False
elif glLocalProxy==False and glproxyCommon==False:
print 'Proxy but no proxy call'
return ''
videoPath='KuwaitSpace_Med'
try:
videoPath=re.compile(pattern).findall(link)[0]
except: pass
print 'videoPath',videoPath
if GLArabServerLOW=="": GLArabServerLOW="Try All"
if GLArabServerHD=="": GLArabServerHD="Try All"
if GLArabServerMED=="": GLArabServerMED="Try All"
if GLArabServerLR=="": GLArabServerLR="Try All"
GLArabQuality=selfAddon.getSetting( "GLArabQuality" )
tryLogin=True
if GLArabUserName=="" or GLArabUserPwd=="":# or '$GL-IPHD$' not in link or '$GL-IPMED$' not in link:
tryLogin=False
timeD = 2000 #in miliseconds
line1="Login not defined, using default login and low quality"
GLArabQuality=""
#xbmc.executebuiltin('Notification(%s, %s, %d, %s)'%(__addonname__,line1, timeD, __icon__))
print line1
#if GLArabServer=="": GLArabServer="Low 38.99.146.43:7777"
#GLArabServer=GLArabServer.split(' ')[1]
#GLArabQuality="" if GLArabQuality=="Low" or GLArabQuality=="" else '_'+GLArabQuality
import cookielib
cookieJar = cookielib.LWPCookieJar()
#def getUrl(url, cookieJar=None,post=None, timeout=20, headers=None):
try:
if tryLogin:
mainpage=getUrl('http://www.glarab.com/',cookieJar)
evalidation=re.compile(' id="__EVENTVALIDATION" value="(.*?)"').findall(mainpage)[0]
vstate=re.compile('id="__VIEWSTATE" value="(.*?)"').findall(mainpage)[0]
post={'pageHeader$ScriptManager1':'pageHeader$UpdatePanel1|pageHeader$buttonLogin','__EVENTTARGET':'','__EVENTARGUMENT':'','__VIEWSTATE':vstate,'__EVENTVALIDATION':evalidation,'pageHeader$txtUsername':GLArabUserName,'pageHeader$txtPassword':GLArabUserPwd,'pageHeader$buttonLogin':' '}
post = urllib.urlencode(post)
getUrl('http://www.glarab.com/homepage.aspx',cookieJar,post)
else:
getUrl('http://www.glarab.com/',cookieJar)
except:
print 'login or accessing the site failed.. continuing'
traceback.print_exc(file=sys.stdout)
if gcid:
gcUrl='https://apps.glwiz.com:448/uniwebappandroidads/(S(g01ykv45pojkhpzwap1u14dy))/ajax.ashx?channel=tv&chid=%s&'%gcid
print gcUrl,'gcUrl'
gcidhtml=getUrl(gcUrl)
print gcidhtml
patt='makeHttpRequestNoCache\\(\'(.*?)\''
gcurl='https://apps.glwiz.com:448/uniwebappandroidads/(S(g01ykv45pojkhpzwap1u14dy))/'+ re.compile(patt).findall(gcidhtml)[0]
print 'gcurl',gcurl
gcurl=gcurl.replace(' ','%20')
sessionpage=getUrl(gcurl,cookieJar)
print sessionpage
session=sessionpage.split(':')[2]
sessionserver=sessionpage.split(':')[0].replace(':2077','')
elif glLocalProxy or glproxyCommon:
gcUrl=base64.b64decode('aHR0cHM6Ly9hcHBzLmdsd2l6LmNvbTo0NDgvVW5pV2ViQXBwQW5kcm9pZC9hamF4LmFzaHg/c3RyZWFtPXR2JnBwb2ludD1NQkNNYXNlckRyYW1hX0hpZ2gmY2hpZD0zMDM2MDAmY2huYW1lPU1CQyUyME1hc2VyJTIwRHJhbWEmY2x1c3Rlcm5hbWU9eml4aSY=')
#print gcUrl,'gcUrl'
sessionpage=getUrl(gcUrl)
print sessionpage
session=sessionpage.split(':')[2]
sessionserver=sessionpage.split(':')[0].replace(':2077','')
else:
hell_pat='hello-data", "(.*?)"'
header=[('Referer','http://www.glarab.com/homepage.aspx')]
hellHtml=getUrl('http://www.glarab.com/js/glapi.ashx ',cookieJar,headers=header)
hello_data=re.compile(hell_pat).findall(hellHtml)[0]
header=[('X-hello-data',hello_data),('Referer','http://www.glarab.com/player.aspx')]
sessionpage=getUrl('http://www.glarab.com/ajax.aspx?stream=live&type=reg&ppoint=%s'%videoPath,cookieJar,headers=header)
print sessionpage
session=sessionpage.split('|')[1]
sessionserver=sessionpage.split('|')[2].replace(':2077','')
serverPatern=''
serverAddress=''
type='low'
if '$GL-IPLOW$' in link or 'Low' in title:
if not ProxyCall:
serverPatern='GLArabServerLOWNP.*values="(.*?)"'
else:
serverPatern='GLArabServerLOW.*values="(.*?)"'
link=link.replace('$GL-IPLOW$',GLArabServerLOW)
serverAddress=GLArabServerLOW
type='low'
if '$GL-IPHD$' in link or 'High' in title or 'HD' in title:
print 'i am here',GLArabServerHD
serverPatern='GLArabServerHD.*values="(.*?)"'
link=link.replace('$GL-IPHD$',GLArabServerHD)
serverAddress=GLArabServerHD
type='hd'
if '$GL-IPMED$' in link or 'Med' in title:
serverPatern='GLArabServerMED.*values="(.*?)"'
link=link.replace('$GL-IPMED$',GLArabServerMED)
serverAddress=GLArabServerMED
print GLArabServerMED,'GLArabServerMED '
type='med'
if '$GL-IPLR$' in link or 'LR' in title:
serverPatern='GLArabServerLR.*values="(.*?)"'
link=link.replace('$GL-IPLR$',GLArabServerLR)
serverAddress=GLArabServerLR
print GLArabServerLR,'GLArabServerLR '
type='lr'
link=link.replace('$GL-Qlty$',GLArabQuality)
link=link.replace('$GL-Sesession$',session)
print 'the links is ',link
if 'Try All' in link:
fileName=communityStreamPath+'/../settings.xml'
settingsData= open(fileName, "r").read()
#print settingsData
servers=re.compile(serverPatern).findall(settingsData)[0]
servers=servers.replace('Disabled|Try All|','').split('|')
#print servers
if 1==1:#not glProxy:
servers.insert(0,sessionserver);
print 'new',servers
i=0
for server in servers:
i+=1
if d.iscanceled(): return ""
d.update(30+(50*1/len(servers)), 'Trying server %s'%server)
try:
finalUrl=link.replace('Try All',server)
if not glLocalProxy:
ret=getUrl(finalUrl,timeout=8);
if 'm3u8?' in ret:
link=finalUrl
d.update(90, 'Working server found %s'%server)
return link
break
else:
res=getUrl(finalUrl,timeout=8,returnResponse=True);
data=res.read(2000)
if data and len(data)>1000:
print 'working proxy found',finalUrl
d.update(90, 'Working server found %s'%server)
link=finalUrl
return link;#just return
break
except: pass
if glproxyCommon:
try:
#4500/channel.flv?server=8.21.48.20&channel=AlJadeed_HD
newLink='http://178.33.241.201:4500/channel.flv?server=8.21.48.19&channel=%s'%videoPath
print 'trying common proxy here',newLink
res=getUrl(newLink,timeout=15,returnResponse=True);
data=res.read(2000)
print 'data here',len(data),repr(data)
if data and len(data)>1000:
print 'custom proxy found',newLink
d.update(90, 'Working server found (Common proxy)')
return newLink
except: pass
#if glProxy and 'Try All' in link:
# link=link.replace('Try All',serverAddress)
# if 'High' in title or 'HD' in title:
# link=link.replace('Try All',serverAddress)
# else:
# link=link.replace('Try All',sessionserver.replace(':7777',''))
#if type=='low' or type=='med' or type=='lr':
# link=getProxyLink(glProxyAddress,sessionserver.replace(':7777',''),videoPath,session)
#else:
# link=getProxyLink(glProxyAddress,serverAddress.replace(':7777',''),videoPath,session)
if 'Try All' in link:
print 'no working link',link
link=''
return link
except:
traceback.print_exc(file=sys.stdout)
return link
def getProxyLink(proxy, server, video_path, session):
    """Build the proxy streaming URL for a GLArab channel.

    The proxy listens on port 4500 and relays the given upstream server,
    channel path and session token (upstream port is fixed at 2077).
    """
    template = 'http://{0}:4500/channel.flv?server={1}&channel={2}&port=2077&session={3}'
    return template.format(proxy, server, video_path, session)
| mirzasany/mirza | plugin.video.shahidmbcnet/resources/community/genericPlayer.py | Python | gpl-2.0 | 34,963 |
#!/usr/bin/env python3
import logging
import types
from collections import defaultdict
import os
import sys
import ipaddress
import itertools
import glob
import yaml
from typing import Dict, Tuple
try:
from yaml import CSafeLoader as SafeLoader # type: ignore
except ImportError:
from yaml import SafeLoader # type: ignore
from . import host
from .config import CONFIGINSTANCE as Config
class Hostlist(list):
    "A list of host objects with support for pretty-printing and diffing."

    def __init__(self):
        super().__init__()
        # file basename -> header dict, filled by subclasses that parse files
        self.fileheaders = {}

    def __str__(self):
        return '\n'.join(str(entry) for entry in self)

    def diff(self, otherhostlist) -> types.SimpleNamespace:
        """Compare the public-IP entries of this list against another one.

        Returns a namespace with ``add`` (hosts whose fqdn/ip pair exists or
        changed here), ``remove`` (pairs that only exist or changed in the
        other list) and ``empty`` (True when both lists agree).
        """
        ours = {h.fqdn: h.ip for h in self if h.publicip}
        theirs = {h.fqdn: h.ip for h in otherhostlist if h.publicip}
        by_fqdn = {h.fqdn: h for h in self}
        other_by_fqdn = {h.fqdn: h for h in otherhostlist}
        result = types.SimpleNamespace()
        result.add = [by_fqdn[name] for name, ip in ours.items()
                      if theirs.get(name) != ip]
        result.remove = [other_by_fqdn[name] for name, ip in theirs.items()
                         if ours.get(name) != ip]
        result.empty = (not result.add) and (not result.remove)
        return result
class DNSVSHostlist(Hostlist):
    "Hostlist populated from a DNSVS export mapping fqdn -> (ip, nonunique)."

    def __init__(self, input: Dict[str, Tuple[str, bool]]) -> None:
        super().__init__()
        # unpack each (ip, is_nonunique) pair directly in the loop header
        for hostname, (ip, is_nonunique) in input.items():
            self.append(host.Host(hostname, ip, is_nonunique))
class YMLHostlist(Hostlist):
    """Hostlist filled from the ``*.yml`` files in ``Config["hostlistdir"]``.

    Besides the host entries the class keeps the per-file headers in
    ``self.fileheaders`` and a group-name -> hosts mapping in ``self.groups``,
    and offers a set of consistency checks over the parsed data.
    """

    def __init__(self):
        super().__init__()
        # group name -> list of hosts that declared membership in that group
        self.groups = defaultdict(list)
        input_ymls = sorted(glob.glob(Config["hostlistdir"] + '/*.yml'))
        # lazy %-args: the join is only evaluated when DEBUG logging is active
        logging.debug("Using %s", ', '.join(input_ymls))
        for inputfile in input_ymls:
            self._add_ymlhostfile(inputfile)

    def _add_ymlhostfile(self, fname):
        "parse all hosts in fname and add them to this hostlist"
        shortname = os.path.splitext(os.path.basename(fname))[0]
        if shortname.count('-') > 1:
            # BUG FIX: the %s placeholder was never filled in the original
            logging.error('Filename %s contains too many dashes. Skipped.', fname)
            return
        if '-' in shortname:
            # get abc, def from hostlists/abc-def.yml
            hosttype, institute = shortname.split('-')
        else:
            hosttype = shortname
            institute = None
        try:
            infile = open(fname, 'r')
        except OSError:
            # was a bare ``except:``; OSError covers all open() failures
            logging.error('file %s not readable', fname)
            return
        # context manager so the file handle is closed (the original leaked it)
        with infile:
            try:
                # yaml.load_all is lazy: parse errors surface while iterating,
                # so the loop has to live inside the try block to catch them.
                for yamlout in yaml.load_all(infile, Loader=SafeLoader):
                    self._parse_section(yamlout, fname, hosttype, institute)
            except yaml.YAMLError as e:
                logging.error('file %s not correct yml', fname)
                logging.error(str(e))
                return
        self._fix_docker_ports()

    def _parse_section(self, yamlout, fname, hosttype, institute):
        "record the header of one yml document and add all of its hosts"
        for field in ('header', 'hosts'):
            if field not in yamlout:
                logging.error('missing field %s in %s', field, fname)
                # BUG FIX: the original fell through after logging and then
                # crashed with a KeyError on the missing field
                return
        header = yamlout['header']
        if 'iprange' in header:
            # normalize the two range endpoints to ip_address objects
            ipstart, ipend = header['iprange']
            header['iprange'] = ipaddress.ip_address(ipstart), ipaddress.ip_address(ipend)
        self.fileheaders[os.path.basename(fname)] = header
        for hostdata in yamlout["hosts"]:
            newhost = host.YMLHost(hostdata, hosttype, institute, header)
            self.append(newhost)
            for group in newhost.groups:
                self.groups[group].append(newhost)

    def _fix_docker_ports(self):
        "prefix every docker port mapping with the host's container IP"
        for h in self:
            if 'docker' in h.vars and 'ports' in h.vars['docker']:
                h.vars['docker']['ports'] = [
                    str(h.ip) + ':' + port for port in h.vars['docker']['ports']
                ]

    def print(self, filter):
        "print all hosts matching ``filter``; verbosity follows the root log level"
        filtered = [h for h in self if h.filter(filter)]
        for h in filtered:
            if logging.getLogger().level == logging.DEBUG:
                print(h.output(printgroups=True, printallvars=True))
            elif logging.getLogger().level == logging.INFO:
                print(h.output(delim='\t', printgroups=True))
            else:
                print(h.hostname)

    def check_consistency(self, cnames):
        """run all consistency checks and exit the process on any failure

        Failed checks listed in ``Config["ignore_checks"]`` are tolerated.
        """
        checks = {
            'nonunique': self.check_nonunique(),
            'cnames': self.check_cnames(cnames),
            'duplicates': self.check_duplicates(),
            'missing_mac_ip': self.check_missing_mac_ip(),
        }
        for h in self:
            for hcheck, hstatus in h.run_checks().items():
                if not hstatus:
                    checks[hcheck] = hstatus
        # NOTE(review): always True for methods of this class; the guard looks
        # like a leftover from a shared implementation - confirm before removing
        if isinstance(self, YMLHostlist):
            checks['iprange_overlap'] = self.check_iprange_overlap()
        logging.info("consistency check finished")
        for check, status in checks.items():
            if not status and not ('ignore_checks' in Config and
                                   check in Config["ignore_checks"]):
                sys.exit(1)

    def check_nonunique(self):
        """ensure nonunique flag agrees with nonunique_ips config"""
        success = True
        nonunique_ips = defaultdict(list)
        for h in self:
            ip_fit = str(h.ip) in Config["nonunique_ips"]
            if ip_fit and h.vars['unique']:
                # candidate for a clash: shared ip but not flagged nonunique
                nonunique_ips[str(h.ip)].append(h)
            if not ip_fit and not h.vars['unique']:
                logging.error("Host %s has nonunique ip flag, "
                              "but its ip is not listed in the config.", h)
                success = False
        for ip in nonunique_ips:
            if len(nonunique_ips[ip]) > 1:
                logging.error("More than one host uses a given nonunique ip"
                              " without being flagged:\n" +
                              ('\n'.join(str(x) for x in nonunique_ips[ip])))
                success = False
        return success

    def check_cnames(self, cnames):
        """ensure there are no duplicates between hostlist and cnames"""
        success = True
        for cname in cnames:
            has_dest = False
            for h in self:
                if h.fqdn == cname.fqdn:
                    logging.error("%s conflicts with %s.", cname, h)
                    success = False
                if cname.dest == h.fqdn:
                    has_dest = True
            if not has_dest:
                logging.error("%s points to a non-existing host.", cname)
                success = False
        return success

    def check_duplicates(self):
        """check consistency of hostlist

        detect duplicates (ip, mac, hostname)"""
        success = True
        inverselist = {}
        tocheck_props = ['ip', 'mac', 'hostname']
        for prop in tocheck_props:
            inverselist[prop] = {}
            for h in self:
                myhostprop = getattr(h, prop)
                if myhostprop is None:
                    continue
                if prop == 'ip' and str(myhostprop) in Config["nonunique_ips"]:
                    # allow nonunique ips if listed in config
                    continue
                if myhostprop in inverselist[prop]:
                    logging.error("Found duplicate %s for hosts \n%s\n%s"
                                  % (prop, inverselist[prop][myhostprop], h))
                    success = False
                inverselist[prop][myhostprop] = h
        return success

    def check_missing_mac_ip(self) -> bool:
        """check if hosts are missing an ip or mac"""
        success = True
        for h in self:
            if 'needs_ip' in h.groups and h.ip is None:
                logging.error("Missing IP in %s ", h)
                success = False
        # NOTE(review): always True here as well; kept for parity with the
        # original implementation
        if isinstance(self, YMLHostlist):
            for h in self:
                if 'needs_mac' in h.groups and h.mac is None:
                    logging.error("Missing MAC in %s ", h)
                    success = False
        return success

    def check_iprange_overlap(self) -> bool:
        "check whether any of the ipranges given in headers overlap"
        overlaps = []
        for ita, itb in itertools.combinations(self.fileheaders.items(), 2):
            filea, headera = ita
            fileb, headerb = itb
            try:
                a = headera['iprange']
                b = headerb['iprange']
            except KeyError:
                # one of the files does not have iprange defined, ignore it
                continue
            if headera.get('iprange_allow_overlap', False) or \
               headerb.get('iprange_allow_overlap', False):
                # FIXME: check overlap for internal IPs
                continue
            # the ranges intersect iff the larger low end is not past the
            # smaller high end
            overlap_low = max(a[0], b[0])
            overlap_high = min(a[1], b[1])
            if overlap_low <= overlap_high:
                overlaps.append((overlap_low, overlap_high, filea, fileb))
        if overlaps:
            for overlap in overlaps:
                logging.error("Found overlap from %s to %s in files %s and %s." % overlap)
        return not bool(overlaps)
| particleKIT/hostlist | hostlist/hostlist.py | Python | gpl-3.0 | 9,496 |
#!/usr/bin/env python
from home import lightingControl as lc
import logging
from twisted.protocols.basic import LineReceiver
from twisted.internet.protocol import ServerFactory
from twisted.internet import task
from twisted.internet import reactor
logging.basicConfig()
class lightingProtocol(LineReceiver):
    """Line-based protocol answering lighting state requests.

    Currently only the ``read`` command is recognized; everything else is
    ignored.
    """

    def __init__(self):
        self.logger = logging.getLogger('LightingProtocol')
        self.logger.setLevel(logging.DEBUG)

    def lineReceived(self, line):
        """Handle one request line from the client."""
        sline = line.split()
        # BUG FIX: guard against blank lines, which made sline[0] raise
        # IndexError in the original implementation.
        if sline and sline[0] == "read":
            self.logger.debug("Sending Data")
class lightingServer(ServerFactory):
    # Twisted factory: builds one lightingProtocol per incoming connection.
    protocol = lightingProtocol
    def __init__(self):
        # Placeholder attribute, never read elsewhere in this file.
        # TODO(review): looks like leftover debug state - confirm and remove.
        self.p = "poop"
if __name__ == "__main__":
    lights = lc.lightingControl()
    # Re-apply the light settings every 30 seconds via the reactor.
    light_loop = task.LoopingCall(lights.set_lights)
    light_loop.start(30)
    # Serve lighting queries on TCP port 50000 until the reactor stops.
    reactor.listenTCP(50000, lightingServer())
    reactor.run()
| RossWilliamson/home_automation | bin/lightingServer.py | Python | bsd-2-clause | 906 |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
import io
import re
from glob import glob
from os.path import basename
from os.path import dirname
from os.path import join
from os.path import splitext
from setuptools import find_packages
from setuptools import setup
def read(*names, **kwargs):
    """Return the text of a file given by path components relative to setup.py.

    ``encoding`` may be passed as a keyword argument (default: ``utf8``).
    """
    # BUG FIX: use a context manager so the file handle is closed; the
    # original returned from an open() chain and leaked the handle.
    with io.open(
        join(dirname(__file__), *names),
        encoding=kwargs.get('encoding', 'utf8')
    ) as stream:
        return stream.read()
# Build/packaging configuration for the ``mfs`` distribution.
setup(
    name='mfs',
    version='0.1.0',
    license='MIT license',
    description='mfs is a set of utilities to ease image download from some Russian modelling forums',
    # README with the badges block stripped, followed by the CHANGELOG with
    # sphinx roles such as :func:`x` reduced to plain ``x``.
    long_description='%s\n%s' % (
        re.compile('^.. start-badges.*^.. end-badges', re.M | re.S).sub('', read('README.rst')),
        re.sub(':[a-z]+:`~?(.*?)`', r'``\1``', read('CHANGELOG.rst'))
    ),
    author='Alexandre Ovtchinnikov',
    author_email='abc@miroag.com',
    url='https://github.com/miroag/mfs',
    packages=find_packages('src'),
    package_dir={'': 'src'},
    # expose every top-level module under src/ as a py_module
    py_modules=[splitext(basename(path))[0] for path in glob('src/*.py')],
    include_package_data=True,
    zip_safe=False,
    classifiers=[
        # complete classifier list: http://pypi.python.org/pypi?%3Aaction=list_classifiers
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: Unix',
        'Operating System :: POSIX',
        'Operating System :: Microsoft :: Windows',
        'Programming Language :: Python',
        # 'Programming Language :: Python :: 2.7',
        # 'Programming Language :: Python :: 3',
        # 'Programming Language :: Python :: 3.3',
        # 'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: PyPy',
        # uncomment if you test on these interpreters:
        # 'Programming Language :: Python :: Implementation :: IronPython',
        # 'Programming Language :: Python :: Implementation :: Jython',
        # 'Programming Language :: Python :: Implementation :: Stackless',
        'Topic :: Utilities',
    ],
    keywords=[
        # eg: 'keyword1', 'keyword2', 'keyword3',
    ],
    install_requires=[
        'beautifulsoup4', 'requests', 'aiohttp', 'tqdm', 'docopt'
    ],
    setup_requires=[
        'pytest-runner',
    ],
    tests_require=[
        'pytest',
    ],
    extras_require={
        # eg:
        # 'rst': ['docutils>=0.11'],
        # ':python_version=="2.6"': ['argparse'],
    },
    entry_points={
        'console_scripts': [
            'mfs = mfs.cli:main',
        ]
    },
)
| miroag/mfs | setup.py | Python | mit | 2,863 |
## \file
## \ingroup tutorial_dataframe
## \notebook -draw
## \brief The Higgs to two photons analysis from the ATLAS Open Data 2020 release, with RDataFrame.
##
## This tutorial is the Higgs to two photons analysis from the ATLAS Open Data release in 2020
## (http://opendata.atlas.cern/release/2020/documentation/). The data was taken with the ATLAS detector
## during 2016 at a center-of-mass energy of 13 TeV. Although the Higgs to two photons decay is very rare,
## the contribution of the Higgs can be seen as a narrow peak around 125 GeV because of the excellent
## reconstruction and identification efficiency of photons at the ATLAS experiment.
##
## The analysis is translated to a RDataFrame workflow processing 1.7 GB of simulated events and data.
##
## \macro_image
## \macro_code
## \macro_output
##
## \date February 2020
## \author Stefan Wunsch (KIT, CERN)
import ROOT
import os
# Enable multi-threading
ROOT.ROOT.EnableImplicitMT()
# Create a ROOT dataframe for each dataset
path = "root://eospublic.cern.ch//eos/opendata/atlas/OutreachDatasets/2020-01-22"
df = {}
df["data"] = ROOT.RDataFrame("mini", (os.path.join(path, "GamGam/Data/data_{}.GamGam.root".format(x)) for x in ("A", "B", "C", "D")))
df["ggH"] = ROOT.RDataFrame("mini", os.path.join(path, "GamGam/MC/mc_343981.ggH125_gamgam.GamGam.root"))
df["VBF"] = ROOT.RDataFrame("mini", os.path.join(path, "GamGam/MC/mc_345041.VBFH125_gamgam.GamGam.root"))
processes = list(df.keys())
# Apply scale factors and MC weight for simulated events and a weight of 1 for the data
for p in ["ggH", "VBF"]:
    df[p] = df[p].Define("weight",
            "scaleFactor_PHOTON * scaleFactor_PhotonTRIGGER * scaleFactor_PILEUP * mcWeight");
df["data"] = df["data"].Define("weight", "1.0")
# Select the events for the analysis
for p in processes:
    # Apply preselection cut on photon trigger
    df[p] = df[p].Filter("trigP")
    # Find two good photons with tight ID, pt > 25 GeV and not in the transition region between barrel and endcap
    df[p] = df[p].Define("goodphotons", "photon_isTightID && (photon_pt > 25000) && (abs(photon_eta) < 2.37) && ((abs(photon_eta) < 1.37) || (abs(photon_eta) > 1.52))")\
                 .Filter("Sum(goodphotons) == 2")
    # Take only isolated photons (relative cone isolation below 0.065 for both candidates)
    df[p] = df[p].Filter("Sum(photon_ptcone30[goodphotons] / photon_pt[goodphotons] < 0.065) == 2")\
                 .Filter("Sum(photon_etcone20[goodphotons] / photon_pt[goodphotons] < 0.065) == 2")
.Filter("Sum(photon_etcone20[goodphotons] / photon_pt[goodphotons] < 0.065) == 2")
# Compile a function to compute the invariant mass of the diphoton system
ROOT.gInterpreter.Declare(
"""
using Vec_t = const ROOT::VecOps::RVec<float>;
float ComputeInvariantMass(Vec_t& pt, Vec_t& eta, Vec_t& phi, Vec_t& e) {
    ROOT::Math::PtEtaPhiEVector p1(pt[0], eta[0], phi[0], e[0]);
    ROOT::Math::PtEtaPhiEVector p2(pt[1], eta[1], phi[1], e[1]);
    return (p1 + p2).mass() / 1000.0;
}
""")
# Define a new column with the invariant mass and perform final event selection
hists = {}
for p in processes:
    # Make four vectors and compute invariant mass (in GeV, hence the /1000 above)
    df[p] = df[p].Define("m_yy", "ComputeInvariantMass(photon_pt[goodphotons], photon_eta[goodphotons], photon_phi[goodphotons], photon_E[goodphotons])")
    # Make additional kinematic cuts and select mass window
    df[p] = df[p].Filter("photon_pt[goodphotons][0] / 1000.0 / m_yy > 0.35")\
                 .Filter("photon_pt[goodphotons][1] / 1000.0 / m_yy > 0.25")\
                 .Filter("m_yy > 105 && m_yy < 160")
    # Book histogram of the invariant mass with this selection
    hists[p] = df[p].Histo1D(
            ROOT.RDF.TH1DModel(p, "Diphoton invariant mass; m_{#gamma#gamma} [GeV];Events", 30, 105, 160),
            "m_yy", "weight")
# Run the event loop (booking above is lazy; GetValue forces execution)
ggh = hists["ggH"].GetValue()
vbf = hists["VBF"].GetValue()
data = hists["data"].GetValue()
# Create the plot
# Set styles
ROOT.gROOT.SetStyle("ATLAS")
# Create canvas with pads for main plot and data/MC ratio
c = ROOT.TCanvas("c", "", 700, 750)
upper_pad = ROOT.TPad("upper_pad", "", 0, 0.35, 1, 1)
lower_pad = ROOT.TPad("lower_pad", "", 0, 0, 1, 0.35)
for p in [upper_pad, lower_pad]:
    p.SetLeftMargin(0.14)
    p.SetRightMargin(0.05)
    p.SetTickx(False)
    p.SetTicky(False)
upper_pad.SetBottomMargin(0)
lower_pad.SetTopMargin(0)
lower_pad.SetBottomMargin(0.3)
upper_pad.Draw()
lower_pad.Draw()
# Fit signal + background model to data: a third-order polynomial plus a
# Gaussian with amplitude [4], mean [5] and width [6].
upper_pad.cd()
fit = ROOT.TF1("fit", "([0]+[1]*x+[2]*x^2+[3]*x^3)+[4]*exp(-0.5*((x-[5])/[6])^2)", 105, 160)
# The Gaussian mean is fixed at the 125 GeV Higgs mass hypothesis; amplitude
# and width are fixed as well, so only the background polynomial floats.
fit.FixParameter(5, 125.0)
fit.FixParameter(4, 119.1)
fit.FixParameter(6, 2.39)
fit.SetLineColor(2)
fit.SetLineStyle(1)
fit.SetLineWidth(2)
data.Fit("fit", "", "E SAME", 105, 160)
fit.Draw("SAME")
# Draw background (the polynomial component of the combined fit)
bkg = ROOT.TF1("bkg", "([0]+[1]*x+[2]*x^2+[3]*x^3)", 105, 160)
for i in range(4):
    bkg.SetParameter(i, fit.GetParameter(i))
bkg.SetLineColor(4)
bkg.SetLineStyle(2)
bkg.SetLineWidth(2)
bkg.Draw("SAME")
# Draw data
data.SetMarkerStyle(20)
data.SetMarkerSize(1.2)
data.SetLineWidth(2)
data.SetLineColor(ROOT.kBlack)
data.Draw("E SAME")
data.SetMinimum(1e-3)
data.SetMaximum(8e3)
data.GetYaxis().SetLabelSize(0.045)
data.GetYaxis().SetTitleSize(0.05)
data.SetStats(0)
data.SetTitle("")
# Scale simulated events with luminosity * cross-section / sum of weights
# and merge to single Higgs signal
lumi = 10064.0
ggh.Scale(lumi * 0.102 / 55922617.6297)
vbf.Scale(lumi * 0.008518764 / 3441426.13711)
higgs = ggh.Clone()
higgs.Add(vbf)
higgs.Draw("HIST SAME")
# Draw ratio (data minus fitted background) in the lower pad
lower_pad.cd()
ratiobkg = ROOT.TF1("zero", "0", 105, 160)
ratiobkg.SetLineColor(4)
ratiobkg.SetLineStyle(2)
ratiobkg.SetLineWidth(2)
ratiobkg.SetMinimum(-125)
ratiobkg.SetMaximum(250)
ratiobkg.GetXaxis().SetLabelSize(0.08)
ratiobkg.GetXaxis().SetTitleSize(0.12)
ratiobkg.GetXaxis().SetTitleOffset(1.0)
ratiobkg.GetYaxis().SetLabelSize(0.08)
ratiobkg.GetYaxis().SetTitleSize(0.09)
ratiobkg.GetYaxis().SetTitle("Data - Bkg.")
ratiobkg.GetYaxis().CenterTitle()
ratiobkg.GetYaxis().SetTitleOffset(0.7)
ratiobkg.GetYaxis().SetNdivisions(503, False)
ratiobkg.GetYaxis().ChangeLabel(-1, -1, 0)
ratiobkg.GetXaxis().SetTitle("m_{#gamma#gamma} [GeV]")
ratiobkg.Draw()
# Signal component of the fit: sample the full fit into a fine histogram and
# subtract the background polynomial.
ratiosig = ROOT.TH1F("ratiosig", "ratiosig", 5500, 105, 160)
ratiosig.Eval(fit)
ratiosig.SetLineColor(2)
ratiosig.SetLineStyle(1)
ratiosig.SetLineWidth(2)
ratiosig.Add(bkg, -1)
ratiosig.Draw("SAME")
ratiodata = data.Clone()
ratiodata.Add(bkg, -1)
ratiodata.Draw("E SAME")
# Propagate the statistical uncertainties of the data points.
# BUG FIX: ROOT histogram bins run from 1 to GetNbinsX() inclusive; the
# original range(1, GetNbinsX()) skipped the last bin.
for i in range(1, data.GetNbinsX() + 1):
    ratiodata.SetBinError(i, data.GetBinError(i))
# Add legend
upper_pad.cd()
legend = ROOT.TLegend(0.55, 0.55, 0.89, 0.85)
legend.SetTextFont(42)
legend.SetFillStyle(0)
legend.SetBorderSize(0)
legend.SetTextSize(0.05)
legend.SetTextAlign(32)
legend.AddEntry(data, "Data" ,"lep")
legend.AddEntry(bkg, "Background", "l")
legend.AddEntry(fit, "Signal + Bkg.", "l")
legend.AddEntry(higgs, "Signal", "l")
legend.Draw("SAME")
# Add ATLAS label and the centre-of-mass energy / luminosity line
text = ROOT.TLatex()
text.SetNDC()
text.SetTextFont(72)
text.SetTextSize(0.05)
text.DrawLatex(0.18, 0.84, "ATLAS")
text.SetTextFont(42)
text.DrawLatex(0.18 + 0.13, 0.84, "Open Data")
text.SetTextSize(0.04)
text.DrawLatex(0.18, 0.78, "#sqrt{s} = 13 TeV, 10 fb^{-1}");
# Save the plot
c.SaveAs("HiggsToTwoPhotons.pdf");
| karies/root | tutorials/dataframe/df104_HiggsToTwoPhotons.py | Python | lgpl-2.1 | 7,173 |
# -*-coding:utf-8 -*
# Copyright (c) 2011-2015, Intel Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Integer parameter type testcases - INT32_Max
List of tested functions :
--------------------------
- [setParameter] function
- [getParameter] function
Initial Settings :
------------------
INT32_Max :
    - size = 32
- range : [-2147483648, 2147483647]
Test cases :
------------
- INT32_Max parameter min value = -2147483648
- INT32_Max parameter min value out of bounds = -2147483649
- INT32_Max parameter max value = 2147483647
- INT32_Max parameter max value out of bounds = 2147483648
- INT32_Max parameter in nominal case = 50
"""
import os
from Util.PfwUnitTestLib import PfwTestCase
from Util import ACTLogging
log=ACTLogging.Logger()
# Test of type INT32_Max - range [-2147483648, 2147483647]
class TestCases(PfwTestCase):
    """Boundary and nominal value tests for the INT32_Max parameter type.

    The parameter under test is a 32 bit signed integer covering the full
    range [-2147483648, 2147483647]; every check verifies both the parameter
    blackboard (via getParameter) and the raw filesystem image.
    """

    def setUp(self):
        # Tuning mode must be on to allow setParameter on the test path.
        self.param_name = "/Test/Test/TEST_DIR/INT32_Max"
        self.pfw.sendCmd("setTuningMode", "on")

    def tearDown(self):
        self.pfw.sendCmd("setTuningMode", "off")

    def _filesystem_value(self):
        """Return the parameter's filesystem image without the trailing newline."""
        # Context manager avoids the leaked handles of the repeated
        # open(...).read() pattern in the original tests.
        with open(os.environ["PFW_RESULT"] + "/INT32_Max") as param_file:
            return param_file.read()[:-1]

    def _check_set_succeeds(self, value, hex_value):
        """Set the parameter to `value` and verify blackboard and filesystem.

        `value` is the decimal string to set, `hex_value` the expected
        filesystem image of that value.
        """
        out, err = self.pfw.sendCmd("setParameter", self.param_name, value)
        assert err is None, log.E("when setting parameter %s : %s"
                                  % (self.param_name, err))
        assert out == "Done", log.F("when setting parameter %s : %s"
                                    % (self.param_name, out))
        # The blackboard must echo the decimal value back ...
        out, err = self.pfw.sendCmd("getParameter", self.param_name, "")
        assert err is None, log.E("when setting parameter %s : %s"
                                  % (self.param_name, err))
        assert out == value, log.F("BLACKBOARD : Incorrect value for %s, expected: %s, found: %s"
                                   % (self.param_name, value, out))
        # ... and the filesystem must hold the matching hex image.
        assert self._filesystem_value() == hex_value, log.F("FILESYSTEM : parameter update error")

    def _check_set_rejected(self, value):
        """Attempt an out-of-range set; verify it fails without side effect."""
        param_check = self._filesystem_value()
        out, err = self.pfw.sendCmd("setParameter", self.param_name, value, expectSuccess=False)
        assert err is None, log.E("when setting parameter %s : %s"
                                  % (self.param_name, err))
        assert out != "Done", log.F("PFW : Error not detected when setting parameter %s out of bounds"
                                    % (self.param_name))
        # The stored value must be untouched after the rejected set.
        assert self._filesystem_value() == param_check, log.F("FILESYSTEM : Forbiden parameter change")

    def test_Nominal_Case(self):
        """
        Testing INT32_Max in nominal case = 50
        --------------------------------------
        Set INT32_Max to 50 with [setParameter], check the result with
        [getParameter] and the filesystem image.

        Expected result : parameter set to 50, blackboard and filesystem
        values checked.
        """
        log.D(self.test_Nominal_Case.__doc__)
        log.I("INT32_Max parameter in nominal case = 50")
        self._check_set_succeeds("50", "0x32")
        log.I("test OK")

    def test_TypeMin(self):
        """
        Testing INT32_Max minimal value = -2147483648
        ---------------------------------------------
        Set INT32_Max to its minimum with [setParameter], check the result
        with [getParameter] and the filesystem image.

        Expected result : parameter set to -2147483648, blackboard and
        filesystem values checked.
        """
        log.D(self.test_TypeMin.__doc__)
        log.I("INT32_Max parameter min value = -2147483648")
        self._check_set_succeeds("-2147483648", "0x80000000")
        log.I("test OK")

    def test_TypeMin_Overflow(self):
        """
        Testing INT32_Max parameter value out of negative range
        -------------------------------------------------------
        Set INT32_Max to -2147483649 with [setParameter]; the command must
        fail and the stored value must not change.
        """
        log.D(self.test_TypeMin_Overflow.__doc__)
        log.I("INT32_Max parameter min value out of bounds = -2147483649")
        self._check_set_rejected("-2147483649")
        log.I("test OK")

    def test_TypeMax(self):
        """
        Testing INT32_Max parameter maximum value
        -----------------------------------------
        Set INT32_Max to 2147483647 with [setParameter], check the result
        with [getParameter] and the filesystem image.

        Expected result : parameter set to 2147483647, blackboard and
        filesystem values checked.
        """
        log.D(self.test_TypeMax.__doc__)
        log.I("INT32_Max parameter max value = 2147483647")
        self._check_set_succeeds("2147483647", "0x7fffffff")
        log.I("test OK")

    def test_TypeMax_Overflow(self):
        """
        Testing INT32_Max parameter value out of positive range
        -------------------------------------------------------
        Set INT32_Max to 2147483648 with [setParameter]; the command must
        fail and the stored value must not change.
        """
        log.D(self.test_TypeMax_Overflow.__doc__)
        log.I("INT32_Max parameter max value out of bounds = 2147483648")
        self._check_set_rejected("2147483648")
        log.I("test OK")
| miguelgaio/parameter-framework | test/functional-tests-legacy/PfwTestCase/Types/tINT32_Max.py | Python | bsd-3-clause | 10,962 |
#!/usr/bin/env python
from ..models import BVLCAlex
import chainer
import fcn
def copy_alex_chainermodel(chainermodel_path, model):
    """Copy convolutional weights from a BVLC AlexNet chainermodel into `model`.

    Fully-connected layers ('fc*') are skipped; a layer is copied only when
    `model` has a link of the same name and an identically shaped W.
    """
    bvlc_model = BVLCAlex()
    chainer.serializers.load_hdf5(chainermodel_path, bvlc_model)
    for link in bvlc_model.children():
        link_name = link.name
        if link_name.startswith('fc'):
            continue
        # BUG FIX: getattr with a default; the original getattr(model, link_name)
        # raised AttributeError when the target model lacked the layer.
        layer = getattr(model, link_name, None)
        if layer is not None:
            if layer.W.data.shape == link.W.data.shape:
                # NOTE(review): only W is copied; biases keep their initial
                # values - confirm this is intended.
                layer.W.data = link.W.data
            else:
                print('link_name {0} has different shape {1} != {2}'.format(
                    link_name, layer.W.data.shape, link.W.data.shape))
def copy_vgg16_chainermodel(model):
    """Initialize `model`'s conv layers (and matching fc6/fc7) from pretrained VGG16.

    Convolution weights and biases must match shapes exactly; fc6/fc7 are
    copied with a reshape when the element counts agree and the target has
    such a layer at all.
    """
    vgg16_model = fcn.models.VGG16()
    vgg16_path = vgg16_model.download()
    chainer.serializers.load_npz(vgg16_path, vgg16_model)
    for child in vgg16_model.children():
        name = child.name
        if name.startswith('conv'):
            src = getattr(vgg16_model, name)
            dst = getattr(model, name)
            assert src.W.shape == dst.W.shape
            assert src.b.shape == dst.b.shape
            dst.W.data[...] = src.W.data[...]
            dst.b.data[...] = src.b.data[...]
        elif name in ('fc6', 'fc7'):
            if not hasattr(model, name):
                continue
            src = getattr(vgg16_model, name)
            dst = getattr(model, name)
            if src.W.size == dst.W.size and src.b.size == dst.b.size:
                dst.W.data[...] = src.W.data.reshape(dst.W.shape)[...]
                dst.b.data[...] = src.b.data.reshape(dst.b.shape)[...]
| start-jsk/jsk_apc | demos/selective_dualarm_stowing/python/selective_dualarm_stowing/utils/copy_chainermodel.py | Python | bsd-3-clause | 1,611 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.