text stringlengths 4 1.02M | meta dict |
|---|---|
"""
[8/22/2012] Challenge #90 [intermediate] (Scientific Units Calculator)
https://www.reddit.com/r/dailyprogrammer/comments/ynw65/8222012_challenge_90_intermediate_scientific/
In the SI system, measurements of scientific quantities are expressed in terms of 7 standard 'base' units for various
quantities:
the "second" for time, the "meter" for length, "kilogram" for mass, the "ampere" for current, the "kelvin" for
temperature, the "mole" for amount of substance, and the
"candela" for light intensity.
These base units and exponents of them fully describe any measurable quantity. For example, lets say we wanted to
describe force. Force is defined as mass * acceleration.
acceleration is defined as velocity per second. velocity is defined as length per second. Therefore, force is
mass*length per second per second, so force is defined as
m kg s^-1 s^-1 in SI units.
Write a program that can read in a units expression involving multiplying and dividing units and output the correct
expression of those units in SI base units. Furthermore, you should make it so that your program ALSO accepts SI
derived units as well, such as "watts" or "pascals" (there is a list of SI derived units and their base definitions
[here] (http://en.wikipedia.org/wiki/SI_derived_units)). If you can, you should also include some simple aliases that
aren't even base units, such as 'mass' is 'kg' and 'velocity' is m/s.
Examples (input,output):
m/s*m*cd -> s^-1 m^2 cd
newton/m -> s^-2 kg
watt/velocity -> s^-2 m kg
BONUS: Make it so, when printing, if there is a simpler name for the quantity output than the base name, then it also
prints that as well. For example, s^-2 m kg is also
the definition of force in newtons, so when it prints watt/velocity it should output
s^-2 m kg (Newtons)
SUPER BONUS: Correctly parse and handle metric prefixes, like giga, micro, nano, etc. So we could have
kilo-watt/mega-joule -> kilo-second
"""
import re

# The 7 SI base units, in the order the challenge lists them
# (second, meter, kilogram, ampere, kelvin, mole, candela).
_BASE_ORDER = ("s", "m", "kg", "A", "K", "mol", "cd")

# Every recognized unit expressed as {base unit: exponent}.
# Derived-unit expansions follow the Wikipedia "SI derived units" table.
_UNITS = {
    # base units and their long names
    "s": {"s": 1}, "second": {"s": 1},
    "m": {"m": 1}, "meter": {"m": 1},
    "kg": {"kg": 1}, "kilogram": {"kg": 1},
    "A": {"A": 1}, "ampere": {"A": 1},
    "K": {"K": 1}, "kelvin": {"K": 1},
    "mol": {"mol": 1}, "mole": {"mol": 1},
    "cd": {"cd": 1}, "candela": {"cd": 1},
    # derived units
    "hertz": {"s": -1},
    "newton": {"kg": 1, "m": 1, "s": -2},
    "pascal": {"kg": 1, "m": -1, "s": -2},
    "joule": {"kg": 1, "m": 2, "s": -2},
    "watt": {"kg": 1, "m": 2, "s": -3},
    "coulomb": {"s": 1, "A": 1},
    "volt": {"kg": 1, "m": 2, "s": -3, "A": -1},
    # simple aliases
    "mass": {"kg": 1},
    "velocity": {"m": 1, "s": -1},
    "acceleration": {"m": 1, "s": -2},
    "force": {"kg": 1, "m": 1, "s": -2},
}


def si_base_units(expression):
    """Reduce a '*'/'/' units expression to its SI base-unit form.

    :param expression: e.g. "watt/velocity" or "m/s*m*cd".
    :returns: str such as "s^-2 m kg" (exponent 1 is left implicit).
    :raises ValueError: if the expression contains an unknown unit.
    """
    exponents = {}
    sign = 1  # +1 after '*', -1 after '/'; first token is multiplied
    for token in re.split(r"([*/])", expression.replace(" ", "")):
        if token == "*":
            sign = 1
        elif token == "/":
            sign = -1
        else:
            try:
                definition = _UNITS[token]
            except KeyError:
                raise ValueError("unknown unit: %r" % token)
            for base, exp in definition.items():
                exponents[base] = exponents.get(base, 0) + sign * exp
    parts = []
    for base in _BASE_ORDER:
        exp = exponents.get(base, 0)
        if exp == 1:
            parts.append(base)
        elif exp != 0:
            parts.append("%s^%d" % (base, exp))
    return " ".join(parts)


def main():
    """Demonstrate the converter on the challenge's example inputs."""
    for expr in ("m/s*m*cd", "newton/m", "watt/velocity"):
        print("%s -> %s" % (expr, si_base_units(expr)))


if __name__ == "__main__":
    main()
| {
"content_hash": "0d29538ec56ed551ad812693d2e40c23",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 118,
"avg_line_length": 49.390243902439025,
"alnum_prop": 0.7387654320987654,
"repo_name": "DayGitH/Python-Challenges",
"id": "0374e696879596789cb59123ec2352627e62316f",
"size": "2025",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "DailyProgrammer/DP20120822B.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "OpenEdge ABL",
"bytes": "5002"
},
{
"name": "Python",
"bytes": "2471582"
}
],
"symlink_target": ""
} |
'''
.. py:module:: contexts
:platform: Unix
Contexts for framing the tweets.
'''
import sys
import os
from abc import ABCMeta
import time
import json
import urllib
import urllib2
import operator
import logging
import traceback
import datetime
from random import choice
from PIL import Image
from pattern.en import parse
from collections import Counter
if 'DJANGO_SETTINGS_MODULE' not in os.environ:
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
os.environ['DJANGO_SETTINGS_MODULE'] = 'TwatBot.settings'
from django.conf import settings
from tweets.web import flickr, tinyurl
from tweets.models import Tweet, FlickrTweetImage, URLTweetImage
from tweets.utils import text, image, color as cu
from tweets.sentence import generate_text
from tweets.new_age import get_closest_mood_color
import interjections
logger = logging.getLogger("tweets.default")
class ABCContext(object):
    """Abstract base class for contexts.

    .. note::
        Create a child class of this and override :py:func:`build_tweet`.
    """
    # BUG FIX: ``__metaclass__`` was previously declared on a nested ``Meta``
    # class, where it has no effect. For ABCMeta to apply (Python 2 syntax)
    # it must be assigned directly in the class body.
    __metaclass__ = ABCMeta

    def build_tweet(self, color_code, color_name, **kwargs):
        """Build tweet for color and its name.

        .. note::
            Override in child class!
        """
        return "%s %s" % (color_code, color_name)
class TextContext(ABCContext):
    """Basic text context. Nothing fancy here, yet."""

    def build_tweet(self, color_name, **kwargs):
        """Build tweet for color name.

        **Args:**
            | color_name (str): Human readable name for the color.
            | \**kwargs: Optional keyword arguments. Should have ``color_code``
              -key and supports optionally at least ``everycolorbot_url`` which
              personalizes the response for Everycolorbot.

        **Returns:**
            tuple (str, float), tweet for the color code-name pair and its value.
        """
        if "everycolorbot_url" in kwargs:
            return (".everycolorbot Gee, that's a nice color. I call it %s." % color_name, 0.0)
        # BUG FIX: ``color_code`` was referenced as a bare name here, which
        # raised NameError; it is documented to arrive via ``kwargs``.
        color_code = kwargs.get("color_code", "")
        return ("By hard thinking, I have come to the conclusion that color %s is called %s. #geniusatwork" % (color_code, color_name), 0.0)
class DCContext(ABCContext):
    """Framing with 'random' new age liirumlaarum from Deepak Chopra.

    Uses `Wisdom of Deepak Chopra <www.wisdomofchopra.com>`_ as a help for framing.

    .. warning::
        www.wisdomofchopra.com seems to be down.
    """
    url = 'http://www.wisdomofchopra.com/iframe.php'

    def __init__(self):
        # Imported lazily so the module can be loaded without the heavy
        # bs4/nltk dependencies when this context is not used.
        from bs4 import BeautifulSoup as bs
        from nltk import word_tokenize, pos_tag
        from nltk.corpus import wordnet
        self.bs = bs
        self.tokenizer = word_tokenize
        self.pos_tag = pos_tag
        self.wordnet = wordnet

    def build_tweet(self, color_name, wisdom_count = 5, **kwargs):
        """Build tweet for color name.

        **Args:**
            | color_name (str): Human readable name for the color.
            | wisdom_count (int): How many different wisdoms are considered in order to find the best framing.
            | \**kwargs: Optional keyword arguments. Supports at least ``retweet``
              (which requires ``screen_name`` and ``original_tweet`` to be present too).

        **Returns:**
            (str, float) tuple: tweet and its value. If no tweet can be constructed
            (e.g. wisdomofchopra is down, no internet connection), returns None.
        """
        # BUG FIX: kwargs were dumped to stdout with a debug ``print``;
        # route them through the module logger instead.
        for k, v in kwargs.items():
            logger.debug("build_tweet kwarg %s = %s", k, v)
        wisdoms = self._get_wisdoms(color_name, wisdom_count = wisdom_count)
        if not wisdoms:
            return None
        tweets = []
        for wis in wisdoms:
            tokenized = self.tokenizer(wis)
            tagged_wisdom = self.pos_tag(tokenized)
            ret = self._get_color_place(color_name, tagged_wisdom)
            if ret is not None:
                place, value = ret
                tweet = self._prettify_tweet(color_name, tokenized, place)
                if 'retweet' in kwargs:
                    # NOTE(review): assumes 'screen_name' and 'original_tweet'
                    # accompany 'retweet' — KeyError otherwise; confirm callers.
                    tweet = "RT @{} {} {}".format(kwargs['screen_name'], kwargs['original_tweet'], tweet)
                if len(tweet) <= 140:
                    tweets.append((tweet, value))
        if len(tweets) > 0:
            sorted_tweets = sorted(tweets, key = operator.itemgetter(1), reverse = True)
            return sorted_tweets[0]
        else:
            return None

    def _get_wisdoms(self, color_name, wisdom_count = 3):
        """Get wisdoms from www.wisdomofchopra.com."""
        if type(wisdom_count) is not int or wisdom_count < 1:
            raise ValueError("wisdom_count must be positive integer.")
        wisdoms = []
        while len(wisdoms) < wisdom_count:
            try:
                resp = urllib2.urlopen(self.url).read()
                soup = self.bs(resp)
                wisdom = soup.find(id = "quote").find('h2').string
                wisdom = wisdom.strip().strip("\"")
                wisdom = wisdom if wisdom[-1] == "." else wisdom + "."
                # Keep room for the color name within the 140-char budget.
                if (len(wisdom) + len(color_name)) <= 120:
                    wisdoms.append(wisdom)
            except Exception:  # narrowed from bare except
                break # If there is no connection, etc.
        if len(wisdoms) == 0:
            return None
        else:
            return wisdoms

    def _get_color_place(self, color_name, tagged_wisdom):
        """Define place where to put the color name in the tagged sentence."""
        place_candidates = []
        for i in xrange(len(tagged_wisdom)):
            if tagged_wisdom[i][1][:2] == 'JJ':
                place_candidates.append(i)
            if tagged_wisdom[i][1][:2] == 'NN':
                # BUG FIX: was ``tagged_wisdom[i-i]`` (always index 0);
                # the intent is to inspect the PREVIOUS token so that only
                # the first noun of a noun run becomes a candidate.
                if i == 0 or tagged_wisdom[i-1][1][:2] != 'NN':
                    place_candidates.append(i)
        if len(place_candidates) == 0:
            return None
        color_split = color_name.split()
        color_synsets = []
        for c in color_split:
            cs = self.wordnet.synsets(c)
            if len(cs) > 0:
                for synset in cs:
                    color_synsets.append(synset)
        # BUG FIX: was ``len(color_synsets) == None`` which is never True,
        # so colors without synsets fell through with an empty synset list.
        if len(color_synsets) == 0:
            return None
        place_fits = {}
        for place in place_candidates:
            place_fits[place] = 0.0
            pos = self.wordnet.ADJ if tagged_wisdom[place][1][:2] == 'JJ' else self.wordnet.NOUN
            place_synsets = self.wordnet.synsets(tagged_wisdom[place][0], pos = pos)
            if len(place_synsets) > 0:
                for place_synset in place_synsets:
                    for csynset in color_synsets:
                        sim = place_synset.path_similarity(csynset)
                        if sim > place_fits[place]:
                            place_fits[place] = sim
        sorted_sim = sorted(place_fits.items(), key = operator.itemgetter(1), reverse = True)
        if sorted_sim[0][1] == 0.0:
            return None
        else:
            return sorted_sim[0]

    def _prettify_tweet(self, color_name, tokenized, place):
        # place == 0 means the color name starts the sentence: capitalize it
        # and lower-case the word it displaces.
        if place == 0:
            tokenized[0] = tokenized[0][0].lower() + tokenized[0][1:]
            color_name = color_name[0].upper() + color_name[1:]
        words = tokenized[:place] + color_name.split() + tokenized[place:]
        return text.prettify(words)
class NewAgeContext(ABCContext):
    """Framing with 'random' new age liirumlaarum.

    Bases on the work done by `Seb Pearce <http://sebpearce.com/bullshit/>`_.
    """

    def __init__(self):
        # Imported lazily so the module can be loaded without the heavy
        # bs4/nltk dependencies when this context is not used.
        # NOTE(review): bs, tokenizer and pos_tag are currently unused in
        # this class — kept for interface parity with DCContext.
        from bs4 import BeautifulSoup as bs
        from nltk import word_tokenize, pos_tag
        from nltk.corpus import wordnet
        self.bs = bs
        self.tokenizer = word_tokenize
        self.pos_tag = pos_tag
        self.wordnet = wordnet
        self.tweet_similarity_threshold = 3
        self.memory_length = 15

    def build_tweet(self, reasoning):
        """Build tweet for the color name carried by ``reasoning``.

        **Args:**
            | reasoning: object with at least ``color_name``, ``retweet`` and
              ``media`` attributes; on success the chosen tweet and its value
              are stored back into it via ``set_attr``/``values``.

        **Returns:**
            True if a tweet was stored into ``reasoning``, False if none of the
            generated framings fit, None if no wisdoms could be generated.
        """
        color_name = reasoning.color_name
        wisdoms = self._get_wisdoms(color_name, wisdom_count = 10)
        if not wisdoms:
            return None
        tweets = []
        for wis in wisdoms:
            places, parsed_wisdom = self._get_color_places(color_name, wis)
            if places is not None:
                ret = self._evaluate_color_places(color_name, places, parsed_wisdom)
                if ret is not None:
                    place, value = ret
                    tweet = self._prettify_tweet(color_name, parsed_wisdom, place)
                    if reasoning.retweet:
                        tweet = 'RT @{} "{}" {}'.format(reasoning.screen_name, reasoning.original_tweet, tweet)
                    # Attached media eats 22 chars of the 140-char budget.
                    tweet_len = 118 if reasoning.media else 140
                    if len(tweet) <= tweet_len:
                        tweets.append((tweet, 1.0 - value))
        if len(tweets) == 0:
            return False
        sorted_tweets = sorted(tweets, key = operator.itemgetter(1))
        reasoning.set_attr('tweet', sorted_tweets[0][0])
        reasoning.values['context'] = sorted_tweets[0][1]
        return True

    def _get_wisdoms(self, color_name, wisdom_count = 3):
        """Get wisdoms from sentence module"""
        if type(wisdom_count) is not int or wisdom_count < 1:
            raise ValueError("wisdom_count must be positive integer.")
        last_tweets = Tweet.objects.all()[:self.memory_length]
        wisdoms = []
        try:
            while len(wisdoms) < wisdom_count:
                wisdom = generate_text(1).strip()
                logger.debug("Generated wisdom: {}".format(wisdom))
                # Filter wisdoms too similar to latest tweets out
                if self._approve_wisdom(wisdom, last_tweets):
                    wisdoms.append(wisdom.strip())
        except Exception:  # narrowed from bare except
            return None
        return wisdoms

    def _get_color_places(self, color_name, sentence):
        """Define place where to put the color name in the sentence.

        The sentence marks insertion slots with the literal token ``<>``;
        the markers are removed and their (shifted) indices returned.
        """
        split_wisdom = sentence.split(" ")
        parsed_wisdom = []
        place_candidates = []
        removed = 0
        for i in xrange(len(split_wisdom)):
            if split_wisdom[i] == '<>':
                place_candidates.append(i - removed)
                removed += 1
            else:
                parsed_wisdom.append(split_wisdom[i])
        if len(place_candidates) == 0:
            return None, parsed_wisdom
        return place_candidates, parsed_wisdom

    def _evaluate_color_places(self, color_name, place_candidates, tagged_wisdom):
        """Score candidate places by WordNet path similarity to the color name."""
        color_split = color_name.split()
        color_synsets = []
        for c in color_split:
            cs = self.wordnet.synsets(c)
            if len(cs) > 0:
                color_synsets.append(cs[0])
        # BUG FIX: was ``len(color_synsets) == None`` which is never True,
        # so colors without any synsets slipped through with an empty list.
        if len(color_synsets) == 0:
            return None
        place_fits = {}
        for place in place_candidates:
            place_fits[place] = 0.0
            place_synsets = self.wordnet.synsets(tagged_wisdom[place])
            if len(place_synsets) > 0:
                for place_synset in place_synsets:
                    for csynset in color_synsets:
                        sim = place_synset.path_similarity(csynset)
                        if sim > place_fits[place]:
                            place_fits[place] = sim
        sorted_sim = sorted(place_fits.items(), key = operator.itemgetter(1), reverse = True)
        if sorted_sim[0][1] == 0.0:
            return None
        else:
            return sorted_sim[0]

    def _approve_wisdom(self, wisdom, last_tweets):
        """Reject wisdoms sharing too many words with recent tweets."""
        for t in last_tweets:
            sw = text.same_words(wisdom, t.message)
            if sw > self.tweet_similarity_threshold:
                logger.debug("Discarding wisdom, because it was too similar ({}) with recent tweet: {}".format(sw, t.message))
                return False
        return True

    def _prettify_tweet(self, color_name, tokenized, place):
        # place == 0 means the color name starts the sentence: capitalize it
        # and lower-case the word it displaces.
        if place == 0:
            tokenized[0] = tokenized[0][0].lower() + tokenized[0][1:]
            color_name = color_name[0].upper() + color_name[1:]
            words = color_name.split() + tokenized[place:]
        else:
            words = tokenized[:place] + color_name.split() + tokenized[place:]
        return text.prettify(words)
class MonkeyImageContext(ABCContext):
    """Context that frames a tweet with a reaction image.

    Despite the class/method names, the searches currently target cat
    photos (see the commented-out monkey search strings below).
    """

    def build_tweet(self, reasoning):
        """Build an image tweet from ``reasoning`` (article + reaction).

        On success, stores the media path, the tweet image instance and the
        tweet text into ``reasoning`` and returns True; returns False when
        any stage (interjection, photo search, image composition) fails.
        """
        emotion = reasoning.reaction
        print emotion  # NOTE(review): leftover debug print
        ret = interjections.get(emotion, settings.WORD2VEC_MODEL)
        if ret is None: return False
        interjection, base = ret
        url = tinyurl.get(reasoning.article['url'])
        # Try Flickr first; fall back to Google image search.
        photo = self.get_flickr_monkey_photo(emotion)
        #photo = False
        if not photo:
            print "plaa"  # NOTE(review): leftover debug print
            photo_url = self.get_google_monkey_photo(emotion)
            if photo_url:
                photo = self.download_url_photo(photo_url)
        if not photo: return False
        # Mood color (with noise) for the text overlay; 192 is the alpha.
        color = get_closest_mood_color(emotion, settings.WORD2VEC_MODEL)
        color = cu.add_noise(color)
        color = list(color)
        color.append(192)
        color = tuple(color)
        photo = self.create_reaction_image(interjection, photo, color)
        if not photo: return False
        photo.interjection = base
        photo.save()
        save_path = os.path.join(settings.ROOT_DIR, photo.processed.path)
        tags = self._get_tags(reasoning.article['text'])
        stags = "#" + " #".join(tags) if len(tags) > 0 else ''
        logger.info("Extracted tags '{}' for article '{}...'".format(stags, reasoning.article['url']))
        tweet = url + " " + stags
        # Drop trailing tags until the text fits alongside the image.
        while len(tweet) > 120:
            tweet = tweet.rsplit(" ", 1)[0]
        reasoning.set_attr('media', save_path)
        reasoning.set_attr('tweet_image', photo)
        reasoning.values['context'] = 1
        reasoning.set_attr('tweet', tweet)
        return True

    def get_flickr_monkey_photo(self, emotion):
        """Search Flickr for a reaction photo; returns a FlickrTweetImage
        (downloading it first if not cached) or False on failure."""
        #search_text = "monkey {}".format(emotion)
        search_text = "cat {}".format(emotion)
        try:
            # NOTE(review): uses root ``logging`` instead of module ``logger``
            # — presumably unintentional; confirm before changing.
            logging.info("Searching Flickr for images with text: '{}'".format(search_text))
            photos = flickr.photos_search(text = search_text)
        except:
            logger.error("Could not execute Flickr search because of error: {}".format(traceback.format_exc()))
            return False
        # Keep photos tagged 'cat' but not 'text' (avoids caption images).
        filtered_photos = []
        for p in photos:
            tags = p.tags.split()
            if 'cat' in tags and 'text' not in tags:
                filtered_photos.append(p)
        logger.info("Found {} images for the Flickr search: {}".format(len(filtered_photos), search_text))
        if len(filtered_photos) == 0:
            return False
        fphoto = choice(filtered_photos)
        fti = FlickrTweetImage.objects.get_or_none(flickr_id = fphoto.id)
        if fti is None:
            fti = self.flickr_download_and_save(fphoto)
        return fti

    def get_google_monkey_photo(self, emotion):
        """Search the (deprecated) Google AJAX image API for a photo URL.

        Pages through results 8 at a time, up to start offset 60; returns
        the chosen photo URL or False.
        """
        #search_text = 'animal monkey {}'.format(emotion)
        search_text = 'cat animal {}'.format(emotion)
        start = 0
        url = 'https://ajax.googleapis.com/ajax/services/search/images?v=1.0&q={}&rsz=8&start={}'
        current_url = url.format(search_text, start)
        found = False
        try:
            while found == False:
                logger.info("Searching Google (start = {}) for images with text: '{}'".format(start, search_text))
                ret = urllib.urlopen(current_url).read()
                ret = json.loads(ret)
                status = ret['responseStatus']
                if status != 200:
                    logger.info('Google image search returned response status {}, halting image search.'.format(status))
                    return False
                results = ret['responseData']['results']
                photo_url = self._filter_google_results(results)
                if photo_url:
                    found = True
                    continue
                start += 8
                current_url = url.format(search_text, start)
                if start > 60:
                    logger.info("Could not find suitable Google image in sufficient time. Halting image search.")
                    return False
                time.sleep(1)  # be polite to the API between pages
        except:
            logger.error("Could not open url {} because of error: {}".format(current_url, traceback.format_exc()))
            return False
        return photo_url

    def download_url_photo(self, photo_url):
        """Download (or fetch cached) URLTweetImage for ``photo_url``."""
        uti = URLTweetImage.objects.get_or_none(url = photo_url)
        if uti is not None:
            return uti
        photo_name = self._get_image_name(photo_url)
        upload_path = os.path.join(settings.MEDIA_ROOT, settings.ORIGINAL_IMAGE_UPLOAD_PATH, photo_name)
        if not self._download_image(photo_url, upload_path):
            return False
        impath = os.path.join(settings.ORIGINAL_IMAGE_UPLOAD_PATH, photo_name)
        # NOTE(review): instance is returned unsaved — confirm callers save it.
        inst = URLTweetImage(original = impath, url = photo_url)
        return inst

    def _filter_google_results(self, results):
        """Pick a random acceptable result (cat-related, not shutterstock);
        returns its unescaped URL or False."""
        if len(results) == 0:
            return False
        while len(results) > 0:
            photo = choice(results)
            content = photo['contentNoFormatting']
            print photo['contentNoFormatting'], photo['visibleUrl']  # NOTE(review): leftover debug print
            if photo['visibleUrl'] == "www.shutterstock.com":
                pass
            elif self.has_cat(content):
                logger.info("Found suitable image from Google with content: '{}'".format(content))
                return photo['unescapedUrl']
            results.remove(photo)
        return False

    def has_cat(self, content):
        """True if ``content`` mentions a cat-related word."""
        cats = ['cat', 'kitty', 'kitten', 'cats', 'kittens']
        ts = content.lower().split()
        for t in ts:
            if t in cats:
                return True
        return False

    def has_monkey(self, content):
        """True if ``content`` mentions a monkey/ape-related word."""
        monkeys = ['ape', 'apes', 'monkey', 'monkeys', 'baboon', 'baboons', 'gorilla', 'gorillas', 'makaki', 'chimpanzee', 'chimp', 'chimps']
        ts = content.lower().split()
        for t in ts:
            if t in monkeys:
                return True
        return False

    def create_reaction_image(self, text, photo, color):
        """Overlay ``text`` (and a Flickr credit caption, if applicable) on
        ``photo``; saves the processed image and returns the updated photo
        instance, or False on error."""
        try:
            # NOTE(review): root ``logging`` used instead of module ``logger``.
            logging.info("Creating reaction image for {} ({}) with photo {}".format(text, str(color), photo.original.path))
            photo_name = photo.original.path.rsplit("/", 1)[1]
            photo_name = photo_name.rsplit(".", 1)[0] + ".png"
            save_path = os.path.join(settings.MEDIA_ROOT, settings.PROCESSED_IMAGE_UPLOAD_PATH, photo_name)
            img = Image.open(os.path.join(settings.MEDIA_ROOT, photo.original.path))
            if photo.__class__.__name__ == "FlickrTweetImage":
                caption = "Original image by {} @ Flickr".format(photo.flickr_user_name)
                img = image.text2image(img, caption, background_color = (255, 255, 255, 100), font_color = (0, 0, 0, 255), font_size = 10, y_pos = 'down', x_pos = 'right', scale_font = False)
            img = image.text2image(img, text.upper(), font_color = color)
            img.save(save_path)
            pro_path = os.path.join(settings.PROCESSED_IMAGE_UPLOAD_PATH, photo_name)
            photo.processed = pro_path
            photo.save()
        except:
            logger.error("Could not create reaction image because of error: {}".format(traceback.format_exc()))
            return False
        return photo

    def flickr_download_and_save(self, photo):
        '''Download photo and save it locally.

        :param photo_id: Valid Flickr photo id
        :type photo_id: str
        :returns: FlickrTweetImage instance on success, False on download failure.
        '''
        image_urlz = photo.url_z
        image_name = self._get_image_name(image_urlz)
        abs_path = os.path.join(settings.MEDIA_ROOT, settings.ORIGINAL_IMAGE_UPLOAD_PATH, image_name)
        upload_path = os.path.join(settings.ORIGINAL_IMAGE_UPLOAD_PATH, image_name)
        r1 = self._download_image(image_urlz, abs_path)
        if not r1: return False
        instance = FlickrTweetImage(original = upload_path,
            flickr_id = photo.id, flickr_user_id = photo.owner,
            flickr_secret = photo.secret, flickr_farm = photo.farm,
            flickr_server = photo.server, title = photo.title,
            flickr_user_name = photo.owner_name,
            description = photo.description)
        instance.save()
        return instance

    def _download_image(self, url, upload_path):
        """Fetch ``url`` to ``upload_path``; verify the payload is an image
        (deletes it otherwise, e.g. an HTML 403 page). Returns True/False."""
        try:
            logger.info("Downloading {} to {}".format(url, upload_path))
            urllib.urlretrieve(url, upload_path)
        except Exception as e:
            logger.error("Image download failed: {}".format(traceback.print_exc(e)))
            return False
        # Check that downloaded content is indeed and image and not e.g.
        # 403 page.
        try:
            img = Image.open(upload_path)
        except Exception as e:
            if os.path.isfile(upload_path):
                logger.info("Deleting downloaded content from {} because of error: {}".format(upload_path, traceback.print_exc(e)))
                os.remove(upload_path)
            return False
        return True

    def _get_image_name(self, url):
        """Derive a local file name from ``url``: last path segment,
        '.jpg' appended when there is no extension, prefixed with a
        timestamp to avoid collisions."""
        photo_name = url.rsplit('/', 1)[1]
        if len(photo_name.split(".")) == 1:
            photo_name += '.jpg'
        import re
        time = datetime.datetime.now()
        time = str(time)
        time = re.sub(" ", "_", time)
        time = re.sub(":", "", time)
        time = time[:17]
        photo_name = time + "_" + photo_name
        return photo_name

    def _get_tags(self, article):
        '''Get most promising tag words from the text (up to 3 proper nouns).'''
        nnps = text.get_NNPs(article, True)
        a = 3 if len(nnps) > 3 else len(nnps)
        return [x[0] for x in nnps[:a]]
| {
"content_hash": "9aacb8dc3a42dfbf901ad291a71ecd85",
"timestamp": "",
"source": "github",
"line_count": 600,
"max_line_length": 191,
"avg_line_length": 38.92166666666667,
"alnum_prop": 0.5442983770821735,
"repo_name": "assamite/TwatBot",
"id": "785f72dfbed07f04982c965e7b037ca724c9aa27",
"size": "23353",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tweets/contexts.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "6852"
},
{
"name": "Python",
"bytes": "362171"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import frappe
from frappe import _, msgprint
from frappe.utils import flt, cstr, now, get_datetime_str
from frappe.utils.background_jobs import enqueue
from frappe.model.base_document import BaseDocument, get_controller
from frappe.model.naming import set_new_name
from werkzeug.exceptions import NotFound, Forbidden
import hashlib, json
from frappe.model import optional_fields
# once_only validation
# methods
def get_doc(arg1, arg2=None):
	"""returns a frappe.model.Document object.

	:param arg1: Document dict or DocType name.
	:param arg2: [optional] document name.

	There are two ways to call `get_doc`

		# will fetch the latest user object (with child table) from the database
		user = get_doc("User", "test@example.com")

		# create a new object
		user = get_doc({
			"doctype": "User",
			"email_id": "test@example.com",
			"user_roles": [
				{"role": "System Manager"}
			]
		})
	"""
	if isinstance(arg1, BaseDocument):
		return arg1
	elif isinstance(arg1, basestring):
		doctype = arg1
	else:
		doctype = arg1.get("doctype")
	controller = get_controller(doctype)
	if controller:
		return controller(arg1, arg2)
	# no controller module could be resolved for the doctype (Py2 raise syntax)
	raise ImportError, arg1
class Document(BaseDocument):
"""All controllers inherit from `Document`."""
def __init__(self, arg1, arg2=None):
	"""Constructor.

	:param arg1: DocType name as string or document **dict**
	:param arg2: Document name, if `arg1` is DocType name.

	If DocType name and document name are passed, the object will load
	all values (including child documents) from the database.
	"""
	self.doctype = self.name = None
	self._default_new_docs = {}
	self.flags = frappe._dict()
	if arg1 and isinstance(arg1, basestring):
		if not arg2:
			# single: a Single DocType's document shares the DocType's name
			self.doctype = self.name = arg1
		else:
			self.doctype = arg1
			if isinstance(arg2, dict):
				# filter: resolve the document name from the filter dict
				self.name = frappe.db.get_value(arg1, arg2, "name")
				if self.name is None:
					frappe.throw(_("{0} {1} not found").format(_(arg1), arg2), frappe.DoesNotExistError)
			else:
				self.name = arg2
		self.load_from_db()
	elif isinstance(arg1, dict):
		# document dict: initialize fields in memory, nothing loaded from db
		super(Document, self).__init__(arg1)
		self.init_valid_columns()
	else:
		# incorrect arguments. let's not proceed.
		raise frappe.DataError("Document({0}, {1})".format(arg1, arg2))
def reload(self):
	"""Re-fetch this document's values (and children) from the database."""
	self.load_from_db()
def load_from_db(self):
	"""Load document and children from database and create properties
	from fields"""
	if not getattr(self, "_metaclass", False) and self.meta.issingle:
		# Single doctypes live in `tabSingles` as key/value rows.
		single_doc = frappe.db.get_singles_dict(self.doctype)
		if not single_doc:
			# no stored values yet: start from a fresh in-memory default doc
			single_doc = frappe.new_doc(self.doctype).as_dict()
			single_doc["name"] = self.doctype
			del single_doc["__islocal"]
		super(Document, self).__init__(single_doc)
		self.init_valid_columns()
		self._fix_numeric_types()
	else:
		d = frappe.db.get_value(self.doctype, self.name, "*", as_dict=1)
		if not d:
			frappe.throw(_("{0} {1} not found").format(_(self.doctype), self.name), frappe.DoesNotExistError)
		super(Document, self).__init__(d)
	if self.name=="DocType" and self.doctype=="DocType":
		# bootstrap case: DocType's own meta cannot be loaded normally
		from frappe.model.meta import doctype_table_fields
		table_fields = doctype_table_fields
	else:
		table_fields = self.meta.get_table_fields()
	for df in table_fields:
		children = frappe.db.get_values(df.options,
			{"parent": self.name, "parenttype": self.doctype, "parentfield": df.fieldname},
			"*", as_dict=True, order_by="idx asc")
		if children:
			self.set(df.fieldname, children)
		else:
			self.set(df.fieldname, [])
	# sometimes __setup__ can depend on child values, hence calling again at the end
	if hasattr(self, "__setup__"):
		self.__setup__()
def get_latest(self):
	"""Return (and lazily cache) a freshly loaded copy of this document."""
	cached = getattr(self, "latest", None)
	if cached:
		return cached
	self.latest = frappe.get_doc(self.doctype, self.name)
	return self.latest
def check_permission(self, permtype='read', permlabel=None):
	"""Raise `frappe.PermissionError` unless the user has *permtype* access."""
	allowed = self.has_permission(permtype)
	if allowed:
		return
	self.raise_no_permission_to(permlabel if permlabel else permtype)
def has_permission(self, permtype="read", verbose=False):
	"""Delegate to `frappe.has_permission` unless permission checks are
	disabled via `self.flags.ignore_permissions`.

	:param permtype: one of `read`, `write`, `submit`, `cancel`, `delete`"""
	if not self.flags.ignore_permissions:
		return frappe.has_permission(self.doctype, permtype, self, verbose=verbose)
	return True
def has_website_permission(self, permtype="read", verbose=False):
	"""Website-permission variant of `has_permission`; also passes when the
	user holds the ordinary document permission.

	:param permtype: one of `read`, `write`, `submit`, `cancel`, `delete`"""
	if self.flags.ignore_permissions:
		return True
	if frappe.has_website_permission(self.doctype, permtype, self, verbose=verbose):
		return True
	return self.has_permission(permtype, verbose=verbose)
def raise_no_permission_to(self, perm_type):
	"""Raise `frappe.PermissionError` with a user-visible message."""
	# BUG FIX: the message was formatted BEFORE translation
	# (``_("...".format(...))``), so the translation lookup received an
	# already-substituted string that could never match the template.
	# Translate the template first, then substitute.
	msg = _("No permission to {0} {1} {2}").format(perm_type, self.doctype, self.name or "")
	frappe.msgprint(msg)
	raise frappe.PermissionError(msg)
def lock(self):
	'''Will set docstatus to 3 + the current docstatus and mark it as queued
	3 = queued for saving
	4 = queued for submission
	5 = queued for cancellation
	'''
	# update_modified=False: queueing must not bump `modified` — presumably
	# so the later real save does not trip the timestamp check; confirm
	# against check_if_latest.
	self.db_set('docstatus', 3 + self.docstatus, update_modified = False)
def unlock(self):
	'''set the original docstatus at the time it was locked in the controller'''
	# queued states 3/4/5 map back to 0/0/1 respectively; never below 0
	restored = max(self.db_get('docstatus') - 4, 0)
	self.db_set('docstatus', restored, update_modified = False)
def insert(self, ignore_permissions=None):
	"""Insert the document in the database (as a new document).

	This will check for user permissions and execute `before_insert`,
	`validate`, `on_update`, `after_insert` methods if they are written.

	:param ignore_permissions: Do not check permissions if True."""
	if self.flags.in_print:
		return
	# ``!=None`` so an explicit True/False both override the flag
	if ignore_permissions!=None:
		self.flags.ignore_permissions = ignore_permissions
	self.set("__islocal", True)
	self.check_permission("create")
	self._set_defaults()
	self.set_user_and_timestamp()
	self.set_docstatus()
	self.check_if_latest()
	self.run_method("before_insert")
	self.set_new_name()
	self.set_parent_in_children()
	self.validate_higher_perm_levels()
	# in_insert guards before-save hooks that must know this is a new doc
	self.flags.in_insert = True
	self.run_before_save_methods()
	self._validate()
	self.set_docstatus()
	self.flags.in_insert = False
	# run validate, on update etc.
	# parent
	if getattr(self.meta, "issingle", 0):
		self.update_single(self.get_valid_dict())
	else:
		self.db_insert()
	# children
	for d in self.get_all_children():
		d.db_insert()
	self.run_method("after_insert")
	# in_insert is re-set around post-save hooks for the same reason
	self.flags.in_insert = True
	self.run_post_save_methods()
	self.flags.in_insert = False
	# delete __islocal
	if hasattr(self, "__islocal"):
		delattr(self, "__islocal")
	return self
def save(self, *args, **kwargs):
	"""Public save entry point; forwards everything to `_save`."""
	return self._save(*args, **kwargs)
def _save(self, ignore_permissions=None):
	"""Save the current document in the database in the **DocType**'s table or
	`tabSingles` (for single types).

	This will check for user permissions and execute
	`validate` before updating, `on_update` after updating triggers.

	:param ignore_permissions: Do not check permissions if True."""
	if self.flags.in_print:
		return
	# ``!=None`` so an explicit True/False both override the flag
	if ignore_permissions!=None:
		self.flags.ignore_permissions = ignore_permissions
	if self.get("__islocal") or not self.get("name"):
		# never stored before: route to insert
		self.insert()
		return
	self.check_permission("write", "save")
	self.set_user_and_timestamp()
	self.set_docstatus()
	self.check_if_latest()
	self.set_parent_in_children()
	self.validate_higher_perm_levels()
	self.run_before_save_methods()
	# cancelled documents skip field validation
	if self._action != "cancel":
		self._validate()
	if self._action == "update_after_submit":
		self.validate_update_after_submit()
	self.set_docstatus()
	# parent
	if self.meta.issingle:
		self.update_single(self.get_valid_dict())
	else:
		self.db_update()
	self.update_children()
	self.run_post_save_methods()
	return self
def update_children(self):
	'''Sync every child table of this document to the database.'''
	for table_df in self.meta.get_table_fields():
		self.update_child_table(table_df.fieldname, table_df)
def update_child_table(self, fieldname, df=None):
	'''sync child table for given fieldname

	Writes every in-memory row, then deletes database rows that are no
	longer present in the document.'''
	rows = []
	if not df:
		df = self.meta.get_field(fieldname)
	for d in self.get(df.fieldname):
		d.db_update()
		rows.append(d.name)
	if df.options in (self.flags.ignore_children_type or []):
		# do not delete rows for this because of flags
		# hack for docperm :(
		return
	if rows:
		# delete rows that do not match the ones in the
		# document
		frappe.db.sql("""delete from `tab{0}` where parent=%s
			and parenttype=%s and parentfield=%s
			and name not in ({1})""".format(df.options, ','.join(['%s'] * len(rows))),
			[self.name, self.doctype, fieldname] + rows)
	else:
		# no rows found, delete all rows
		frappe.db.sql("""delete from `tab{0}` where parent=%s
			and parenttype=%s and parentfield=%s""".format(df.options),
			(self.name, self.doctype, fieldname))
def set_new_name(self):
	"""Calls `frappe.naming.set_new_name` for parent and child docs."""
	set_new_name(self)
	# set name for children
	for d in self.get_all_children():
		set_new_name(d)
def set_title_field(self):
	"""Set title field based on template"""
	def get_values():
		# dict of field values with None replaced by "" so that
		# str.format templates never print "None"
		values = self.as_dict()
		# format values
		for key, value in values.iteritems():
			if value==None:
				values[key] = ""
		return values
	# NOTE(review): templating only runs when the title field is literally
	# named "title" — confirm that is intended.
	if self.meta.get("title_field")=="title":
		df = self.meta.get_field(self.meta.title_field)
		if df.options:
			# options holds a str.format template over the doc's fields
			self.set(df.fieldname, df.options.format(**get_values()))
		elif self.is_new() and not self.get(df.fieldname) and df.default:
			# set default title for new transactions (if default)
			self.set(df.fieldname, df.default.format(**get_values()))
def update_single(self, d):
	"""Updates values for Single type Document in `tabSingles`.

	Rewrites all key/value rows for this doctype and invalidates the
	cached values."""
	frappe.db.sql("""delete from tabSingles where doctype=%s""", self.doctype)
	for field, value in d.iteritems():
		if field != "doctype":
			frappe.db.sql("""insert into tabSingles(doctype, field, value)
				values (%s, %s, %s)""", (self.doctype, field, value))
	# drop stale cached values for this doctype
	if self.doctype in frappe.db.value_cache:
		del frappe.db.value_cache[self.doctype]
def set_user_and_timestamp(self):
	"""Stamp modified/creation/owner fields on the doc and its children."""
	# keep the incoming `modified` — used later to detect concurrent edits
	self._original_modified = self.modified
	self.modified = now()
	self.modified_by = frappe.session.user
	if not self.creation:
		self.creation = self.modified
	if not self.owner:
		self.owner = self.modified_by
	for d in self.get_all_children():
		d.modified = self.modified
		d.modified_by = self.modified_by
		if not d.owner:
			d.owner = self.owner
		if not d.creation:
			d.creation = self.creation
	frappe.flags.currently_saving.append((self.doctype, self.name))
def set_docstatus(self):
	"""Default docstatus to 0 (draft) and propagate it to all children."""
	# BUG FIX (idiom): compare against None with ``is``, not ``==`` (PEP 8);
	# behavior is unchanged since 0 == None is False, but the intent is
	# an identity check.
	if self.docstatus is None:
		self.docstatus=0
	for d in self.get_all_children():
		d.docstatus = self.docstatus
def _validate(self):
	"""Run all field-level validations on the doc and its children."""
	self._validate_mandatory()
	self._validate_links()
	self._validate_selects()
	self._validate_constants()
	self._validate_length()
	self._sanitize_content()
	self._save_passwords()
	children = self.get_all_children()
	for d in children:
		# children skip mandatory/link validation (parent-level concerns)
		d._validate_selects()
		d._validate_constants()
		d._validate_length()
		d._sanitize_content()
		d._save_passwords()
	if self.is_new():
		# don't set fields like _assign, _comments for new doc
		for fieldname in optional_fields:
			self.set(fieldname, None)
	# extract images after validations to save processing if some validation error is raised
	self._extract_images_from_text_editor()
	for d in children:
		d._extract_images_from_text_editor()
def apply_fieldlevel_read_permissions(self):
	'''Remove values the user is not allowed to read (called when loading in desk)'''
	# Nothing to strip when no permission rule uses a permlevel above 0.
	if not any(p.permlevel > 0 for p in self.get_permissions()):
		return
	has_access_to = self.get_permlevel_access('read')
	# blank out restricted parent fields...
	for df in self.meta.fields:
		if df.permlevel and df.permlevel not in has_access_to:
			self.set(df.fieldname, None)
	# ...and restricted fields on every child-table row
	for table_field in self.meta.get_table_fields():
		for df in frappe.get_meta(table_field.options).fields or []:
			if df.permlevel and df.permlevel not in has_access_to:
				for child in self.get(table_field.fieldname) or []:
					child.set(df.fieldname, None)
def validate_higher_perm_levels(self):
    """If the user does not have permissions at permlevel > 0, then reset the values to original / default"""
    # skipped entirely during install or when permissions are explicitly ignored
    if self.flags.ignore_permissions or frappe.flags.in_install:
        return
    has_access_to = self.get_permlevel_access()
    high_permlevel_fields = self.meta.get_high_permlevel_fields()
    if high_permlevel_fields:
        self.reset_values_if_no_permlevel_access(has_access_to, high_permlevel_fields)
    # check for child tables
    for df in self.meta.get_table_fields():
        high_permlevel_fields = frappe.get_meta(df.options).meta.get_high_permlevel_fields()
        if high_permlevel_fields:
            for d in self.get(df.fieldname):
                d.reset_values_if_no_permlevel_access(has_access_to, high_permlevel_fields)
def get_permlevel_access(self, permission_type='write'):
    """Return the list of permlevels (> 0) for which the current user's
    roles grant `permission_type` on this doctype.
    NOTE(review): the result is cached on `_has_access_to` without keying on
    `permission_type` — a second call with a different type returns the
    first call's result; confirm this is intended."""
    if not hasattr(self, "_has_access_to"):
        user_roles = frappe.get_roles()
        self._has_access_to = []
        for perm in self.get_permissions():
            if perm.role in user_roles and perm.permlevel > 0 and perm.get(permission_type):
                if perm.permlevel not in self._has_access_to:
                    self._has_access_to.append(perm.permlevel)
    return self._has_access_to
def has_permlevel_access_to(self, fieldname, df=None, permission_type='read'):
    """Return True if the user's permlevels grant `permission_type` on the field.
    BUG FIX: `permission_type` was accepted but never forwarded to
    get_permlevel_access, so the check silently used that method's default
    ('write') even though this method's own default is 'read'."""
    if not df:
        df = self.meta.get_field(fieldname)
    return df.permlevel in self.get_permlevel_access(permission_type)
def get_permissions(self):
    """Return the permission rows governing this document's doctype.
    Child-table rows have no permissions of their own, so the parent
    doctype's permissions apply."""
    if self.meta.istable:
        # use parent permissions
        return frappe.get_meta(self.parenttype).permissions
    return self.meta.permissions
def _set_defaults(self):
    """Fill unset fields on this document and its child rows with the
    doctype's default values (skipped during data import)."""
    if frappe.flags.in_import:
        return
    # a freshly constructed doc of the same doctype carries the defaults
    new_doc = frappe.new_doc(self.doctype, as_dict=True)
    self.update_if_missing(new_doc)
    # children
    for df in self.meta.get_table_fields():
        new_doc = frappe.new_doc(df.options, as_dict=True)
        value = self.get(df.fieldname)
        if isinstance(value, list):
            for d in value:
                d.update_if_missing(new_doc)
def check_if_latest(self):
    """Checks if `modified` timestamp provided by document being updated is same as the
    `modified` timestamp in the database. If there is a different, the document has been
    updated in the database after the current copy was read. Will throw an error if
    timestamps don't match.
    Will also validate document transitions (Save > Submit > Cancel) calling
    `self.check_docstatus_transition`."""
    conflict = False
    self._action = "save"
    if not self.get('__islocal'):
        if self.meta.issingle:
            # singles have no row lock; just compare the stored timestamp
            modified = frappe.db.get_value(self.doctype, self.name, "modified")
            if cstr(modified) and cstr(modified) != cstr(self._original_modified):
                conflict = True
        else:
            # `for update` takes a row lock so concurrent saves serialize here
            tmp = frappe.db.sql("""select modified, docstatus from `tab{0}`
                where name = %s for update""".format(self.doctype), self.name, as_dict=True)
            if not tmp:
                frappe.throw(_("Record does not exist"))
            else:
                tmp = tmp[0]
            modified = cstr(tmp.modified)
            # compare against the timestamp captured in set_user_and_timestamp
            if modified and modified != cstr(self._original_modified):
                conflict = True
            self.check_docstatus_transition(tmp.docstatus)
        if conflict:
            frappe.msgprint(_("Error: Document has been modified after you have opened it") \
            + (" (%s, %s). " % (modified, self.modified)) \
            + _("Please refresh to get the latest document."),
            raise_exception=frappe.TimestampMismatchError)
    else:
        # brand-new document: only the docstatus transition needs validating
        self.check_docstatus_transition(0)
def check_docstatus_transition(self, docstatus):
    """Ensures valid `docstatus` transition.
    Valid transitions are (number in brackets is `docstatus`):
    - Save (0) > Save (0)
    - Save (0) > Submit (1)
    - Submit (1) > Submit (1)
    - Submit (1) > Cancel (2)
    If docstatus is > 2, it will throw exception as document is deemed queued

    :param docstatus: the docstatus currently stored in the database.
    Sets `self._action` to save/submit/cancel/update_after_submit and
    enforces submit/cancel permissions."""
    # default a missing docstatus to Draft (0) *before* the numeric
    # comparison below so `None > 2` is never evaluated (TypeError on
    # Python 3; only accidentally False on Python 2)
    if not self.docstatus:
        self.docstatus = 0
    if self.docstatus > 2:
        frappe.throw(_('This document is currently queued for execution. Please try again'),
            title=_('Document Queued'), indicator='red')
    if docstatus==0:
        if self.docstatus==0:
            self._action = "save"
        elif self.docstatus==1:
            self._action = "submit"
            self.check_permission("submit")
        else:
            # call-style raise works on both Python 2 and 3; the old
            # `raise E, msg` comma form is Python-2-only syntax
            raise frappe.DocstatusTransitionError(_("Cannot change docstatus from 0 to 2"))
    elif docstatus==1:
        if self.docstatus==1:
            self._action = "update_after_submit"
            self.check_permission("submit")
        elif self.docstatus==2:
            self._action = "cancel"
            self.check_permission("cancel")
        else:
            raise frappe.DocstatusTransitionError(_("Cannot change docstatus from 1 to 0"))
    elif docstatus==2:
        raise frappe.ValidationError(_("Cannot edit cancelled document"))
def set_parent_in_children(self):
    """Updates `parent` and `parenttype` property in all children."""
    parent_name, parent_doctype = self.name, self.doctype
    for child in self.get_all_children():
        child.parent = parent_name
        child.parenttype = parent_doctype
def validate_update_after_submit(self):
    """Ensure only fields flagged 'allow on submit' are changed after a
    document has been submitted; recurses into child rows."""
    if self.flags.ignore_validate_update_after_submit:
        return
    self._validate_update_after_submit()
    for d in self.get_all_children():
        if d.is_new() and self.meta.get_field(d.parentfield).allow_on_submit:
            # in case of a new row, don't validate allow on submit, if table is allow on submit
            continue
        d._validate_update_after_submit()
    # TODO check only allowed values are updated
def _validate_mandatory(self):
if self.flags.ignore_mandatory:
return
missing = self._get_missing_mandatory_fields()
for d in self.get_all_children():
missing.extend(d._get_missing_mandatory_fields())
if not missing:
return
for fieldname, msg in missing:
msgprint(msg)
if frappe.flags.print_messages:
print self.as_json().encode("utf-8")
raise frappe.MandatoryError('[{doctype}, {name}]: {fields}'.format(
fields=", ".join((each[0] for each in missing)),
doctype=self.doctype,
name=self.name))
def _validate_links(self):
    """Throw if any Link field (on this doc or a child row) points to a
    missing record, or to a cancelled document."""
    if self.flags.ignore_links:
        return
    invalid_links, cancelled_links = self.get_invalid_links()
    for d in self.get_all_children():
        result = d.get_invalid_links(is_submittable=self.meta.is_submittable)
        invalid_links.extend(result[0])
        cancelled_links.extend(result[1])
    # each entry is a tuple; index 2 holds the human-readable label
    if invalid_links:
        msg = ", ".join(each[2] for each in invalid_links)
        frappe.throw(_("Could not find {0}").format(msg),
            frappe.LinkValidationError)
    if cancelled_links:
        msg = ", ".join(each[2] for each in cancelled_links)
        frappe.throw(_("Cannot link cancelled document: {0}").format(msg),
            frappe.CancelledLinkError)
def get_all_children(self, parenttype=None):
    """Returns all children documents from **Table** type field in a list."""
    collected = []
    for df in self.meta.get("fields", {"fieldtype": "Table"}):
        # when a parenttype filter is given, the first matching table's rows
        # are returned directly (quirk preserved from the original: rows of
        # non-matching tables seen earlier are collected, then discarded)
        if parenttype and df.options == parenttype:
            return self.get(df.fieldname)
        rows = self.get(df.fieldname)
        if isinstance(rows, list):
            collected.extend(rows)
    return collected
def run_method(self, method, *args, **kwargs):
    """run standard triggers, plus those in hooks"""
    if "flags" in kwargs:
        del kwargs["flags"]
    if hasattr(self, method) and hasattr(getattr(self, method), "__call__"):
        # wrap in a fresh function so Document.hook can compose app hooks
        # around the controller method
        fn = lambda self, *args, **kwargs: getattr(self, method)(*args, **kwargs)
    else:
        # hack! to run hooks even if method does not exist
        fn = lambda self, *args, **kwargs: None
    # Python 2: __name__ must be a byte string
    fn.__name__ = method.encode("utf-8")
    return Document.hook(fn)(self, *args, **kwargs)
@staticmethod
def whitelist(f):
    """Decorator: mark `f` as callable remotely via the REST API.
    NOTE(review): an identical `whitelist` staticmethod is re-defined later
    in this class; the later definition wins at class-creation time."""
    f.whitelisted = True
    return f
# `whitelist.__func__` unwraps the staticmethod so it can be used as a
# decorator at class-body time (Python 2 staticmethods are not callable)
@whitelist.__func__
def _submit(self):
    """Submit the document. Sets `docstatus` = 1, then saves."""
    self.docstatus = 1
    self.save()
@whitelist.__func__
def _cancel(self):
    """Cancel the document. Sets `docstatus` = 2, then saves."""
    self.docstatus = 2
    self.save()
@whitelist.__func__
def submit(self):
    """Submit the document. Sets `docstatus` = 1, then saves.
    Thin public wrapper over `_submit` (kept separate so subclasses can
    override submit while background jobs call `_submit` directly)."""
    self._submit()
@whitelist.__func__
def cancel(self):
    """Cancel the document. Sets `docstatus` = 2, then saves.
    Thin public wrapper over `_cancel`."""
    self._cancel()
def delete(self):
    """Delete document."""
    # delegates to frappe.delete_doc, forwarding this document's flags
    frappe.delete_doc(self.doctype, self.name, flags=self.flags)
def run_before_save_methods(self):
    """Run standard methods before `INSERT` or `UPDATE`. Standard Methods are:
    - `validate`, `before_save` for **Save**.
    - `validate`, `before_submit` for **Submit**.
    - `before_cancel` for **Cancel**
    - `before_update_after_submit` for **Update after Submit**
    Will also update title_field if set"""
    self.set_title_field()
    self.reset_seen()
    # `ignore_validate` skips controller hooks entirely (used by patches/imports)
    if self.flags.ignore_validate:
        return
    # _action is decided earlier by check_docstatus_transition
    if self._action=="save":
        self.run_method("validate")
        self.run_method("before_save")
    elif self._action=="submit":
        self.run_method("validate")
        self.run_method("before_submit")
    elif self._action=="cancel":
        self.run_method("before_cancel")
    elif self._action=="update_after_submit":
        self.run_method("before_update_after_submit")
def run_post_save_methods(self):
    """Run standard methods after `INSERT` or `UPDATE`. Standard Methods are:
    - `on_update` for **Save**.
    - `on_update`, `on_submit` for **Submit**.
    - `on_cancel` for **Cancel**
    - `update_after_submit` for **Update after Submit**"""
    if self._action=="save":
        self.run_method("on_update")
    elif self._action=="submit":
        self.run_method("on_update")
        self.run_method("on_submit")
        if not self.flags.ignore_submit_comment:
            self.add_comment("Submitted")
    elif self._action=="cancel":
        self.run_method("on_cancel")
        # cancelling is only allowed when no live documents link here
        self.check_no_back_links_exist()
        if not self.flags.ignore_submit_comment:
            self.add_comment("Cancelled")
    elif self._action=="update_after_submit":
        self.run_method("on_update_after_submit")
    # common post-save bookkeeping for every action
    self.run_method('on_change')
    self.update_timeline_doc()
    self.clear_cache()
    self.notify_update()
    # undo the in-flight marker added by set_user_and_timestamp
    if (self.doctype, self.name) in frappe.flags.currently_saving:
        frappe.flags.currently_saving.remove((self.doctype, self.name))
    # force re-load on next access
    self.latest = None
def clear_cache(self):
    """Drop this doctype's last-modified cache entry and any
    'linked with' caches that mention this document."""
    frappe.cache().hdel("last_modified", self.doctype)
    self.clear_linked_with_cache()
def clear_linked_with_cache(self):
    """Invalidate cached 'linked with' data for every document this one
    (or any child row) links to via Link / Dynamic Link fields."""
    cache = frappe.cache()
    def _clear_cache(d):
        for df in (d.meta.get_link_fields() + d.meta.get_dynamic_link_fields()):
            if d.get(df.fieldname):
                # Link fields name their target doctype in `options`;
                # Dynamic Links store it in the field named by `options`
                doctype = df.options if df.fieldtype=="Link" else d.get(df.options)
                name = d.get(df.fieldname)
                if df.fieldtype=="Dynamic Link":
                    # clear linked doctypes list
                    cache.hdel("linked_doctypes", doctype)
                # for all users, delete linked with cache and per doctype linked with cache
                cache.delete_value("user:*:linked_with:{doctype}:{name}".format(doctype=doctype, name=name))
                cache.delete_value("user:*:linked_with:{doctype}:{name}:*".format(doctype=doctype, name=name))
    _clear_cache(self)
    for d in self.get_all_children():
        _clear_cache(d)
def reset_seen(self):
    '''Clear _seen property and set current user as seen'''
    # only applies to doctypes that opt into seen-tracking
    if getattr(self.meta, 'track_seen', False):
        self._seen = json.dumps([frappe.session.user])
def notify_update(self):
    """Publish realtime that the current document is modified"""
    frappe.publish_realtime("doc_update", {"modified": self.modified, "doctype": self.doctype, "name": self.name},
        doctype=self.doctype, docname=self.name, after_commit=True)
    # list views only exist for regular doctypes, so skip the list_update
    # event for read-only, single and child-table doctypes
    if not self.meta.get("read_only") and not self.meta.get("issingle") and \
        not self.meta.get("istable"):
        frappe.publish_realtime("list_update", {"doctype": self.doctype}, after_commit=True)
def check_no_back_links_exist(self):
    """Check if document links to any active document before Cancel."""
    # imported lazily to avoid a circular import with delete_doc
    from frappe.model.delete_doc import check_if_doc_is_linked, check_if_doc_is_dynamically_linked
    if not self.flags.ignore_links:
        check_if_doc_is_linked(self, method="Cancel")
        check_if_doc_is_dynamically_linked(self, method="Cancel")
@staticmethod
def whitelist(f):
    """Decorator: Whitelist method to be called remotely via REST API.
    NOTE(review): this re-defines the `whitelist` staticmethod declared
    earlier in this class; both are functionally identical, and the
    duplicate should probably be removed."""
    f.whitelisted = True
    return f
@staticmethod
def hook(f):
    """Decorator: Make method `hookable` (i.e. extensible by another app).
    Note: If each hooked method returns a value (dict), then all returns are
    collated in one dict and returned. Ideally, don't return values in hookable
    methods, set properties in the document."""
    def add_to_return_value(self, new_return_value):
        # dict results are merged across hooks; any other result replaces
        # (or falls back to) the previously collected value
        if isinstance(new_return_value, dict):
            if not self.get("_return_value"):
                self._return_value = {}
            self._return_value.update(new_return_value)
        else:
            self._return_value = new_return_value or self.get("_return_value")
    def compose(fn, *hooks):
        # run the original method first, then every hook in order, threading
        # the collated result through self._return_value
        def runner(self, method, *args, **kwargs):
            add_to_return_value(self, fn(self, *args, **kwargs))
            for f in hooks:
                add_to_return_value(self, f(self, method, *args, **kwargs))
            return self._return_value
        return runner
    def composer(self, *args, **kwargs):
        hooks = []
        method = f.__name__
        doc_events = frappe.get_doc_hooks()
        # doctype-specific handlers run before wildcard ("*") handlers
        for handler in doc_events.get(self.doctype, {}).get(method, []) \
            + doc_events.get("*", {}).get(method, []):
            hooks.append(frappe.get_attr(handler))
        composed = compose(f, *hooks)
        return composed(self, method, *args, **kwargs)
    return composer
def is_whitelisted(self, method):
    """Raise NotFound / Forbidden unless `method` exists on this document
    and carries the `whitelisted` flag set by the whitelist decorator."""
    fn = getattr(self, method, None)
    if not fn:
        raise NotFound("Method {0} not found".format(method))
    if not getattr(fn, "whitelisted", False):
        raise Forbidden("Method {0} not whitelisted".format(method))
def validate_value(self, fieldname, condition, val2, doc=None, raise_exception=None):
    """Check that value of fieldname should be 'condition' val2
    else throw Exception."""
    # friendlier wording for set/prefix operators in the error message
    error_condition_map = {
        "in": _("one of"),
        "not in": _("none of"),
        "^": _("beginning with"),
    }
    if not doc:
        doc = self
    val1 = doc.get_value(fieldname)
    df = doc.meta.get_field(fieldname)
    # coerce the expected value to the field's type before comparing
    val2 = doc.cast(val2, df)
    if not frappe.compare(val1, condition, val2):
        label = doc.meta.get_label(fieldname)
        condition_str = error_condition_map.get(condition, condition)
        # NOTE(review): .format() runs *inside* _() here, so the translation
        # lookup receives an already-formatted string — likely defeats i18n;
        # usually the pattern is _("...{0}...").format(...)
        if doc.parentfield:
            msg = _("Incorrect value in row {0}: {1} must be {2} {3}".format(doc.idx, label, condition_str, val2))
        else:
            msg = _("Incorrect value: {0} must be {1} {2}".format(label, condition_str, val2))
        # raise passed exception or True
        msgprint(msg, raise_exception=raise_exception or True)
def validate_table_has_rows(self, parentfield, raise_exception=None):
    """Raise exception if Table field is empty."""
    rows = self.get(parentfield)
    if isinstance(rows, list) and len(rows) > 0:
        return
    label = self.meta.get_label(parentfield)
    frappe.throw(_("Table {0} cannot be empty").format(label), raise_exception or frappe.EmptyTableError)
def round_floats_in(self, doc, fieldnames=None):
    """Round floats for all `Currency`, `Float`, `Percent` fields for the given doc.
    :param doc: Document whose numeric properties are to be rounded.
    :param fieldnames: [Optional] List of fields to be rounded."""
    if not fieldnames:
        # default: every numeric field defined on the doc's meta
        fieldnames = (df.fieldname for df in
            doc.meta.get("fields", {"fieldtype": ["in", ["Currency", "Float", "Percent"]]}))
    for fieldname in fieldnames:
        # precision is resolved per-field (and per-parentfield for child rows)
        doc.set(fieldname, flt(doc.get(fieldname), self.precision(fieldname, doc.parentfield)))
def get_url(self):
    """Returns Desk URL for this document. `/desk#Form/{doctype}/{name}`"""
    return "/desk#Form/{0}/{1}".format(self.doctype, self.name)
def add_comment(self, comment_type, text=None, comment_by=None, link_doctype=None, link_name=None):
    """Add a comment to this document.
    :param comment_type: e.g. `Comment`. See Communication for more info.
    :param text: comment body; falls back to the comment_type string.
    :param comment_by: sender; defaults to the session user.
    Returns the inserted Communication document."""
    comment = frappe.get_doc({
        "doctype":"Communication",
        "communication_type": "Comment",
        "sender": comment_by or frappe.session.user,
        "comment_type": comment_type,
        "reference_doctype": self.doctype,
        "reference_name": self.name,
        "content": text or comment_type,
        "link_doctype": link_doctype,
        "link_name": link_name
    }).insert(ignore_permissions=True)
    return comment
def add_seen(self, user=None):
    '''add the given/current user to list of users who have seen this document (_seen)'''
    if not user:
        user = frappe.session.user
    if self.meta.track_seen:
        # _seen is stored as a JSON-encoded list of user names
        if self._seen:
            _seen = json.loads(self._seen)
        else:
            _seen = []
        if user not in _seen:
            _seen.append(user)
            # update_modified=False: seeing a doc is not an edit
            self.db_set('_seen', json.dumps(_seen), update_modified=False)
            frappe.local.flags.commit = True
def get_signature(self):
    """Returns signature (hash) for private URL."""
    # SHA-224 of the creation-timestamp string. NOTE(review): the input is
    # just a timestamp (low entropy), so treat this as an obfuscated token,
    # not a cryptographic secret
    return hashlib.sha224(get_datetime_str(self.creation)).hexdigest()
def get_liked_by(self):
    """Return the list of users who liked this document, decoded from the
    JSON-encoded `_liked_by` attribute (empty list when unset)."""
    raw = getattr(self, "_liked_by", None)
    return json.loads(raw) if raw else []
def set_onload(self, key, value):
    """Stash a key/value on the transient `__onload` dict sent to the
    client alongside the document (not persisted)."""
    if not self.get("__onload"):
        self.set("__onload", frappe._dict())
    self.get("__onload")[key] = value
def update_timeline_doc(self):
    """Re-point Communications referencing this document at the current
    timeline document (the doc named by `meta.timeline_field`)."""
    if frappe.flags.in_install or not self.meta.get("timeline_field"):
        return
    timeline_doctype = self.meta.get_link_doctype(self.meta.timeline_field)
    timeline_name = self.get(self.meta.timeline_field)
    if not (timeline_doctype and timeline_name):
        return
    # update timeline doc in communication if it is different than current timeline doc
    frappe.db.sql("""update `tabCommunication`
        set timeline_doctype=%(timeline_doctype)s, timeline_name=%(timeline_name)s
        where
            reference_doctype=%(doctype)s and reference_name=%(name)s
            and (timeline_doctype is null or timeline_doctype != %(timeline_doctype)s
                or timeline_name is null or timeline_name != %(timeline_name)s)""",
        {
            "doctype": self.doctype,
            "name": self.name,
            "timeline_doctype": timeline_doctype,
            "timeline_name": timeline_name
        })
def queue_action(self, action, **kwargs):
    '''Run an action in background. If the action has an inner function,
    like _submit for submit, it will call that instead

    :param action: one of 'save', 'submit', 'cancel'
    :raises ValueError: for any other action name'''
    if action in ('save', 'submit', 'cancel'):
        # set docstatus explicitly again due to inconsistent action
        self.docstatus = {'save':0, 'submit':1, 'cancel': 2}[action]
    else:
        # BUG FIX: the original `raise '...'` raised a bare string, which is
        # invalid (exceptions must derive from BaseException) and produced a
        # confusing TypeError instead of a meaningful error
        raise ValueError('Action must be one of save, submit, cancel')
    # call _submit instead of submit, so you can override submit to call
    # run_delayed based on some action
    # See: Stock Reconciliation
    if hasattr(self, '_' + action):
        action = '_' + action
    self.lock()
    enqueue('frappe.model.document.execute_action', doctype=self.doctype, name=self.name,
        action=action, **kwargs)
def execute_action(doctype, name, action, **kwargs):
    '''Execute an action on a document (called by background worker)'''
    doc = frappe.get_doc(doctype, name)
    # the doc was locked by queue_action before enqueueing
    doc.unlock()
    try:
        getattr(doc, action)(**kwargs)
    except frappe.ValidationError:
        # validation failure: surface the last user-facing message as a comment
        doc.add_comment('Comment',
            _('Action Failed') + '<br><br>' + json.loads(frappe.local.message_log[-1]).get('message'))
        doc.notify_update()
    except Exception:
        # unexpected failure: attach the full traceback as a comment
        doc.add_comment('Comment',
            _('Action Failed') + '<pre><code>' + frappe.get_traceback() + '</pre></code>')
        doc.notify_update()
| {
"content_hash": "3141aaf4417951fc4ee7f95e46634bb2",
"timestamp": "",
"source": "github",
"line_count": 1008,
"max_line_length": 112,
"avg_line_length": 31.29265873015873,
"alnum_prop": 0.6846843990742796,
"repo_name": "vqw/frappe",
"id": "01d5847789d66bad7e6a578c215b68b75b1b41bb",
"size": "31644",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "frappe/model/document.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "325010"
},
{
"name": "GCC Machine Description",
"bytes": "1256"
},
{
"name": "HTML",
"bytes": "188788"
},
{
"name": "JavaScript",
"bytes": "1124997"
},
{
"name": "Python",
"bytes": "1497030"
},
{
"name": "Shell",
"bytes": "517"
}
],
"symlink_target": ""
} |
"""
ArrowHead
=========
Micro-framework for flowchart computing
_ _ _ _ _ _ _
____|_|_|_|_|_|_| |_| | |___|_|_|__|_|_| \\____________\\
| | | \ | \ |_| |_|_| | | |_ | | |_/ /
Arrowhead is a framework for constructing flow charts composed of steps
connected by (possibly) conditional branches. Such flows can be executed,
inspected (at runtime) and visualized (for documentation and verification)
The idea of flowchart computing is designed in a way that mimics a hand-made
drawing, typically on a white-board or a piece of paper, that describes some
process as a graph of interconnected steps.
Traditionally, once designed, the process is implemented as a collection of
functions and classes. Very often the original idea of how the process was
supposed to work gets lost, especially after making changes over time. Usually it is
impossible to easily reconstruct the initial idea from a complex implementation
of that idea.
As an added issue, it is non-trivial to create derivative processes that
somehow override, change, replace or remove parts of the process. This can
affect one step or a particular group of steps.
The arrowhead framework aims to address both problems.
The process (or flow, as some communities like to call it) is encoded as a
class derived from the :class:`Flow` class. Inside it, each method decorated
with the ``@step`` decorator becomes a distinct step.
Each step may connect to other steps with arrows. This can be done with the
@arrow decorator. Arrows may be constrained by passing ``value=`` keyword
argument. Such arrows are followed only when ``step.value`` is equal to
``arrow.value``. Arrows may also be constrained by passing ``error=`` keyword
argument. Such arrows are followed only when an exception of that (or
derivative) type is raised by the step function. Arrows without either
constraint are always followed.
Each flow needs to have one initial and at least one accepting step. Flow
execution always starts with the initial step. The initial step can be changed
in derivative classes, in that case the base initial step is no longer
considered initial.
"""
__all__ = ['Flow', 'step', 'arrow', 'main']
__version__ = (1, 0, 0, "alpha", 2)
BUG_URL = "https://github.com/zyga/arrowhead"
from arrowhead.core import Flow
from arrowhead.decorators import step, arrow
from arrowhead.main import main
| {
"content_hash": "6ca7d619ee2d3f6f8856c15378ca8374",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 79,
"avg_line_length": 44.148148148148145,
"alnum_prop": 0.7290268456375839,
"repo_name": "zyga/arrowhead",
"id": "82d16316c74cf3ae464760f45c29e4f7c9b6d25e",
"size": "2384",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "arrowhead/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "53841"
}
],
"symlink_target": ""
} |
"""Test the zapwallettxes functionality.
- start three oakcoind nodes
- create four transactions on node 0 - two are confirmed and two are
unconfirmed.
- restart node 1 and verify that both the confirmed and the unconfirmed
transactions are still available.
- restart node 0 and verify that the confirmed transactions are still
available, but that the unconfirmed transaction has been zapped.
"""
from test_framework.test_framework import OakcoinTestFramework
from test_framework.util import *
class ZapWalletTXesTest (OakcoinTestFramework):
    """Functional test for -zapwallettxes: restarting with the flag removes
    unconfirmed wallet transactions while keeping confirmed ones."""
    def __init__(self):
        super().__init__()
        self.setup_clean_chain = True
        self.num_nodes = 3
    def setup_network(self):
        super().setup_network()
        # extra connection node0 <-> node2 so wallet txs relay directly
        connect_nodes_bi(self.nodes,0,2)
    def run_test (self):
        self.log.info("Mining blocks...")
        self.nodes[0].generate(1)
        self.sync_all()
        # mine 101 blocks on node 1 so node 0's coinbase matures (50 spendable)
        self.nodes[1].generate(101)
        self.sync_all()
        assert_equal(self.nodes[0].getbalance(), 50)
        # two transactions that get confirmed by the next block...
        txid0 = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11)
        txid1 = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 10)
        self.sync_all()
        self.nodes[0].generate(1)
        self.sync_all()
        # ...and two that remain unconfirmed
        txid2 = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11)
        txid3 = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 10)
        tx0 = self.nodes[0].gettransaction(txid0)
        assert_equal(tx0['txid'], txid0) #tx0 must be available (confirmed)
        tx1 = self.nodes[0].gettransaction(txid1)
        assert_equal(tx1['txid'], txid1) #tx1 must be available (confirmed)
        tx2 = self.nodes[0].gettransaction(txid2)
        assert_equal(tx2['txid'], txid2) #tx2 must be available (unconfirmed)
        tx3 = self.nodes[0].gettransaction(txid3)
        assert_equal(tx3['txid'], txid3) #tx3 must be available (unconfirmed)
        #restart oakcoind (plain restart: unconfirmed txs survive)
        self.nodes[0].stop()
        oakcoind_processes[0].wait()
        self.nodes[0] = start_node(0,self.options.tmpdir)
        tx3 = self.nodes[0].gettransaction(txid3)
        assert_equal(tx3['txid'], txid3) #tx must be available (unconfirmed)
        self.nodes[0].stop()
        oakcoind_processes[0].wait()
        #restart oakcoind with zapwallettxes
        self.nodes[0] = start_node(0,self.options.tmpdir, ["-zapwallettxes=1"])
        assert_raises(JSONRPCException, self.nodes[0].gettransaction, [txid3])
        # there must be an exception because the unconfirmed wallet tx must be gone by now
        tx0 = self.nodes[0].gettransaction(txid0)
        assert_equal(tx0['txid'], txid0) #tx0 (confirmed) must still be available because it was confirmed
# run the functional test when executed directly
if __name__ == '__main__':
    ZapWalletTXesTest ().main ()
| {
"content_hash": "807cf9ba77d954c4330e586e7132b5d4",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 106,
"avg_line_length": 36.92307692307692,
"alnum_prop": 0.6350694444444445,
"repo_name": "stratton-oakcoin/oakcoin",
"id": "1c9ee6127e9a307af5edb4b5670b36a4b621e018",
"size": "3094",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/functional/zapwallettxes.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "28456"
},
{
"name": "C",
"bytes": "694076"
},
{
"name": "C++",
"bytes": "5098228"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "HTML",
"bytes": "51512"
},
{
"name": "Java",
"bytes": "30290"
},
{
"name": "M4",
"bytes": "190257"
},
{
"name": "Makefile",
"bytes": "112101"
},
{
"name": "Objective-C",
"bytes": "3892"
},
{
"name": "Objective-C++",
"bytes": "7240"
},
{
"name": "Protocol Buffer",
"bytes": "2328"
},
{
"name": "Python",
"bytes": "1152477"
},
{
"name": "QMake",
"bytes": "756"
},
{
"name": "Shell",
"bytes": "53022"
}
],
"symlink_target": ""
} |
from django.core.management.base import BaseCommand, CommandError
from optparse import make_option
import os, sys, subprocess
from django.conf import settings
from emma.interface.models import Metadata, Keyword
import logging
#--------------------------------------------------------------------------------------------------
# Logging
# A directory will be created the first time watch is run.
#--------------------------------------------------------------------------------------------------
# root-logger configuration: DEBUG level, timestamped lines, written to
# subj2kw.log next to the app; filemode='w' truncates the log on every run
logging.basicConfig(level=logging.DEBUG,
    format='%(asctime)s %(levelname)-8s %(message)s',
    datefmt='%a, %d %b %Y %H:%M:%S',
    filename=os.path.join(settings.APP_ROOT, 'subj2kw.log'),
    filemode='w')
class Command(BaseCommand):
    """
    Writes subject to keywords if keywords is empty.

    Dry-run by default: only reports candidates. Pass -r/--for-real to
    actually copy subject into the Metadata and Keyword models.
    """
    option_list = BaseCommand.option_list + (
        make_option('-r', '--for-real',
            action='store_true',
            dest='action',
            default=False,
            help='Do the action.'),
        )
    def handle(self, *args, **options):
        # (Python 2 management command — bare `print` statements throughout)
        action = options.get('action', False)
        print """
        Looking for empty keywords fields in Metadata model.
        Empty fields will be reported.
        Add the -r flag to save to the Metadata & the Keyword
        model.
        """
        m = Metadata.objects.all()
        for i in m:
            if not i.keywords and i.subject:
                print '---> keywords empty for %s but we have %s in subject' % (i.image_LNID, i.subject)
                if action:
                    i.keywords = i.subject
                    i.save()
                    print 'saved to Metadata model'
                    # also save to the Keyword model
                    k = Keyword.objects.get(image_LNID=i.image_LNID)
                    k.keywords = i.subject
                    k.save()
                    print 'saved to Keyword model'
                else:
                    print 'add the -r flag to save....'
            if not i.keywords and not i.subject:
                # nothing to copy from: record for manual follow-up
                logging.warn('%s has neither keywords nor subject' % i.image_LNID)
        print "done"
| {
"content_hash": "55998fa324ebd20c436409f404750919",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 104,
"avg_line_length": 29.523809523809526,
"alnum_prop": 0.44233870967741934,
"repo_name": "djangowebstudio/emma",
"id": "dea69598fe72f584fa30736aeb84c1b49b9e9977",
"size": "2480",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "controller/management/commands/subj2kw.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "128352"
},
{
"name": "HTML",
"bytes": "161573"
},
{
"name": "JavaScript",
"bytes": "407437"
},
{
"name": "Python",
"bytes": "470191"
}
],
"symlink_target": ""
} |
def extractPickupnovelsCom(item):
    '''
    Parser for 'pickupnovels.com'
    '''
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    # skip preview posts and titles carrying no chapter/volume information
    if not (chp or vol) or "preview" in item['title'].lower():
        return None
    # feed tag -> (series name, translation type); insertion order preserved
    tag_to_release = {
        'PRC': ('PRC', 'translated'),
        'Loiterous': ('Loiterous', 'oel'),
    }
    for tagname, (name, tl_type) in tag_to_release.items():
        if tagname in item['tags']:
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
| {
"content_hash": "ed0188fd635d0206446da7ee7b00b7cb",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 104,
"avg_line_length": 25.904761904761905,
"alnum_prop": 0.6268382352941176,
"repo_name": "fake-name/ReadableWebProxy",
"id": "509fbf0d80cebf6a1a4ff9dba02e4952c1264ee9",
"size": "545",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "WebMirror/management/rss_parser_funcs/feed_parse_extractPickupnovelsCom.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "105811"
},
{
"name": "Dockerfile",
"bytes": "1178"
},
{
"name": "HTML",
"bytes": "119737"
},
{
"name": "JavaScript",
"bytes": "3006524"
},
{
"name": "Jupyter Notebook",
"bytes": "148075"
},
{
"name": "Mako",
"bytes": "1454"
},
{
"name": "Python",
"bytes": "5264346"
},
{
"name": "Shell",
"bytes": "1059"
}
],
"symlink_target": ""
} |
""" Contains the logic for `aq add cluster systemlist`. """
from aquilon.aqdb.model import SystemList
from aquilon.worker.broker import BrokerCommand # pylint: disable=W0611
from aquilon.worker.commands.add_cluster_member_priority import \
CommandAddClusterMemberPriority
class CommandAddClusterSystemList(CommandAddClusterMemberPriority):
    """Implements `aq add cluster systemlist` by specialising the generic
    add-cluster-member-priority command for SystemList resources."""
    # request parameters the broker requires for this command
    required_parameters = ["cluster", "member", "priority"]
    # resource type managed by this command
    resource_class = SystemList
    # name of the request parameter carrying the priority value
    priority_parameter = "priority"
| {
"content_hash": "f737bbea930ffc74ac845f011c656996",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 72,
"avg_line_length": 36.69230769230769,
"alnum_prop": 0.7819706498951782,
"repo_name": "guillaume-philippon/aquilon",
"id": "011b052a940f8c0f1fef8e1b087a0cd634e92a68",
"size": "1195",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/aquilon/worker/commands/add_cluster_systemlist.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "DIGITAL Command Language",
"bytes": "3791"
},
{
"name": "Makefile",
"bytes": "5024"
},
{
"name": "Mako",
"bytes": "3996"
},
{
"name": "PLSQL",
"bytes": "69088"
},
{
"name": "Perl",
"bytes": "5030"
},
{
"name": "Python",
"bytes": "4257490"
},
{
"name": "SQLPL",
"bytes": "869"
},
{
"name": "Shell",
"bytes": "22083"
}
],
"symlink_target": ""
} |
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.backend.core.targets.dependencies import Dependencies
from pants.backend.jvm.targets.java_library import JavaLibrary
from pants.backend.jvm.tasks.jvm_platform_analysis import JvmPlatformExplain, JvmPlatformValidate
from pants_test.tasks.task_test_base import TaskTestBase
class JvmPlatformAnalysisTestMixin(object):
    """Common helper methods for testing JvmPlatformValidate and JvmPlatformExplain.
    Mostly for building sets of targets that are interesting for testing.
    """
    def _java(self, name, platform=None, deps=None, sources=None):
        # JavaLibrary target pinned to a named jvm platform ('6'/'7'/'8')
        return self.make_target(spec='java:{}'.format(name),
            target_type=JavaLibrary,
            platform=platform,
            dependencies=deps or [],
            sources=sources)
    def _plain(self, name, deps=None):
        """Make a non-jvm target, useful for testing non-jvm intermediate dependencies."""
        return self.make_target(spec='java:{}'.format(name),
            target_type=Dependencies,
            dependencies=deps or [],)
    def simple_task(self, targets, **options):
        # registers three jvm platforms (6/7/8, default 6) and returns a
        # task instance rooted at `targets`
        self.set_options(**options)
        platforms = {
            '6': { 'source': 6, 'target': 6, 'args': [], },
            '7': { 'source': 7, 'target': 7, 'args': [], },
            '8': { 'source': 8, 'target': 8, 'args': [], },
        }
        self.set_options_for_scope('jvm-platform', platforms=platforms, default_platform='6')
        context = self.context(target_roots=targets)
        return self.create_task(context)
    def bad_targets(self):
        # invalid: a target-6 library depending on a target-7 library
        one = self._java('one', '7')
        two = self._java('two', '6', deps=[one])
        return [one, two]
    def good_targets(self):
        # valid: target-7 may depend on target-6
        one = self._java('one', '6')
        two = self._java('two', '7', deps=[one])
        return [one, two]
    def bad_transitive_targets(self):
        # same invalid dependency, but via a non-jvm intermediate target
        one = self._java('one', '7')
        middle = self._plain('middle', deps=[one])
        two = self._java('two', '6', deps=[middle])
        return [one, two, middle]
    def good_transitive_targets(self):
        one = self._java('one', '6')
        middle = self._plain('middle', deps=[one])
        two = self._java('two', '7', deps=[middle])
        return [one, two, middle]
    def impossible_targets(self):
        a = self._java('a', '8')
        b = self._java('b', '7', deps=[a])
        c = self._java('c', '6', deps=[b])
        # :b depends on :a, which means :b can't have a target lower than 8.
        # :b is depended on by :c, which means :b can't have a target level higher than 6.
        return [a, b, c]
class JvmPlatformValidateTest(JvmPlatformAnalysisTestMixin, TaskTestBase):
    """Tests for the JvmPlatformValidate task."""

    @classmethod
    def task_type(cls):
        return JvmPlatformValidate

    def assert_no_warning(self, targets, **options):
        """Execute over <targets> and assert nothing was reported."""
        self.assertIsNone(self.simple_task(targets, **options).execute())

    def assert_warning(self, targets, **options):
        """Execute over <targets> and assert something was reported."""
        self.assertIsNotNone(self.simple_task(targets, **options).execute())

    def _assert_fatal(self, targets):
        """Running with check=fatal over <targets> must raise."""
        with self.assertRaises(JvmPlatformValidate.IllegalJavaTargetLevelDependency):
            self.simple_task(targets, check='fatal').execute()

    def test_good_works(self):
        self.assert_no_warning(self.good_targets(), check='fatal')

    def test_transitive_good_works(self):
        self.assert_no_warning(self.good_transitive_targets(), check='fatal')

    def test_bad_fails(self):
        self._assert_fatal(self.bad_targets())

    def test_transitive_bad_fails(self):
        self._assert_fatal(self.bad_transitive_targets())

    def test_impossible_fails(self):
        self._assert_fatal(self.impossible_targets())

    def test_bad_ignored(self):
        self.assert_no_warning(self.bad_targets(), check='off')

    def test_transitive_bad_ignored(self):
        self.assert_no_warning(self.bad_transitive_targets(), check='off')

    def test_bad_warned(self):
        self.assert_warning(self.bad_targets(), check='warn')

    def test_transitive_bad_warned(self):
        self.assert_warning(self.bad_transitive_targets(), check='warn')

    def test_inverted_ordering_works(self):
        self.assert_warning(self.bad_targets(), check='warn',
                            children_before_parents=True)
class JvmPlatformExplainTest(JvmPlatformAnalysisTestMixin, TaskTestBase):
    """Tests for the JvmPlatformExplain console task."""

    @classmethod
    def task_type(cls):
        return JvmPlatformExplain

    def get_lines(self, targets, trimmed=True, **options):
        """Run the task and return its console output as a tuple of lines.

        When trimmed, blank/whitespace-only lines are dropped and the rest
        are stripped.
        """
        raw = self.simple_task(targets, **options).console_output(targets)
        if not trimmed:
            return tuple(raw)
        stripped = (line.strip() for line in raw if line)
        return tuple(line for line in stripped if line)

    def assert_lines(self, lines, targets, **options):
        self.assertEqual(lines, self.get_lines(targets, **options))

    def assert_length(self, count, targets, **options):
        self.assertEqual(count, len(self.get_lines(targets, **options)))

    def test_change_only_quiet(self):
        output = self.get_lines(self.good_targets(), only_broken=True)
        self.assertEqual(1, len(output))
        self.assertIn('Allowable JVM platform ranges', output[0])

    def test_undetailed_good(self):
        targets = self.good_transitive_targets()
        self.assert_length(len(targets), targets, detailed=False)

    def test_broken(self):
        bad_dep = self._java('one', '7')
        bad_root = self._java('two', '6', deps=[bad_dep])
        expected = (
            'Allowable JVM platform ranges (* = anything):',
            'java:one: <=1.6 (is 1.7)',
            'max=1.6 because of dependees:',
            'java:two',
            'java:two: 1.7+ (is 1.6)',
            'min=1.7 because of dependencies:',
            'java:one',
        )
        self.assert_lines(expected, [bad_dep, bad_root],
                          only_broken=True, colors=False)

    def test_upgradeable(self):
        base = self._java('one', '6')
        seven = self._java('two', '7', deps=[base])
        six = self._java('three', '6', deps=[base])
        lines = self.get_lines([base, seven, six], colors=False, ranges=False,
                               upgradeable=True)
        text = '\n'.join(lines)
        self.assertNotIn('java:one', text)
        self.assertIn('java:three', text)
        self.assertIn('java:two', text)

    def test_downgradeable(self):
        base = self._java('one', '6')
        seven = self._java('two', '7', deps=[base])
        pinned = self._java('nope', '6', deps=[base])
        text = '\n'.join(self.get_lines([base, seven, pinned], colors=False,
                                        ranges=False, downgradeable=True))
        self.assertIn('java:one', text)
        self.assertNotIn('java:nope', text)
        self.assertIn('java:two', text)
| {
"content_hash": "dc123bd88c069d63e8d2fe515e53599a",
"timestamp": "",
"source": "github",
"line_count": 174,
"max_line_length": 101,
"avg_line_length": 38.206896551724135,
"alnum_prop": 0.6459085439229844,
"repo_name": "laurentgo/pants",
"id": "b76edb61a8a8d092fac8808884af79d88b769ea2",
"size": "6795",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/python/pants_test/backend/jvm/tasks/test_jvm_platform_analysis.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "767"
},
{
"name": "CSS",
"bytes": "11139"
},
{
"name": "GAP",
"bytes": "4818"
},
{
"name": "HTML",
"bytes": "68162"
},
{
"name": "Java",
"bytes": "314216"
},
{
"name": "JavaScript",
"bytes": "10157"
},
{
"name": "Protocol Buffer",
"bytes": "7038"
},
{
"name": "Python",
"bytes": "3200703"
},
{
"name": "Scala",
"bytes": "77693"
},
{
"name": "Shell",
"bytes": "47285"
},
{
"name": "Thrift",
"bytes": "3002"
}
],
"symlink_target": ""
} |
import nusoft.package.local as local_package
import os
class Geant4(local_package.LocalPackage):
    """ The GEANT4 installation package.

    :param _tar_name: name of the tar file to download/install
    :param _version: version of GEANT4 to install.
    :param _source_path: path to place the source files
    """
    def __init__(self, system, repository):
        """ Initialise this geant4 installation package.

        :param system: class that manages system commands
        :type system: :class:`nusoft.system.System` instance
        :param repository: local name of the repository the package is from
        """
        # _version is supplied by the concrete subclass (see the `versions`
        # list at module level).
        super(Geant4, self).__init__(self._version, system, repository)
        self._tar_name = self._version + ".tar.gz"
        # Sources are unpacked next to the install tree in "<name>-source".
        self._source_path = os.path.join(self._system.get_install_path(), "%s-source" % self._name)

    def get_dependencies(self):
        """ Return a list of dependency names

        :returns: list of dependency package names
        :rtype: list
        """
        return ["make", "g++", "gcc", "cmake-2.8.12.1", "clhep-2.1.0.1"]

    def _download(self):
        """ Download the geant4 tar file."""
        self._system.download("http://geant4.web.cern.ch/geant4/support/source/" + self._tar_name)

    def _install(self):
        """ Untar the tar file and install it to the install path."""
        # The final argument strips the tarball's single top-level directory.
        self._system.untar(self._tar_name, self._source_path, 1)
        if not self._system.exists(self.get_install_path()):
            os.makedirs(self.get_install_path())
        # Out-of-source cmake build; GEANT4 is pointed at the CLHEP copy
        # installed as a dependency rather than any system-wide one.
        cmake_opts = ["-DCMAKE_INSTALL_PREFIX=%s" % self.get_install_path(),
                      "-DCLHEP_VERSION_OK=2.1.0.1",
                      "-DCLHEP_LIBRARIES=%s" % os.path.join(self._dependencies["clhep-2.1.0.1"].get_install_path(), "lib"),
                      "-DCLHEP_INCLUDE_DIRS=%s" % os.path.join(self._dependencies["clhep-2.1.0.1"].get_install_path(), "include"),
                      self._source_path]
        # Use the cmake binary installed as a dependency, not the system cmake.
        cmake = os.path.join(self._dependencies["cmake-2.8.12.1"].get_install_path(), "bin/cmake")
        self._system.configure(command=cmake, args=cmake_opts, cwd=self.get_install_path())
        self._system.make(cwd=self.get_install_path())
        self._system.make(args=['install'], cwd=self.get_install_path())

    def _update(self):
        """ Nothing to do here..."""
        pass

    def _remove(self):
        """ Remove the install directory."""
        self._system.remove(self.get_install_path())
        self._system.remove(self._source_path)

    def _is_installed(self):
        """ Check if geant4 is installed by looking for the libG4event library.

        :return: True if installed
        """
        # Libraries may land under lib/ or lib64/ depending on the platform;
        # accept either location.
        return self._system.is_library(os.path.join(self.get_install_path(), "lib/libG4event")) or \
            self._system.is_library(os.path.join(self.get_install_path(), "lib64/libG4event"))
# The versions of geant4 that can be installed.
# Each entry is a dynamically created subclass of Geant4 whose _version class
# attribute names the release tarball to fetch and build.
versions = [type('Geant4944', (Geant4, object), {"_version" : "geant4.9.4.p04"})]
| {
"content_hash": "e68abf56ff27999732de2dcb92069664",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 130,
"avg_line_length": 49.52459016393443,
"alnum_prop": 0.6160211850380669,
"repo_name": "pgjones/nusoft",
"id": "aaa7e957a7eb5cac580192dae0c47db1c2017ef1",
"size": "3268",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/hyperk/geant4.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "135460"
},
{
"name": "Shell",
"bytes": "6848"
}
],
"symlink_target": ""
} |
__version__=''' $Id: fonts.py 3342 2008-12-12 15:55:34Z andy $ '''
__doc__='''Utilities to associate bold and italic versions of fonts into families
Bold, italic and plain fonts are usually implemented in separate disk files;
but non-trivial apps want <b>this</b> to do the right thing. We therefore
need to keep 'mappings' between the font family name and the right group
of up to 4 implementation fonts to use.
Most font-handling code lives in pdfbase, and this probably should too.
'''
import sys, os
###############################################################################
# A place to put useful font stuff
###############################################################################
#
# Font Mappings
# The brute force approach to finding the correct postscript font name;
# much safer than the rule-based ones we tried.
# preprocessor to reduce font face names to the shortest list
# possible. Add any aliases you wish; it keeps looking up
# until it finds no more translations to do. Any input
# will be lowercased before checking.
_family_alias = {
    'serif': 'times',
    'sansserif': 'helvetica',
    'monospaced': 'courier',
    'arial': 'helvetica'
    }

#maps a piddle font to a postscript one.
_tt2ps_map = {
    #face, bold, italic -> ps name
    ('times', 0, 0): 'Times-Roman',
    ('times', 1, 0): 'Times-Bold',
    ('times', 0, 1): 'Times-Italic',
    ('times', 1, 1): 'Times-BoldItalic',
    ('courier', 0, 0): 'Courier',
    ('courier', 1, 0): 'Courier-Bold',
    ('courier', 0, 1): 'Courier-Oblique',
    ('courier', 1, 1): 'Courier-BoldOblique',
    ('helvetica', 0, 0): 'Helvetica',
    ('helvetica', 1, 0): 'Helvetica-Bold',
    ('helvetica', 0, 1): 'Helvetica-Oblique',
    ('helvetica', 1, 1): 'Helvetica-BoldOblique',
    # there is only one Symbol font
    ('symbol', 0, 0): 'Symbol',
    ('symbol', 1, 0): 'Symbol',
    ('symbol', 0, 1): 'Symbol',
    ('symbol', 1, 1): 'Symbol',
    # ditto for dingbats
    ('zapfdingbats', 0, 0): 'ZapfDingbats',
    ('zapfdingbats', 1, 0): 'ZapfDingbats',
    ('zapfdingbats', 0, 1): 'ZapfDingbats',
    ('zapfdingbats', 1, 1): 'ZapfDingbats',
    }

# Inverse map: lowercase postscript name -> (face, bold, italic).
# Several face tuples share one postscript name (Symbol, ZapfDingbats); keep
# the first (plain) variant for those.  The original guard tested the face
# tuple `k` for membership in _ps2tt_map (which is keyed by postscript name),
# so it never fired and the winner was arbitrary; dict.has_key() also no
# longer exists on Python 3.  `in` works on both Python 2 and 3.
_ps2tt_map = {}
for k, v in _tt2ps_map.items():
    if v.lower() not in _ps2tt_map:
        _ps2tt_map[v.lower()] = k
def ps2tt(psfn):
    """Map a postscript font name to its (family, bold, italic) triple.

    :param psfn: postscript font name; matched case-insensitively.
    :raises ValueError: if the name is not in the mapping.
    """
    psfn = psfn.lower()
    # EAFP; dict.has_key() and the `raise X, msg` form of the original are
    # Python-2-only (the latter is a SyntaxError on Python 3).
    try:
        return _ps2tt_map[psfn]
    except KeyError:
        raise ValueError("Can't determine family/bold/italic for %s" % psfn)
def tt2ps(fn, b, i):
    """Map family name + bold & italic flags to a postscript font name.

    Unknown combinations are retried by resolving the name through ps2tt()
    (it may itself be a postscript name) and OR-ing in the requested flags.

    :raises ValueError: if no concrete font exists for the combination.
    """
    key = (fn.lower(), b, i)
    if key in _tt2ps_map:
        return _tt2ps_map[key]
    # Resolve the name to a base family/style and merge the requested bits.
    family, b1, i1 = ps2tt(key[0])
    key = (family, b1 | b, i1 | i)
    if key in _tt2ps_map:
        return _tt2ps_map[key]
    # Message arguments match the original: resolved family, requested flags.
    raise ValueError("Can't find concrete font for family=%s, bold=%d, italic=%d"
                     % (family, b, i))
def addMapping(face, bold, italic, psname):
    """Register a custom font in both family/style mapping tables."""
    key = (face.lower(), bold, italic)
    _tt2ps_map[key] = psname
    _ps2tt_map[psname.lower()] = key
| {
"content_hash": "ddf2dc1422c4c497db2ee5b89063d529",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 95,
"avg_line_length": 35.90217391304348,
"alnum_prop": 0.5470784135634272,
"repo_name": "fergalmoran/Chrome2Kindle",
"id": "3da6a641245c2efe914ac1e975ec3081d8016f47",
"size": "3503",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "server/reportlab/lib/fonts.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6787"
},
{
"name": "Python",
"bytes": "3048627"
}
],
"symlink_target": ""
} |
__author__ = 'developer'
import sys
import os
import numpy as np
from pprint import pprint
from math import sqrt
from random import random
# y = mx +b
# m is slope, b is y-intercept
def computeError(coefficients, points):
    """Return the summed squared error of a polynomial over sample points.

    :param coefficients: polynomial coefficients, lowest degree first
                         (y = c0 + c1*x + c2*x**2 + ...).
    :param points: iterable of (x, y) samples.
    :returns: sum of squared residuals, or the sentinel 1 if evaluation
              overflows (behaviour kept from the original code).
    """
    total_error = 0
    for point in points:
        x, y = point[0], point[1]
        # Evaluate the polynomial at x.
        predicted = sum(c * (x ** j) for j, c in enumerate(coefficients))
        try:
            total_error += (y - predicted) ** 2
        except OverflowError:
            # Narrowed from the original bare `except:`; report and bail out
            # with the sentinel instead of crashing mid-fit.  The original
            # Python 2 `print` statement is a SyntaxError on Python 3.
            print("Number overflow")
            return 1
    return total_error
def sgd(coefficients, points, learningRate):
    """Fit polynomial coefficients to points by stochastic gradient descent.

    Converts the original Python-2-only `print` statements and `xrange` to
    Python 3 syntax; the update rule and stop conditions are unchanged.

    :param coefficients: initial coefficients, lowest degree first.
    :param points: sequence of (x, y) samples.
    :param learningRate: step size for each gradient update.
    :returns: the fitted coefficient list.
    """
    degree = len(coefficients)
    gradients = [0] * degree
    new_coefficients = [0] * degree
    old_coefficients = coefficients
    print("Coefficients")
    pprint(coefficients)
    counter = 0
    error = 10
    # Stop when an iteration leaves the coefficients unchanged, after 4000
    # iterations, or once the squared error drops to/below the threshold.
    while new_coefficients != old_coefficients and counter < 4000 and error > 0.01:
        print("counter %i" % counter)
        old_coefficients = new_coefficients
        new_coefficients = []
        # Iterate over all points, stepping after each one (stochastic).
        for i in range(len(points)):
            x, y = points[i][0], points[i][1]
            new_coefficients = [0] * degree
            # The model prediction depends only on the current coefficients,
            # so compute it once per point instead of once per gradient term
            # (numerically identical, just hoisted out of the inner loop).
            prediction = 0
            for d in range(degree):
                prediction += coefficients[d] * (x ** d)
            # Partial gradient and update for each coefficient.
            for j in range(degree):
                gradients[j] = 2 * (prediction - y) * (x ** j)
                new_coefficients[j] = coefficients[j] - learningRate * gradients[j]
            # Make a step after computing each of the partial gradients.
            coefficients = new_coefficients
        error = computeError(coefficients, points)
        counter += 1
    return coefficients
def fitFunction(points, degree):
    """Fit a polynomial of the given degree to points via SGD and print it.

    Replaces the original Python-2-only `print`/`xrange` with Python 3 forms.

    :param points: sequence of (x, y) samples.
    :param degree: polynomial degree (degree + 1 coefficients are fitted).
    """
    # Initialise SGD with random start values in [0, 1).
    coefficients = [random() for _ in range(degree + 1)]
    coefficients = sgd(coefficients, points, 0.0004)
    print("Final result")
    pprint(coefficients)
if __name__ == "__main__":
    # Polynomial degree; may be overridden from the command line.
    degree = 2
    if len(sys.argv) >= 2:
        # Parse to int: the original kept the raw argv string here (it was
        # then shadowed by the hard-coded test value below anyway).
        degree = int(sys.argv[1])

    # Test with exact values: sample y = 0.2*x**2 + 0.2*x + 0.2 on x = 0..9.
    # (The original also carried a dead, string-quoted noisy-sample
    # generator here; it has been removed.)
    degree = 2
    a = b = c = 0.2
    points = [(i, a * (i ** 2) + b * i + c) for i in range(10)]
    print("Points of function")
    pprint(points)
    fitFunction(points, degree)
"content_hash": "1638333275e2d71b26acb3e51b2e80c8",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 87,
"avg_line_length": 26.349593495934958,
"alnum_prop": 0.5775995063252083,
"repo_name": "melhindi/Assignments_ParallelDataProcessingAndAnalysis",
"id": "b6fc31ed1ac0653a394fabcb84a4aa468d8579a8",
"size": "3263",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Assignment6/IPDPA_Assignment6_4_MEH.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "39370"
}
],
"symlink_target": ""
} |
from test_framework import generic_test
def test_collatz_conjecture(n: int) -> bool:
# TODO - you fill in here.
return False
if __name__ == '__main__':
    # Run the shared EPI judge harness and propagate its exit status.
    status = generic_test.generic_test_main('collatz_checker.py',
                                            'collatz_checker.tsv',
                                            test_collatz_conjecture)
    exit(status)
| {
"content_hash": "80f2827550d64ceee8757aecb540b7e1",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 64,
"avg_line_length": 27.846153846153847,
"alnum_prop": 0.5193370165745856,
"repo_name": "shobhitmishra/CodingProblems",
"id": "9e878cebb405cc3c9e69917bbd753852945cc4fa",
"size": "362",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "epi_judge_python/collatz_checker.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "854"
},
{
"name": "Makefile",
"bytes": "31844"
},
{
"name": "Python",
"bytes": "437556"
}
],
"symlink_target": ""
} |
from django.conf.urls import url
from hs_core import views
from hs_core.views.autocomplete import autocomplete
urlpatterns = [
    # internal API

    # --- resource files and metadata elements ---
    url(r'^_internal/(?P<shortkey>[0-9a-f-]+)/add-files-to-resource/$',
        views.add_files_to_resource, name='add_files_to_resource'),
    url(r'^_internal/(?P<shortkey>[0-9a-f-]+)/change-quota-holder/$',
        views.change_quota_holder, name='change_quota_holder'),
    url(r'^_internal/(?P<shortkey>[0-9a-f-]+)/(?P<element_name>[A-z]+)/add-metadata/$',
        views.add_metadata_element, name='add_metadata_element'),
    url(r'^_internal/(?P<shortkey>[0-9a-f-]+)/(?P<element_name>[A-z]+)/(?P<element_id>[A-z0-9]+)/update-metadata/$',
        views.update_metadata_element, name='update_metadata_element'),
    url(r'^_internal/(?P<shortkey>[0-9a-f-]+)/(?P<element_name>[A-z]+)/(?P<element_id>[A-z0-9]+)/delete-metadata/$',
        views.delete_metadata_element, name='delete_metadata_element'),
    url(r'^_internal/(?P<shortkey>[0-9a-f-]+)/delete-author/(?P<element_id>[A-z0-9]+)/$',
        views.delete_author, name='delete_author'),
    url(r'^_internal/(?P<shortkey>[0-9a-f-]+)/get-metadata/$',
        views.get_resource_metadata, name='get_metadata'),
    url(r'^_internal/(?P<shortkey>[0-9a-f-]+)/update-key-value-metadata/$',
        views.update_key_value_metadata, name="update_key_value_metadata"),
    url(r'^_internal/(?P<shortkey>[0-9a-f-]+)/relevant-tools/$',
        views.get_relevant_tools, name="get_relevant_tools"),
    url(r'^_internal/(?P<shortkey>[0-9a-f-]+)/delete-resource-file/(?P<f>[0-9]+)/$',
        views.delete_file, name='delete_file'),
    url(r'^_internal/(?P<shortkey>[A-z0-9]+)/delete-multiple-files/$',
        views.delete_multiple_files, name='delete_multiple_files'),

    # --- whole-resource lifecycle (delete / version / copy / flags) ---
    url(r'^_internal/(?P<shortkey>[0-9a-f-]+)/delete-resource/(?P<usertext>[A-z]+)/$',
        views.delete_resource, name='delete_resource'),
    url(r'^_internal/(?P<shortkey>[0-9a-f-]+)/create-new-version-resource/$',
        views.create_new_version_resource, name='create_resource_version'),
    url(r'^_internal/(?P<shortkey>[0-9a-f-]+)/copy-resource/$', views.copy_resource,
        name='copy_resource'),
    url(r'^_internal/(?P<shortkey>[0-9a-f-]+)/list-referenced-content/$', views.list_referenced_content,
        name='list_referenced_content'),
    url(r'^_internal/(?P<shortkey>[0-9a-f-]+)/rep-res-bag-to-irods-user-zone/$',
        views.rep_res_bag_to_irods_user_zone, name='replicate_bag_user_zone'),
    url(r'^_internal/(?P<shortkey>[0-9a-f-]+)/set-resource-flag/$',
        views.set_resource_flag, name='set_resource_flag'),

    # --- sharing resources with users and groups ---
    url(r'^_internal/(?P<shortkey>[0-9a-f-]+)/share-resource-with-user/(?P<privilege>[a-z]+)/(?P<user_id>[0-9]+)/$',
        views.share_resource_with_user, name='share_resource_with_user'),
    url(r'^_internal/(?P<shortkey>[0-9a-f-]+)/unshare-resource-with-user/(?P<user_id>[0-9]+)/$',
        views.unshare_resource_with_user, name='unshare_resource_with_user'),
    url(r'^_internal/(?P<shortkey>[0-9a-f-]+)/undo-share-resource-with-user/(?P<user_id>[0-9]+)/$',
        views.undo_share_resource_with_user, name='undo_share_resource_with_user'),
    url(r'^_internal/(?P<shortkey>[0-9a-f-]+)/share-resource-with-group/(?P<privilege>[a-z]+)/(?P<group_id>[0-9]+)/$',
        views.share_resource_with_group, name='share_resource_with_group'),
    url(r'^_internal/(?P<shortkey>[0-9a-f-]+)/unshare-resource-with-group/(?P<group_id>[0-9]+)/$',
        views.unshare_resource_with_group, name='unshare_resource_with_group'),
    url(r'^_internal/(?P<shortkey>[0-9a-f-]+)/undo-share-resource-with-group/(?P<group_id>[0-9]+)/$',
        views.undo_share_resource_with_group, name='undo_share_resource_with_group'),

    # --- user group management and membership requests ---
    url(r'^_internal/create-user-group/$', views.create_user_group, name='create_user_group'),
    url(r'^_internal/update-user-group/(?P<group_id>[0-9]+)$', views.update_user_group,
        name='update_user_group'),
    url(r'^_internal/delete-user-group/(?P<group_id>[0-9]+)$', views.delete_user_group,
        name='delete_user_group'),
    url(r'^_internal/restore-user-group/(?P<group_id>[0-9]+)$', views.restore_user_group,
        name='restore_user_group'),
    url(r'^_internal/share-group-with-user/(?P<group_id>[0-9]+)/(?P<user_id>[0-9]+)/(?P<privilege>[a-z]+)/$',
        views.share_group_with_user, name='share_group_with_user'),
    url(r'^_internal/unshare-group-with-user/(?P<group_id>[0-9]+)/(?P<user_id>[0-9]+)/$',
        views.unshare_group_with_user, name='unshare_group_with_user'),
    # Two routes share this view/name: with and without an explicit user id.
    url(r'^_internal/make-group-membership-request/(?P<group_id>[0-9]+)/(?P<user_id>[0-9]+)/$',
        views.make_group_membership_request, name='make_group_membership_request'),
    url(r'^_internal/make-group-membership-request/(?P<group_id>[0-9]+)/$',
        views.make_group_membership_request, name='make_group_membership_request'),
    url(r'^_internal/act-on-group-membership-request/(?P<membership_request_id>[0-9]+)/(?P<action>[a-z]+)/$',
        views.act_on_group_membership_request, name='act_on_group_membership_request'),
    url(r'^_internal/group_membership/(?P<token>[-\w]+)/(?P<uidb36>[-\w]+)/(?P<membership_request_id>[0-9]+)/',
        views.group_membership,
        name='group_membership'),

    # --- metadata review (second route carries uidb36/token in the URL) ---
    url(r'^_internal/metadata_review/(?P<shortkey>[0-9a-f-]+)/(?P<action>[a-z]+)/',
        views.metadata_review,
        name='metadata_review_noauth'),
    url(r'^_internal/metadata_review/(?P<shortkey>[0-9a-f-]+)/(?P<action>[a-z]+)/(?P<uidb36>[-\w]+)/(?P<token>[-\w]+)/',
        views.metadata_review,
        name='metadata_review'),
    url(r'^_internal/get-user-or-group-data/(?P<user_or_group_id>[0-9]+)/(?P<is_group>[a-z]+)$',
        views.get_user_or_group_data, name='get_user_or_group_data'),

    # --- publication, account and resource-type queries ---
    url(r'^_internal/(?P<shortkey>[0-9a-f-]+)/publish/$', views.publish),
    url(r'^_internal/(?P<shortkey>[0-9a-f-]+)/submit-for-review/$', views.submit_for_review),
    url(r'^_internal/create-resource/do/$', views.create_resource, name='create_resource'),
    url(r'^_internal/verify-account/$', views.verify_account),
    url(r'^_internal/resend_verification_email/$', views.resend_verification_email),
    url(r'^_internal/(?P<resource_type>[A-z]+)/supported-file-types/$',
        views.get_supported_file_types_for_resource_type, name='resource_type_file_types'),
    url(r'^_internal/(?P<resource_type>[A-z]+)/allow-multiple-file/$',
        views.is_multiple_file_upload_allowed, name="resource_type_multiple_file_upload"),
    url(r'^_internal/search/autocomplete/', autocomplete),

    # --- data store / folder hierarchy operations ---
    url(r'^_internal/data-store-structure/$', views.resource_folder_hierarchy.data_store_structure),
    url(r'^_internal/data-store-folder-zip/$',
        views.resource_folder_hierarchy.data_store_folder_zip, name="zip_folder"),
    url(r'^_internal/zip-by-aggregation-file/$',
        views.resource_folder_hierarchy.zip_aggregation_file, name="zip_by_aggr_file"),
    url(r'^_internal/data-store-folder-unzip/$',
        views.resource_folder_hierarchy.data_store_folder_unzip),
    url(r'^_internal/data-store-create-folder/$',
        views.resource_folder_hierarchy.data_store_create_folder),
    url(r'^_internal/data-store-add-reference/$',
        views.resource_folder_hierarchy.data_store_add_reference),
    url(r'^_internal/data-store-edit-reference-url/$',
        views.resource_folder_hierarchy.data_store_edit_reference_url),
    url(r'^_internal/data-store-move-or-rename/$',
        views.resource_folder_hierarchy.data_store_file_or_folder_move_or_rename),
    url(r'^_internal/data-store-move-to-folder/$',
        views.resource_folder_hierarchy.data_store_move_to_folder),
    url(r'^_internal/data-store-rename-file-or-folder/$',
        views.resource_folder_hierarchy.data_store_rename_file_or_folder),
    url(r'^_internal/data-store-delete-folder/$',
        views.resource_folder_hierarchy.data_store_remove_folder),

    # --- quota usage and async task tracking ---
    url(r'^_internal/update_quota_usage/(?P<username>[\w.@+-]+)/$',
        views.update_quota_usage, name='update_quota_usage'),
    url(r'^_internal/get_tasks_by_user/$', views.get_tasks_by_user, name='get_tasks_by_user'),
    url(r'^_internal/get_task/(?P<task_id>[0-9a-f-]+)$', views.get_task, name='get_task'),
    url(r'^_internal/abort_task/(?P<task_id>[0-9a-f-]+)$', views.abort_task, name='abort_task'),
    url(r'^_internal/dismiss_task/(?P<task_id>[0-9a-f-]+)$', views.dismiss_task, name='dismiss_task'),
    url(r'^_internal/set_task_delivered/(?P<task_id>[0-9a-f-]+)$', views.set_task_delivered, name='set_task_delivered'),
]
| {
"content_hash": "605aee5354180f1da3a768935edaad40",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 120,
"avg_line_length": 71.73109243697479,
"alnum_prop": 0.645618556701031,
"repo_name": "hydroshare/hydroshare",
"id": "840c8f45b62363eb2f2a19269771f8f0b33b30e2",
"size": "8536",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "hs_core/urls.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "183727"
},
{
"name": "Dockerfile",
"bytes": "1433"
},
{
"name": "HTML",
"bytes": "950010"
},
{
"name": "JavaScript",
"bytes": "1450537"
},
{
"name": "Python",
"bytes": "5786593"
},
{
"name": "R",
"bytes": "4904"
},
{
"name": "Shell",
"bytes": "94173"
},
{
"name": "Vue",
"bytes": "32043"
}
],
"symlink_target": ""
} |
from models.group import Group
# Two group fixtures with matching name/header/footer numeric suffixes.
test_data = [
    Group(name="name%s" % i, header="header%s" % i, footer="footer%s" % i)
    for i in (1, 2)
]
| {
"content_hash": "043abe9a8a9cc8947d579b42533ad997",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 60,
"avg_line_length": 21.375,
"alnum_prop": 0.6666666666666666,
"repo_name": "rmolewska/Python_course",
"id": "b2be11636746494bda768dafd6233683e8ff3904",
"size": "172",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "data/groups.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "45488"
}
],
"symlink_target": ""
} |
from webmarks.bookmarks.apiviews import add_folder_bookmark_view
from webmarks.bookmarks import viewsets
from django.conf.urls import include
from django.conf.urls import url
from rest_framework import routers
# DRF router providing the standard list/detail routes for each viewset.
apiRouter = routers.DefaultRouter()
apiRouter.register(r'folders', viewsets.FolderViewSet)
apiRouter.register(r'bookmarks', viewsets.BookmarkViewSet)
apiRouter.register(r'tags', viewsets.TagViewSet)
# Registered with an explicit base_name (required when DRF cannot derive one
# from a queryset attribute — TODO confirm against the viewset definition).
apiRouter.register(r'archives', viewsets.ArchiveViewSet, base_name='archive')
# apiRouter.register(r'search', viewsets.SearchViewSet)
# apiRouter.register(r'upload', viewsets.FileUploaderViewSet)
# apiRouter.register(r'crawler', viewsets.CrawlerViewSet, base_name='crawler')
# apiRouter.register(r'archive', viewsets.ArchiveViewSet, base_name='archive')

urlpatterns = [
    # API V1
    url(r'v1/', include(apiRouter.urls, namespace='external_apis')),
    # Attach a folder to a bookmark; both routes hit the same view, the
    # second additionally captures the folder id in the path.
    url(r'v1/bookmarks/(?P<pk>[\s\d\w().+-_,:&]+)/folders/$',
        add_folder_bookmark_view, name="add_folder_to_bookmark"),
    url(r'v1/bookmarks/(?P<pk>[\s\d\w().+-_,:&]+)/folders/(?P<id>[\s\d\w().+-_,:&]+)',
        add_folder_bookmark_view, name="add_folder_to_bookmark"),
]
| {
"content_hash": "442c5ea801e677cc1cba773c26bd0a24",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 86,
"avg_line_length": 44.15384615384615,
"alnum_prop": 0.7317073170731707,
"repo_name": "EricMuller/mynotes-backend",
"id": "ce532c38b355d293bb158364491bb05977a1b203",
"size": "1148",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "apps/webmarks/bookmarks/urls.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "11880"
},
{
"name": "Batchfile",
"bytes": "3516"
},
{
"name": "C",
"bytes": "37168"
},
{
"name": "CSS",
"bytes": "6613"
},
{
"name": "DIGITAL Command Language",
"bytes": "1032"
},
{
"name": "GAP",
"bytes": "36244"
},
{
"name": "HTML",
"bytes": "233863"
},
{
"name": "Makefile",
"bytes": "6766"
},
{
"name": "Nginx",
"bytes": "998"
},
{
"name": "Objective-C",
"bytes": "2584"
},
{
"name": "Python",
"bytes": "22991176"
},
{
"name": "Roff",
"bytes": "160293"
},
{
"name": "Shell",
"bytes": "13496"
},
{
"name": "Smarty",
"bytes": "1366"
}
],
"symlink_target": ""
} |
"""
client api
"""
import logging
import logging.handlers
import eventlet
from os.path import normpath, getsize
from simplecfs.message.packet import MakeDirPacket, ListDirPacket,\
ValidDirPacket, StatusDirPacket, RemoveDirPacket, AddFilePacket,\
AddChunkPacket, AddFileCommitPacket, StatFilePacket, DeleteFilePacket,\
DeleteChunkPacket, GetChkPacket, GetChunkPacket, ReportDSPacket,\
GetObjPacket, GetFilePacket
from simplecfs.coder.driver import RSDriver, CRSDriver, ZDriver
from simplecfs.message.network_handler import send_command, recv_command,\
send_data, recv_data
from simplecfs.common.parameters import RET_FAILURE, RET_SUCCESS, CODE_RS,\
CODE_CRS, CODE_Z, CHUNK_OK, CHUNK_MISSING, DS_CONNECTED
# for multithreading
def get_blocks_from_ds(ds_id, chunk_id, blist, block_num, need_data, index):
    """Fetch the blocks in @blist from one chunk on a dataserver.

    Intended to run in a worker green-thread: block i of @blist is stored
    into the shared list at need_data[index + i] on success; on failure
    need_data is left untouched and an empty list is returned.

    :param ds_id: dataserver address as 'ip:port'.
    :param chunk_id: id of the chunk holding the blocks.
    :param blist: indexes of the blocks wanted from the chunk.
    :param block_num: total number of blocks in the chunk.
    :param need_data: shared result list filled in place.
    :param index: offset into @need_data for this chunk's blocks.
    :returns: list of block payloads (empty on failure).
    """
    data_list = []
    packet = GetChunkPacket(chunk_id, block_num, blist)
    msg = packet.get_message()
    ip, port = ds_id.split(':')
    sock = eventlet.connect((ip, int(port)))
    sock = sock.makefile('rw')
    try:
        send_command(sock, msg)
        recv = recv_command(sock)
        if recv['state'] != RET_SUCCESS:
            logging.error('get chunk from ds error: %s', recv['info'])
            return data_list
        data = recv_data(sock)
    finally:
        # Close on every path; the original leaked the socket on the error
        # branch.
        sock.close()
    # Blocks are equal-sized slices of the payload; '//' keeps this an int
    # on Python 3 (plain '/' would yield a float and break slicing).
    size = len(data) // len(blist)
    data_list = [data[i * size:(i + 1) * size] for i in range(len(blist))]
    for i in range(len(blist)):
        need_data[index + i] = data_list[i]
    return data_list
class Client(object):
"""client to do request"""
    def __init__(self, config, test=False):
        """
        @config: ConfigParser() object
        @test: for unit test purpose (not referenced in this constructor)
        """
        self._config = config

        # init logging: configure the *root* logger with a size-rotated file
        # handler according to the [log] section of the config.
        logger = logging.getLogger()  # get the 'root' logger
        level = getattr(logging, config.get('log', 'log_level'))
        logger.setLevel(level)
        log_name = config.get('log', 'log_name')
        log_max_bytes = config.getint('log', 'log_max_bytes')
        log_file_num = config.getint('log', 'log_file_num')
        handler = logging.handlers.RotatingFileHandler(log_name,
                                                       maxBytes=log_max_bytes,
                                                       backupCount=log_file_num)
        log_format = logging.Formatter('%(levelname)-8s[%(asctime)s.%(msecs)d]'
                                       '<%(module)s> %(funcName)s:%(lineno)d:'
                                       ' %(message)s',
                                       datefmt='%Y-%m-%d %H:%M:%S')
        handler.setFormatter(log_format)
        logger.addHandler(handler)

        # init mds information: address of the metadata server
        self._mds_ip = config.get('mds', 'mds_ip')
        self._mds_port = config.getint('mds', 'mds_port')

        # init file information (kept as strings, exactly as read from config)
        self._packet_size = config.get('file', 'packet_size')
        self._block_size = config.get('file', 'block_size')

        # init current working directory
        self._cwd = '/'

        # init green-thread pool used for parallel dataserver requests
        thread_num = config.getint('thread', 'thread_num')
        self.pool = eventlet.GreenPool(thread_num)
def _get_sockfd_to_mds(self):
sock = eventlet.connect((self._mds_ip, self._mds_port))
return sock.makefile('rw')
def _get_sockfd_to_ds(self, ds_ip, ds_port):
sock = eventlet.connect((ds_ip, ds_port))
return sock.makefile('rw')
def _get_objkey_from_idx(self, filename, index):
return '%s_obj%d' % (filename, index)
def _get_chkkey_from_idx(self, filename, obj_index, chk_index):
return '%s_obj%d_chk%d' % (filename, obj_index, chk_index)
def _change_to_absolute_path(self, pathname):
"""change the path to absolute path in case of relative pathname"""
# if not absolute path, add _cwd
if not pathname.startswith('/'):
pathname = self._cwd + pathname
# delete '.' and '..'
pathname = normpath(pathname)
return pathname
def mkdir(self, dirname):
"""make directory"""
dirname = dirname.strip()
if not dirname.endswith('/'):
dirname += '/'
# change dirname to absolute path
absolute_path = self._change_to_absolute_path(dirname)
if not absolute_path.endswith('/'):
absolute_path += '/'
# make request packet
packet = MakeDirPacket(absolute_path)
msg = packet.get_message()
# get socket to mds
sock = self._get_sockfd_to_mds()
# send request
logging.info('mkdir send msg: %s', msg)
send_command(sock, msg)
# recv response
recv = recv_command(sock)
logging.info('mkdir recv msg: %s', recv)
sock.close()
# check response and return
state = recv['state']
info = recv['info']
if state == RET_FAILURE:
logging.info('mkdir response error: %s', info)
return (state, info)
def rmdir(self, dirname):
"""remove empty directory"""
dirname = dirname.strip()
if not dirname.endswith('/'):
dirname += '/'
# change dirname to absolute path
absolute_path = self._change_to_absolute_path(dirname)
if not absolute_path.endswith('/'):
absolute_path += '/'
# check current directory in dirname
if self._cwd.startswith(absolute_path):
logging.info('can not remove directory contain cwd')
state = RET_FAILURE
info = 'can not remvoe directory contain cwd'
return (state, info)
# make request packet
packet = RemoveDirPacket(absolute_path)
msg = packet.get_message()
# get socket to mds
sock = self._get_sockfd_to_mds()
# send request
logging.info('rmdir send msg: %s', msg)
send_command(sock, msg)
# recv response
recv = recv_command(sock)
logging.info('rmdir recv msg: %s', recv)
sock.close()
# check response and return
state = recv['state']
info = recv['info']
if state == RET_FAILURE:
logging.info('rmdir response error: %s', info)
return (state, info)
    def listdir(self, dirname):
        """ls directory
        return: (state, subfiles)
        state: RET_FAILURE/RET_SUCCESS
        subfiles: [file1, dir1, ...]
        """
        dirname = dirname.strip()
        if not dirname.endswith('/'):
            dirname += '/'
        # change dirname to absolute path
        absolute_path = self._change_to_absolute_path(dirname)
        # normpath inside _change_to_absolute_path drops the trailing '/'
        if not absolute_path.endswith('/'):
            absolute_path += '/'
        # make request packet
        packet = ListDirPacket(absolute_path)
        msg = packet.get_message()
        # get socket to mds
        sock = self._get_sockfd_to_mds()
        # send request
        logging.info('list dir send msg: %s', msg)
        send_command(sock, msg)
        # recv response
        recv = recv_command(sock)
        logging.info('list dir recv msg: %s', recv)
        sock.close()
        # check response and return; on success 'info' is the listing,
        # on failure it is an error description
        state = recv['state']
        info = recv['info']
        if state == RET_FAILURE:
            logging.info('list dir response error: %s', info)
        return (state, info)
    def chdir(self, dirname):
        """Change the client's current working directory.

        The directory is validated by the MDS first; self._cwd is only
        updated on success. Returns (state, info).
        """
        dirname = dirname.strip()
        if not dirname.endswith('/'):
            dirname += '/'
        # change dirname to absolute path
        absolute_path = self._change_to_absolute_path(dirname)
        if not absolute_path.endswith('/'):
            absolute_path += '/'
        # make request packet asking the MDS whether the directory exists
        packet = ValidDirPacket(absolute_path)
        msg = packet.get_message()
        # get socket to mds
        sock = self._get_sockfd_to_mds()
        # send request
        logging.info('valid dir send msg: %s', msg)
        send_command(sock, msg)
        # recv response
        recv = recv_command(sock)
        logging.info('valid dir recv msg: %s', recv)
        sock.close()
        # check response and return
        state = recv['state']
        info = recv['info']
        if state == RET_FAILURE:
            logging.info('change dir error: %s', info)
        else:
            # directory is valid: commit the new cwd (kept '/'-terminated)
            logging.info('change to dir: %s', absolute_path)
            self._cwd = absolute_path
        return (state, info)
    def getcwd(self):
        """Return the current working directory.

        chdir() keeps it absolute and '/'-terminated.
        """
        return self._cwd
def statdir(self, dirname):
"""stat directory"""
dirname = dirname.strip()
if not dirname.endswith('/'):
dirname += '/'
# change dirname to absolute path
absolute_path = self._change_to_absolute_path(dirname)
if not absolute_path.endswith('/'):
absolute_path += '/'
# make request packet
packet = StatusDirPacket(absolute_path)
msg = packet.get_message()
# get socket to mds
sock = self._get_sockfd_to_mds()
# send request
logging.info('stat dir send msg: %s', msg)
send_command(sock, msg)
# recv response
recv = recv_command(sock)
logging.info('stat dir recv msg: %s', recv)
sock.close()
# check response and return
state = recv['state']
info = recv['info']
if state == RET_FAILURE:
logging.info('stat dir response error: %s', info)
return (state, info)
def _get_code_driver(self, code_info):
"""return a init code driver according to code_info """
block_size = int(code_info['block_size'])
code_type = code_info['type']
code = None
if code_type == CODE_RS:
logging.info('code type rs')
k = int(code_info['k'])
m = int(code_info['m'])
w = int(code_info['w'])
packet_size = int(code_info['packet_size'])
code = RSDriver(k=k, m=m, w=w, packet_size=packet_size,
block_size=block_size)
elif code_type == CODE_CRS:
logging.info('code type crs')
k = int(code_info['k'])
m = int(code_info['m'])
w = int(code_info['w'])
packet_size = int(code_info['packet_size'])
code = CRSDriver(k=k, m=m, w=w, packet_size=packet_size,
block_size=block_size)
elif code_type == CODE_Z:
logging.info('code type zcode')
k = int(code_info['k'])
m = int(code_info['m'])
packet_size = int(code_info['packet_size'])
code = ZDriver(k=k, m=m, packet_size=packet_size,
block_size=block_size)
return code
def _send_chunk_to_ds(self, chunk_id, chunk_data, ds_id):
packet = AddChunkPacket(chunk_id, len(chunk_data))
msg = packet.get_message()
print ds_id.split(':')[0],int(ds_id.split(':')[1])
print '%s' % ('aaaaaaa')
sock = self._get_sockfd_to_ds(ds_id.split(':')[0],int(ds_id.split(':')[1]))
send_command(sock, msg)
# sending data
send_data(sock, chunk_data)
recv = recv_command(sock)
logging.info('send chunk to ds recv: %s', recv)
sock.close()
return recv['state']
    def _add_file_commit(self, filename):
        """Commit a previously added file on the MDS.

        Called after all chunks have been written to the data servers.
        Returns (state, info).
        """
        state = RET_SUCCESS
        info = 'ok'
        packet = AddFileCommitPacket(filename)
        msg = packet.get_message()
        sock = self._get_sockfd_to_mds()
        logging.info('add file commit :%s', msg)
        send_command(sock, msg)
        recv = recv_command(sock)
        logging.info('add file commit recv: %s', recv)
        sock.close()
        state = recv['state']
        if state == RET_FAILURE:
            info = 'add file commit error'
        if state == RET_SUCCESS:
            info = 'ok'
        return (state, info)
def putfile(self, src_path, des_path, code_info={}): # NOQA
"""put local @src_path file to remote @des_path with @code_info"""
state = RET_SUCCESS
info = 'ok'
# get the local src_path information(filesize)
try:
filesize = getsize(src_path)
except OSError:
logging.error('no such file in local: %s', src_path)
state = RET_FAILURE
info = 'no such file in local'
return (state, info)
# set the fileinfo
fileinfo = {}
fileinfo['filesize'] = filesize
code = { # default code info
'type': CODE_RS,
'k': 3,
'm': 2,
'w': 8,
'packet_size': self._packet_size,
'block_size': self._block_size,
}
for (key, value) in code_info.items():
code[key] = value
fileinfo['code'] = code
# call add file to mds with des_path and fileinfo
filename = self._change_to_absolute_path(des_path)
packet = AddFilePacket(filename, fileinfo)
msg = packet.get_message()
sock = self._get_sockfd_to_mds()
logging.info('put file send to mds: %s', msg)
send_command(sock, msg)
# recv the mds response
recv = recv_command(sock)
sock.close()
logging.info('put file recv from mds: %s', recv)
state = recv['state']
info = recv['info']
if state == RET_FAILURE:
logging.error('put file recv from mds error')
return (state, info)
# get the objects and chunks ds information
object_size = info['object_size']
object_num = info['object_num']
chunk_size = info['chunk_size']
ds_list = info['objects']
driver = self._get_code_driver(code)
fd = open(src_path, 'r')
for obj_idx in range(object_num):
# split file to object
data = fd.read(object_size)
if len(data) < object_size:
data += ' ' * (object_size - len(data))
# encode object to chunks
(state, chunks) = driver.encode(data)
if state == RET_FAILURE:
logging.error('driver encode error')
info = 'driver encode error'
return (state, info)
data_chunk_num = driver.get_data_chunk_num()
parity_chunk_num = driver.get_parity_chunk_num()
# put chunks to ds
# print data_chunk_num,'wwwwwwwwwwwww'
# print ds_list,' qqq'
for data_idx in range(data_chunk_num):
chunk_id = self._get_chkkey_from_idx(filename,
obj_idx,
data_idx)
chunk_data = chunks[0][data_idx*chunk_size:
(data_idx+1)*chunk_size]
print obj_idx,' eee'
ds_id = ds_list[obj_idx][data_idx]
self.pool.spawn_n(self._send_chunk_to_ds, chunk_id,
chunk_data, ds_id)
for parity_idx in range(parity_chunk_num):
chunk_id = self._get_chkkey_from_idx(filename,
obj_idx,
parity_idx+data_chunk_num)
chunk_data = chunks[1][parity_idx*chunk_size:
(parity_idx+1)*chunk_size]
ds_id = ds_list[obj_idx][parity_idx+data_chunk_num]
self.pool.spawn_n(self._send_chunk_to_ds, chunk_id,
chunk_data, ds_id)
# wait for write end
self.pool.waitall()
fd.close()
# commit to mds
if state == RET_SUCCESS:
(state, info) = self._add_file_commit(filename)
if state == RET_SUCCESS:
info = 'ok'
return (state, info)
def delfile(self, path):
"""delete a file"""
filename = self._change_to_absolute_path(path)
# delete meta data in mds
packet = DeleteFilePacket(filename)
msg = packet.get_message()
sock = self._get_sockfd_to_mds()
logging.info('stat file send to mds: %s', msg)
send_command(sock, msg)
recv = recv_command(sock)
logging.info('stat file recv %s', recv)
sock.close()
state = recv['state']
info = recv['info']
# delete data chunk in ds
for item in info:
chunk_id = item[0]
ds_id = item[1]
packet = DeleteChunkPacket(chunk_id)
msg = packet.get_message()
sock = self._get_sockfd_to_ds(ds_id.split(':')[0],int(ds_id.split(':')[1]))
send_command(sock, msg)
recv = recv_command(sock)
state = recv['state']
if state == RET_FAILURE:
logging.error('delete chunk in ds: %s %s', chunk_id, ds_id)
info = 'delete chunk ds error'
if state == RET_SUCCESS:
info = 'ok'
return (state, info)
def statfile(self, path):
"""stat a file"""
filename = self._change_to_absolute_path(path)
packet = StatFilePacket(filename)
msg = packet.get_message()
sock = self._get_sockfd_to_mds()
logging.info('stat file send to mds: %s', msg)
send_command(sock, msg)
recv = recv_command(sock)
logging.info('stat file recv %s', recv)
sock.close()
state = recv['state']
info = recv['info']
return (state, info)
def _get_object(self, chunks_info, object_id, driver):
"""get obejct according to chunks_info and driver,
return (state, data)"""
data_chunk_num = driver.get_data_chunk_num()
# check the chunk status
available_chunk = []
missing_chunk = []
chunk_num = len(chunks_info)
for index in range(chunk_num):
item_info = chunks_info[index]
state = item_info['status']
if item_info['ds_info']['status'] != DS_CONNECTED:
state == CHUNK_MISSING
if state == CHUNK_OK:
chk_id = '%s_chk%d' % (object_id, index)
ds_id = item_info['ds_id']
available_chunk.append((chk_id, ds_id))
else:
missing_chunk.append(index)
# set the available_chunk and available_list
if len(available_chunk) < data_chunk_num:
logging.error('available_chunk less than data chunk num')
return (RET_FAILURE, '')
task = []
block_num = driver.get_block_num()
block_list = []
for index in range(data_chunk_num):
ds_id = available_chunk[index][1]
chk_id = available_chunk[index][0]
chk_index = int(chk_id.rsplit('_chk')[1])
blist = range(block_num)
task.append((ds_id, chk_id, blist, block_num))
block_list += range(chk_index*block_num, (chk_index+1)*block_num)
task_data = block_list[:]
# multithreading read blocks
index = 0
for item in task:
self.pool.spawn_n(get_blocks_from_ds, item[0], item[1], item[2],
item[3], task_data, index)
index += len(item[2])
self.pool.waitall()
# decode object
(state, data) = driver.decode(task_data, block_list)
if state == RET_FAILURE:
logging.error('decode error')
return (RET_FAILURE, '')
return (state, data)
def getfile(self, des_path, local_path, repair_flag=False, test_flag=False):
"""get file from @des_path to @local_path,
if repair_flag is True, repair missing chunks
"""
logging.info('get file: %s to %s', des_path, local_path)
state = RET_SUCCESS
info = 'ok'
filename = self._change_to_absolute_path(des_path)
packet = GetFilePacket(filename)
msg = packet.get_message()
sock = self._get_sockfd_to_mds()
logging.info('get file send to mds: %s', msg)
send_command(sock, msg)
recv = recv_command(sock)
sock.close()
state = recv['state']
info = recv['info']
# check the file info
if state == RET_FAILURE:
logging.error('get file recv from mds: %s', recv)
return (state, info)
# init the code driver
driver = self._get_code_driver(info['code'])
# get each object and write to file
data_need_len = info['filesize']
object_num = info['object_num']
fd = open(local_path, 'w')
for obj_idx in range(object_num):
object_id = '%s_obj%d' % (filename, obj_idx)
chunks_info = info['objects'][obj_idx]
(state, data) = self._get_object(chunks_info, object_id, driver)
if state == RET_FAILURE:
logging.error('get object %s error', object_id)
info = 'get object error'
break
data_len = len(data)
if data_len > data_need_len:
data_len = data_need_len
data = data[:data_need_len]
data_need_len -= data_len
if not test_flag:
fd.write(data)
# write file
fd.close()
if state == RET_SUCCESS:
info = 'ok'
return (state, info)
def getobject(self, object_id, local_path, repair_flag=False):
"""get object from @des_path to @local_path,
if repair_flag is True, repair missing objects
"""
logging.info('get object: %s to %s', object_id, local_path)
state = RET_SUCCESS
info = 'ok'
packet = GetObjPacket(object_id)
msg = packet.get_message()
sock = self._get_sockfd_to_mds()
send_command(sock, msg)
recv = recv_command(sock)
sock.close()
state = recv['state']
info = recv['info']
if state == RET_FAILURE:
logging.error('get object recv from mds: %s', recv)
return (state, info)
# init the code driver
driver = self._get_code_driver(info['code'])
chunks_info = info['chunks']
(state, data) = self._get_object(chunks_info, object_id, driver)
if state == RET_FAILURE:
logging.error('decode error')
info = 'decode error'
return (RET_FAILURE, info)
# write to disk
fd = open(local_path, 'w')
fd.write(data)
fd.close()
if state == RET_SUCCESS:
info = 'ok'
return (state, info)
def _get_one_chunk_from_ds(self, ds_id, chunk_id):
"""get one chunk"""
data = ''
packet = GetChunkPacket(chunk_id, 1, [0]) # get all blocks in one chunk
msg = packet.get_message()
sock = self._get_sockfd_to_ds(ds_id.split(':')[0],
int(ds_id.split(':')[1]))
send_command(sock, msg)
recv = recv_command(sock)
if recv['state'] != RET_SUCCESS:
logging.error('get chunk from ds error: %s', recv['info'])
else:
data = recv_data(sock)
sock.close()
return data
def _get_blocks_from_ds(self, ds_id, chunk_id, blist, block_num):
"""get @blist form a chunk, chunk contain block_num blocks
return a block list
"""
data_list = []
packet = GetChunkPacket(chunk_id, block_num, blist)
msg = packet.get_message()
sock = self._get_sockfd_to_ds(ds_id.split(':')[0],
int(ds_id.split(':')[1]))
send_command(sock, msg)
recv = recv_command(sock)
if recv['state'] != RET_SUCCESS:
logging.error('get chunk from ds error: %s', recv['info'])
else:
data = recv_data(sock)
sock.close()
size = len(data)/len(blist)
data_list = [data[i*size:(i+1)*size] for i in range(len(blist))]
return data_list
    def _degrade_get_chunk(self, stripe_info, chunk_id):  # NOQA
        """Rebuild a missing chunk from its surviving stripe peers.

        @stripe_info: MDS record with 'code' params and per-chunk status
        @chunk_id:    '<filename>_obj<N>_chk<M>'
        Returns the repaired chunk data, or '' on failure.
        """
        data = ''
        driver = self._get_code_driver(stripe_info['code'])
        # map of surviving chunk id -> its data server
        available_chunk = {}
        missing_chunk = []
        object_id = chunk_id.rsplit('_chk')[0]
        chunks_info = stripe_info['chunks']
        chunk_num = len(chunks_info)
        for index in range(chunk_num):
            item_info = chunks_info[index]
            state = item_info['status']
            # a chunk on a disconnected DS counts as missing
            if item_info['ds_info']['status'] != DS_CONNECTED:
                state = CHUNK_MISSING
            if state == CHUNK_OK:
                chk_id = '%s_chk%d' % (object_id, index)
                ds_id = item_info['ds_id']
                available_chunk[chk_id] = ds_id
            else:
                missing_chunk.append(index)
        # translate chunk indexes to the driver's block-index space;
        # CRS works on w sub-blocks per chunk
        repair_indexes = []
        exclude_indexes = []
        code_type = driver.get_type()
        chunk_index = int(chunk_id.rsplit('_chk')[1])
        if code_type == CODE_RS:
            repair_indexes.append(chunk_index)
            exclude_indexes = missing_chunk
        elif code_type == CODE_CRS:
            repair_indexes = range(chunk_index*driver.w,
                                   (chunk_index+1)*driver.w)
            for index in missing_chunk:
                mlist = range(index*driver.w, (index+1)*driver.w)
                exclude_indexes += mlist
        elif code_type == CODE_Z:
            # NOTE(review): a bare int, unlike the list used for RS/CRS —
            # presumably ZDriver.repair expects a scalar here; confirm.
            repair_indexes = chunk_index
            if len(missing_chunk) > 1:
                logging.error('zcode missing chunk > 1')
                return data
        (state, need_list) = driver.repair_needed_blocks(repair_indexes,
                                                         exclude_indexes)
        if state == RET_FAILURE:
            logging.error('repair needed blocks return error')
            return data
        # group the needed block indexes into per-chunk fetch tasks;
        # relies on need_list being ordered by chunk
        block_num = driver.get_block_num()
        blist = []
        chunk_idx = -1  # start num
        task = []
        for index in need_list:
            # py2 integer division: block index -> owning chunk index
            new_chunk_idx = index / block_num
            if chunk_idx < 0:
                chunk_idx = new_chunk_idx
            if new_chunk_idx != chunk_idx:
                # flush the finished chunk's block list as one task
                chk_id = '%s_chk%d' % (object_id, chunk_idx)
                ds_id = available_chunk[chk_id]
                task.append([ds_id, chk_id, blist, block_num])
                blist = []
                chunk_idx = new_chunk_idx
                blist.append(index % block_num)
            else:
                blist.append(index % block_num)
        # get last chunk blocks
        chk_id = '%s_chk%d' % (object_id, chunk_idx)
        ds_id = available_chunk[chk_id]
        task.append([ds_id, chk_id, blist, block_num])
        task_data = need_list[:]
        # multithreading read blocks; 'index' is each task's write offset
        index = 0
        for item in task:
            self.pool.spawn_n(get_blocks_from_ds, item[0], item[1], item[2],
                              item[3], task_data, index)
            index += len(item[2])
        self.pool.waitall()
        need_data = task_data
        # repair chunk from the gathered blocks
        (state, data) = driver.repair(need_data, need_list, repair_indexes)
        if state == RET_FAILURE:
            logging.error('repair error')
            data = ''
        return data
def getchunk(self, chunk_id, local_path, repair_flag=False,
test_flag=False):
"""get chunk from @des_path to @local_path,
if repair_flag is True, repair missing chunks
"""
logging.info('get chunk: %s to %s', chunk_id, local_path)
state = RET_SUCCESS
info = 'ok'
packet = GetChkPacket(chunk_id)
msg = packet.get_message()
sock = self._get_sockfd_to_mds()
send_command(sock, msg)
recv = recv_command(sock)
state = recv['state']
info = recv['info']
sock.close()
if state == RET_FAILURE:
logging.error('get chunk recv from mds: %s', recv)
return (state, info)
# check chunk status
chunk_idx = int(chunk_id.rsplit('_chk')[1])
chunks_info = info['chunks']
chunk_info = chunks_info[chunk_idx]
chunk_state = chunk_info['status']
ds_id = chunk_info['ds_id']
if chunk_info['ds_info']['status'] != DS_CONNECTED:
chunk_state = CHUNK_MISSING
if chunk_state != CHUNK_OK:
# degrade chunk get
data = self._degrade_get_chunk(recv['info'], chunk_id)
else:
# get data from chunk
data = self._get_one_chunk_from_ds(ds_id, chunk_id)
if not data:
info = 'get chunk from ds error'
state == RET_FAILURE
else:
if not test_flag:
fd = open(local_path, 'w')
fd.write(data)
fd.close()
if state == RET_SUCCESS:
info = 'ok'
return (state, info)
def get_chunk_ds_id(self, chunk_id):
ds_ip = ''
ds_port = 0
logging.info('get chunk: %s', chunk_id)
packet = GetChkPacket(chunk_id)
msg = packet.get_message()
sock = self._get_sockfd_to_mds()
send_command(sock, msg)
recv = recv_command(sock)
state = recv['state']
info = recv['info']
sock.close()
if state == RET_FAILURE:
logging.error('get chunk recv from mds: %s', recv)
return (ds_ip, ds_port)
# get chunk ds_id
chunk_idx = int(chunk_id.rsplit('_chk')[1])
chunks_info = info['chunks']
chunk_info = chunks_info[chunk_idx]
ds_id = chunk_info['ds_id']
ds_ip = ds_id.split(':')[0]
ds_port = int(ds_id.split(':')[1])
return (ds_ip, ds_port)
def report_ds(self, ds_ip, ds_port, status=DS_CONNECTED):
info = {
'status': status,
}
packet = ReportDSPacket(ds_ip, ds_port, info)
msg = packet.get_message()
sock = self._get_sockfd_to_mds()
logging.info('report ds :%s', msg)
send_command(sock, msg)
recv = recv_command(sock)
logging.info('reprot ds recv: %s', recv)
sock.close()
return recv['state']
| {
"content_hash": "6729053d17be6f84a92a49af43009b4f",
"timestamp": "",
"source": "github",
"line_count": 918,
"max_line_length": 87,
"avg_line_length": 33.333333333333336,
"alnum_prop": 0.526437908496732,
"repo_name": "charley-ye/SCFS-v1",
"id": "4f7e1f6f3826777d8026b3ce72699cdc8a09887b",
"size": "30624",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "build/lib.linux-x86_64-2.7/client/api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1070"
},
{
"name": "C",
"bytes": "4730336"
},
{
"name": "C++",
"bytes": "60817"
},
{
"name": "CSS",
"bytes": "1647"
},
{
"name": "HTML",
"bytes": "285074"
},
{
"name": "Lua",
"bytes": "11887"
},
{
"name": "M4",
"bytes": "67004"
},
{
"name": "Makefile",
"bytes": "47327"
},
{
"name": "Perl",
"bytes": "171375"
},
{
"name": "Python",
"bytes": "398312"
},
{
"name": "Roff",
"bytes": "7236"
},
{
"name": "Ruby",
"bytes": "58403"
},
{
"name": "Shell",
"bytes": "36566"
},
{
"name": "Smarty",
"bytes": "1047"
},
{
"name": "Tcl",
"bytes": "423789"
},
{
"name": "XSLT",
"bytes": "303"
}
],
"symlink_target": ""
} |
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add Order.checkout_token: a blank-able CharField of max_length=36
    (presumably sized for a UUID string — confirm against callers)."""
    dependencies = [("order", "0067_auto_20181102_1054")]
    operations = [
        migrations.AddField(
            model_name="order",
            name="checkout_token",
            field=models.CharField(blank=True, max_length=36),
        )
    ]
| {
"content_hash": "cecf51fc04960e0a1b94bdf4fef4f2c8",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 62,
"avg_line_length": 24,
"alnum_prop": 0.6041666666666666,
"repo_name": "maferelo/saleor",
"id": "95cb5bce8f6ebe2df5ec47fe92abca97eaca2be4",
"size": "385",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "saleor/order/migrations/0068_order_checkout_token.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "64217"
},
{
"name": "HTML",
"bytes": "394723"
},
{
"name": "JavaScript",
"bytes": "61157"
},
{
"name": "Python",
"bytes": "585270"
}
],
"symlink_target": ""
} |
# -*- coding: utf-8 -*-
""" Task 702 """
def closest_pair(clusters, distance, cluster_distance, distances_cache):
    """Return the pair of clusters with the smallest mutual distance.

    @clusters:         list of cluster objects, each with a .cluster_id
    @distance:         point-level metric, forwarded to @cluster_distance
    @cluster_distance: callable(cluster_a, cluster_b, distance) -> number
    @distances_cache:  dict keyed by (id_a, id_b), filled in as a memo
    Returns (best_i, best_j, best_d): the two indices into @clusters and
    their distance (all None when fewer than two clusters are given).
    """
    best_i = None
    best_j = None
    best_d = None
    # range() instead of py2-only xrange(): portable to Python 3
    for i in range(len(clusters)):
        cluster_a = clusters[i]
        for j in range(i + 1, len(clusters)):
            cluster_b = clusters[j]
            dict_key = (cluster_a.cluster_id, cluster_b.cluster_id)
            if dict_key not in distances_cache:
                # memoize: cluster_distance may be expensive
                distances_cache[dict_key] = \
                    cluster_distance(cluster_a, cluster_b, distance)
            tmp = distances_cache[dict_key]
            if best_d is None or best_d > tmp:
                best_d = tmp
                best_i = i
                best_j = j
    return (best_i, best_j, best_d)
"content_hash": "800f21e1c45f7c4babd75bbb8b6bdd99",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 72,
"avg_line_length": 37.5,
"alnum_prop": 0.5430303030303031,
"repo_name": "katzoo/amu",
"id": "94eac09968e2060de069c1c02cd61f6ac63964b9",
"size": "830",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "isi/cluster/Task702.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Bison",
"bytes": "12146"
},
{
"name": "Java",
"bytes": "78450"
},
{
"name": "Perl",
"bytes": "2597"
},
{
"name": "Python",
"bytes": "26282"
}
],
"symlink_target": ""
} |
# Absolute paths of the external configuration files this tool reads.
karesansui_conf = '/etc/karesansui/application.conf'
pysilhouette_conf = '/etc/pysilhouette/silhouette.conf'
# Empty by default; consumers elsewhere fill or use it — TODO confirm.
search_path = ''
| {
"content_hash": "1ba53378736ccb4baa5a69c4128c158b",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 55,
"avg_line_length": 42,
"alnum_prop": 0.7698412698412699,
"repo_name": "karesansui/karesansui",
"id": "8a8623bf510b4446a030e89236ca5051dc974fcb",
"size": "173",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "bin/__cmd__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "79865"
},
{
"name": "HTML",
"bytes": "32774"
},
{
"name": "JavaScript",
"bytes": "286445"
},
{
"name": "Makefile",
"bytes": "265"
},
{
"name": "Python",
"bytes": "2226164"
},
{
"name": "Shell",
"bytes": "18293"
}
],
"symlink_target": ""
} |
"""
Flask-Mail
----------
A Flask extension for sending email messages.
Please refer to the online documentation for details.
Links
`````
* `documentation <http://packages.python.org/Flask-Mail>`_
"""
from setuptools import setup
# setuptools packaging metadata for the Flask-Mail extension.
setup(
    name='Flask-Mail',
    version='0.9.1',
    url='https://github.com/rduplain/flask-mail',
    license='BSD',
    author='Dan Jacob',
    author_email='danjac354@gmail.com',
    maintainer='Ron DuPlain',
    maintainer_email='ron.duplain@gmail.com',
    description='Flask extension for sending email',
    # reuse the module docstring above as the PyPI long description
    long_description=__doc__,
    py_modules=[
        'flask_mail'
    ],
    test_suite='nose.collector',
    zip_safe=False,
    platforms='any',
    install_requires=[
        'Flask',
        'blinker',
    ],
    tests_require=[
        'nose',
        'blinker',
        'speaklater',
        'mock',
    ],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
        'Topic :: Software Development :: Libraries :: Python Modules'
    ]
)
| {
"content_hash": "1036242bd36d1fa12b5996d9f8b5e620",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 70,
"avg_line_length": 23.814814814814813,
"alnum_prop": 0.5933125972006221,
"repo_name": "gerasim13/flask-mail",
"id": "8354de8276ae1f30a04292841ba4b7835cf7fc55",
"size": "1286",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "52919"
}
],
"symlink_target": ""
} |
from django.test import TestCase
from huxley.accounts.exceptions import AuthenticationError, PasswordChangeFailed
from huxley.accounts.models import User
from huxley.utils.test import TestUsers
class UserTestCase(TestCase):
    """Tests for User authentication, password change and password reset."""
    def test_authenticate(self):
        '''It should correctly authenticate and return a user, or return an
        error message.'''
        kunal = User.objects.create(username='kunal', email='kunal@lol.lol')
        kunal.set_password('mehta')
        kunal.is_active = False
        kunal.save()
        # helper: expect AuthenticationError with exactly this message
        def assert_raises(username, password, message):
            with self.assertRaises(AuthenticationError):
                try:
                    User.authenticate(username, password)
                except AuthenticationError as e:
                    self.assertEqual(str(e), message)
                    raise
        assert_raises('kunal', '', AuthenticationError.MISSING_FIELDS)
        assert_raises('', 'mehta', AuthenticationError.MISSING_FIELDS)
        assert_raises('kunal', 'm', AuthenticationError.INVALID_CREDENTIALS)
        assert_raises('k', 'mehta', AuthenticationError.INVALID_CREDENTIALS)
        assert_raises('kunal', 'mehta', AuthenticationError.INACTIVE_ACCOUNT)
        # once activated, the same credentials must succeed
        kunal.is_active = True
        kunal.save();
        user = User.authenticate('kunal', 'mehta')
        self.assertEqual(user, kunal)
    def test_change_password(self):
        '''It should correctly change a user's password or raise an error.'''
        user = User.objects.create(username='adavis', email='lol@lol.lol')
        user.set_password('old&busted')
        user.save()
        # helper: expect PasswordChangeFailed and the old password intact
        def assert_raises(old_password, new_password, message):
            with self.assertRaises(PasswordChangeFailed):
                try:
                    user.change_password(old_password, new_password)
                except PasswordChangeFailed as e:
                    self.assertEqual(str(e), message)
                    self.assertTrue(user.check_password('old&busted'))
                    raise
        assert_raises('', 'newhotness',
                      PasswordChangeFailed.MISSING_FIELDS)
        assert_raises('old&busted', '',
                      PasswordChangeFailed.MISSING_FIELDS)
        assert_raises('old&busted', 'a',
                      PasswordChangeFailed.PASSWORD_TOO_SHORT)
        assert_raises('old&busted', 'invalid>hotness',
                      PasswordChangeFailed.INVALID_CHARACTERS)
        assert_raises('wrong&busted', 'newhotness',
                      PasswordChangeFailed.INCORRECT_PASSWORD)
        user.change_password('old&busted', 'newhotness')
        self.assertTrue(user.check_password('newhotness'))
    def test_reset_password(self):
        '''It should correctly reset a user's password or raise an error.'''
        password = 'password'
        user = TestUsers.new_user(username='lololol', password=password)
        self.assertTrue(user.check_password(password))
        User.reset_password('lololol')
        # reload from the DB: the old password must no longer match
        user = User.objects.get(id=user.id)
        self.assertFalse(user.check_password(password))
        with self.assertRaises(User.DoesNotExist):
            TestUsers.new_user(username='', email='')
            User.reset_password('')
| {
"content_hash": "9b653413ff9ec5facae3659bff631d18",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 80,
"avg_line_length": 41.0126582278481,
"alnum_prop": 0.6231481481481481,
"repo_name": "jmosky12/huxley",
"id": "0cdc19a2dffd822af02ce39702e263629dde229a",
"size": "3389",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "huxley/accounts/tests/test_user.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
"""Command for adding a backend to a backend service."""
import copy
from googlecloudsdk.api_lib.compute import backend_services_utils
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.api_lib.compute import instance_groups_utils
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.command_lib.compute import flags as compute_flags
from googlecloudsdk.command_lib.compute import scope as compute_scope
from googlecloudsdk.command_lib.compute.backend_services import backend_flags
from googlecloudsdk.command_lib.compute.backend_services import flags
@base.ReleaseTracks(base.ReleaseTrack.GA)
class AddBackend(base_classes.ReadWriteCommand):
  """Add a backend to a backend service."""
  @staticmethod
  def Args(parser):
    # Flag registration defines the command's CLI surface.
    flags.GLOBAL_REGIONAL_BACKEND_SERVICE_ARG.AddArgument(parser)
    backend_flags.AddDescription(parser)
    backend_flags.AddInstanceGroup(
        parser, operation_type='add to',
        with_deprecated_zone=True)
    backend_flags.AddBalancingMode(parser)
    backend_flags.AddCapacityLimits(parser)
    backend_flags.AddCapacityScalar(parser)
  @property
  def service(self):
    # self.regional is set in Run() before the base class uses this.
    if self.regional:
      return self.compute.regionBackendServices
    return self.compute.backendServices
  @property
  def resource_type(self):
    if self.regional:
      return 'regionBackendServices'
    return 'backendServices'
  def CreateReference(self, args):
    return flags.GLOBAL_REGIONAL_BACKEND_SERVICE_ARG.ResolveAsResource(
        args, self.resources,
        default_scope=compute_scope.ScopeEnum.GLOBAL)
  def GetGetRequest(self, args):
    # Regional and global services use different request message types.
    if self.regional:
      return (self.service,
              'Get',
              self.messages.ComputeRegionBackendServicesGetRequest(
                  backendService=self.ref.Name(),
                  region=self.ref.region,
                  project=self.project))
    return (self.service,
            'Get',
            self.messages.ComputeBackendServicesGetRequest(
                backendService=self.ref.Name(),
                project=self.project))
  def GetSetRequest(self, args, replacement, existing):
    if self.regional:
      return (self.service,
              'Update',
              self.messages.ComputeRegionBackendServicesUpdateRequest(
                  backendService=self.ref.Name(),
                  backendServiceResource=replacement,
                  region=self.ref.region,
                  project=self.project))
    return (self.service,
            'Update',
            self.messages.ComputeBackendServicesUpdateRequest(
                backendService=self.ref.Name(),
                backendServiceResource=replacement,
                project=self.project))
  def CreateGroupReference(self, args):
    # Resolve the instance group (zonal or regional) named on the CLI.
    return instance_groups_utils.CreateInstanceGroupReference(
        scope_prompter=self,
        compute=self.compute,
        resources=self.resources,
        name=args.instance_group,
        region=args.instance_group_region,
        zone=(args.instance_group_zone
              if args.instance_group_zone else args.zone),
        zonal_resource_type='instanceGroups',
        regional_resource_type='regionInstanceGroups')
  def CreateBackendMessage(self, group_uri, balancing_mode, args):
    """Create a backend message.
    Args:
      group_uri: String. The backend instance group uri.
      balancing_mode: Backend.BalancingModeValueValuesEnum. The backend load
        balancing mode.
      args: argparse Namespace. The arguments given to the add-backend command.
    Returns:
      A new Backend message with its fields set according to the given
      arguments.
    """
    backend_services_utils.ValidateBalancingModeArgs(self.messages, args)
    return self.messages.Backend(
        balancingMode=balancing_mode,
        capacityScaler=args.capacity_scaler,
        description=args.description,
        group=group_uri,
        maxRate=args.max_rate,
        maxRatePerInstance=args.max_rate_per_instance,
        maxUtilization=args.max_utilization,
        maxConnections=args.max_connections,
        maxConnectionsPerInstance=args.max_connections_per_instance)
  def Modify(self, args, existing):
    backend_flags.WarnOnDeprecatedFlags(args)
    replacement = copy.deepcopy(existing)
    group_ref = self.CreateGroupReference(args)
    group_uri = group_ref.SelfLink()
    # Reject duplicates: a group may appear at most once per service.
    for backend in existing.backends:
      if group_uri == backend.group:
        raise exceptions.ToolException(
            'Backend [{0}] in zone [{1}] already exists in backend service '
            '[{2}].'.format(group_ref.Name(),
                            group_ref.zone,
                            args.name))
    if args.balancing_mode:
      balancing_mode = self.messages.Backend.BalancingModeValueValuesEnum(
          args.balancing_mode)
    else:
      balancing_mode = None
    backend = self.CreateBackendMessage(group_uri, balancing_mode, args)
    replacement.backends.append(backend)
    return replacement
  def Run(self, args):
    # Decide regional vs global once, before any property is consulted.
    self.regional = backend_services_utils.IsRegionalRequest(args)
    return super(AddBackend, self).Run(args)
@base.ReleaseTracks(base.ReleaseTrack.BETA)
class AddBackendBeta(AddBackend):
  """Add a backend to a backend service."""
  @staticmethod
  def Args(parser):
    flags.GLOBAL_REGIONAL_BACKEND_SERVICE_ARG.AddArgument(parser)
    backend_flags.AddDescription(parser)
    backend_flags.AddInstanceGroup(
        parser, operation_type='add to',
        with_deprecated_zone=True)
    backend_flags.AddBalancingMode(parser)
    backend_flags.AddCapacityLimits(parser)
    backend_flags.AddCapacityScalar(parser)
  def CreateGroupReference(self, args):
    """Overrides."""
    # NOTE(review): identical to AddBackend.CreateGroupReference — this
    # override looks redundant; confirm before removing.
    return instance_groups_utils.CreateInstanceGroupReference(
        scope_prompter=self,
        compute=self.compute,
        resources=self.resources,
        name=args.instance_group,
        region=args.instance_group_region,
        zone=(args.instance_group_zone
              if args.instance_group_zone else args.zone),
        zonal_resource_type='instanceGroups',
        regional_resource_type='regionInstanceGroups')
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class AddBackendAlpha(AddBackendBeta):
  """Add a backend to a backend service."""
  @staticmethod
  def Args(parser):
    flags.GLOBAL_REGIONAL_BACKEND_SERVICE_ARG.AddArgument(parser)
    backend_flags.AddDescription(parser)
    # Alpha uses the multiscope instance-group arg instead of the
    # deprecated-zone variant registered by GA/beta.
    flags.MULTISCOPE_INSTANCE_GROUP_ARG.AddArgument(
        parser, operation_type='add')
    backend_flags.AddBalancingMode(parser)
    backend_flags.AddCapacityLimits(parser)
    backend_flags.AddCapacityScalar(parser)
  def CreateGroupReference(self, args):
    """Overrides."""
    holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
    return flags.MULTISCOPE_INSTANCE_GROUP_ARG.ResolveAsResource(
        args, holder.resources,
        default_scope=compute_scope.ScopeEnum.ZONE,
        scope_lister=compute_flags.GetDefaultScopeLister(
            holder.client, self.project))
# Help text shared by all three release tracks of `add-backend`.
_DETAILED_HELP = {
    'brief': 'Add a backend to a backend service',
    'DESCRIPTION': """
        *{command}* is used to add a backend to a backend service. A
        backend is a group of tasks that can handle requests sent to a
        backend service. Currently, the group of tasks can be one or
        more Google Compute Engine virtual machine instances grouped
        together using an instance group.

        Traffic is first spread evenly across all virtual machines in
        the group. When the group is full, traffic is sent to the next
        nearest group(s) that still have remaining capacity.

        To modify the parameters of a backend after it has been added
        to the backend service, use
        `gcloud compute backend-services update-backend` or
        `gcloud compute backend-services edit`.
        """,
}

AddBackend.detailed_help = _DETAILED_HELP
AddBackendBeta.detailed_help = _DETAILED_HELP
AddBackendAlpha.detailed_help = _DETAILED_HELP
| {
"content_hash": "c97c6934aeb34ef1daa07492b64eccf9",
"timestamp": "",
"source": "github",
"line_count": 221,
"max_line_length": 79,
"avg_line_length": 36.30316742081448,
"alnum_prop": 0.696622211142964,
"repo_name": "KaranToor/MA450",
"id": "5b6d0d7f8d786a9a4fc11f5c94d61ccf8d29a3de",
"size": "8619",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "google-cloud-sdk/.install/.backup/lib/surface/compute/backend_services/add_backend.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3162"
},
{
"name": "CSS",
"bytes": "1930"
},
{
"name": "HTML",
"bytes": "13381"
},
{
"name": "Java",
"bytes": "151442"
},
{
"name": "JavaScript",
"bytes": "4906"
},
{
"name": "Makefile",
"bytes": "1636"
},
{
"name": "Objective-C",
"bytes": "13335"
},
{
"name": "PHP",
"bytes": "9086"
},
{
"name": "Pascal",
"bytes": "62"
},
{
"name": "Python",
"bytes": "19710731"
},
{
"name": "Roff",
"bytes": "2069494"
},
{
"name": "Ruby",
"bytes": "690"
},
{
"name": "Shell",
"bytes": "32272"
},
{
"name": "Smarty",
"bytes": "4968"
},
{
"name": "SourcePawn",
"bytes": "616"
},
{
"name": "Swift",
"bytes": "14225"
}
],
"symlink_target": ""
} |
"""
Script to process article database into cleaned dataset.
"""
import re
import pandas as pd
def fix_separators(s: str) -> str:
    """Insert a comma at every lowercase-to-uppercase boundary in *s*.

    Concatenated fields such as "Jane DoeJohn Roe" become
    "Jane Doe,John Roe"; strings without such boundaries are unchanged.
    """
    return re.sub(r'([a-z])([A-Z])', r'\1,\2', s)


def clean_data(articles: pd.DataFrame) -> pd.DataFrame:
    """Split run-together 'authors' and 'tags' fields with commas, in place.

    Mutates *articles* and also returns it for call chaining.
    """
    for column in ('authors', 'tags'):
        articles[column] = articles[column].apply(fix_separators)
    return articles
| {
"content_hash": "318b201bde76cc72eb64ee9743639a4d",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 67,
"avg_line_length": 28.57894736842105,
"alnum_prop": 0.6740331491712708,
"repo_name": "bmassman/fake_news",
"id": "c25958c65ffa6e8d8cd99025d5050c603611051d",
"size": "566",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fake_news/pipeline/db_cleaner.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "70735"
}
],
"symlink_target": ""
} |
import time
from bs4 import BeautifulSoup
import json
import codecs
import urllib2
import urllib
import httplib2
import contextlib
import re, urlparse
import os
def get_page(url):
    """Fetch *url* and return the full response body.

    The connection is closed by contextlib.closing even if read() raises.
    """
    with contextlib.closing(urllib.urlopen(url)) as response:
        return response.read()
def username_format(username):
    """Return *username* URL-ready: spaces become '+', encoded as UTF-8."""
    return username.replace(' ', '+').encode('UTF-8')
def parse_user_details(text):
    """Extract (profile_url, username) pairs from a Stack Overflow search page."""
    soup = BeautifulSoup(text)
    detail_divs = soup.findAll('div', {'class': 'user-details'})
    so_users = [(div.a['href'], div.a.get_text()) for div in detail_divs]
    # Free the parse tree explicitly; pages are fetched in a long loop.
    soup.decompose()
    return so_users
# Load the previously collected GitHub user statistics (list of user dicts
# with at least 'login' and 'name' keys).
users_file = codecs.open('github-users-stats.json', encoding='utf-8')
data = json.load(users_file)
# NOTE(review): `unique` and `ambiguous` are never updated below; the summary
# print at the bottom is commented out, so these look like leftovers.
unique = 0
ambiguous = 0
i = -1
users = []
for user in data:
    i += 1
    # One result file per GitHub login; skipping existing files makes the
    # script resumable after an interruption.
    result_file = 'git-so/{}'.format(user['login'])
    if os.path.isfile(result_file):
        continue
    # First attempt: search Stack Overflow users by GitHub login.
    url = 'http://stackoverflow.com/users?&search={}'.format(user['login'])
    print url,
    text = get_page(url)
    so_users = parse_user_details(text)
    if len(so_users) == 0:
        # Fallback: search by display name (spaces encoded as '+').
        url = 'http://stackoverflow.com/users?&search={}'.format(username_format(user['name']))
        print url,
        text = get_page(url)
        so_users = parse_user_details(text)
    # Progress indicator as a percentage of all users processed so far.
    print '{0:.2f}%'.format(i * 100. / len(data))
    result = {
        'login': user['login'],
        'username': user['name'],
        'so_users': so_users
    }
    users.append(result)
    # Persist each user's matches immediately so partial runs keep their work.
    output = open(result_file, 'w+')
    output.write(json.dumps(result, indent=4))
    output.close()
# print 'All: {}; Unique: {}; Ambiguous: {}'.format(len(data), unique, ambiguous)
print 'Success'
"content_hash": "c8e84437607b1b48eb0d2f7a2aef17d3",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 89,
"avg_line_length": 22.91549295774648,
"alnum_prop": 0.6742470805162877,
"repo_name": "n43jl/mdp",
"id": "46569ec6f53536f845932d5810a170400f6e329a",
"size": "1669",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "top-github/github_so_correlation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "24114"
},
{
"name": "Shell",
"bytes": "1231"
}
],
"symlink_target": ""
} |
"""
$Id$
$URL$
Copyright (c) 2010 foption
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
@since Jan 12, 2011
@author Mario Steinhoff
"""
"""
import gdata.calendar.client
def PrintUserCalendars(calendar_client):
feed = calendar_client.GetAllCalendarsFeed()
print feed.title.text
for i, a_calendar in enumerate(feed.entry):
print '\t%s. %s' % (i, a_calendar.title.text,)
calendar_client = gdata.calendar.client.CalendarClient(source='foption-fptbot-1.0')
calendar_client.ssl = True
calendar_client.ClientLogin('fptbot@googlemail.com', 'deinemudderfistedbots1337', calendar_client.source);
PrintUserCalendars(calendar_client)
"""
import urllib
import re
import htmlentitydefs
import datetime
from objects.facts import Fact
class SchoelnastAtFactSource(object):
    """
    A data source for wissen.schoelnast.at.

    Scrapes the "alles" page, decodes HTML entities and returns Fact
    objects newer than an optional ``limit_date`` attribute.
    """

    URL = 'http://wissen.schoelnast.at/alles.shtml'
    # Matches one table row: a dated link cell followed by the fact text.
    LINE_PATTERN = r'<tr><td class="li"><a(.*)>(?P<date>\d{1,2}\.\d{1,2}\.\d{4})</a></td><td class="re2">(?P<fact>.*)<span class="blau">'
    LINE_DATE_FORMAT = '%d.%m.%Y'
    # Alternation over all named HTML entities known to the stdlib.
    ENTITY_NAME_PATTERN = r'&({0});'.format('|'.join(htmlentitydefs.name2codepoint))
    ENTITY_CP_PATTERN = r'&#(\d{1,4});'

    def __init__(self):
        self.line_matcher = re.compile(self.LINE_PATTERN)
        self.entity_name_matcher = re.compile(self.ENTITY_NAME_PATTERN)
        self.entity_cp_matcher = re.compile(self.ENTITY_CP_PATTERN)

    def _entity2unichr(self, input):
        """Replace named and numeric HTML entities in *input* with characters.

        # TODO optimize
        """
        def named_replacer(match):
            return unichr(htmlentitydefs.name2codepoint[match.group(1)])

        def numeric_replacer(match):
            return unichr(int(match.group(1)))

        converted = self.entity_name_matcher.sub(named_replacer, input)
        return self.entity_cp_matcher.sub(numeric_replacer, converted)

    def get_data(self):
        """Download the page and return Fact objects on/after limit_date."""
        stream = urllib.urlopen(self.URL)
        raw_lines = stream.readlines()
        stream.close()
        # limit_date is an optional instance attribute set by the caller.
        limit_date = getattr(self, 'limit_date', datetime.date(2000, 1, 1))
        facts = []
        for raw_line in raw_lines:
            match = self.line_matcher.search(raw_line.strip())
            if not match:
                continue
            fact_date = datetime.datetime.strptime(
                match.group('date'), self.LINE_DATE_FORMAT).date()
            if fact_date < limit_date:
                continue
            fact = Fact()
            fact.date = fact_date
            fact.text = self._entity2unichr(match.group('fact')).strip()
            facts.append(fact)
        return facts
| {
"content_hash": "5afb4931183c69e3e8b686301ab58b0a",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 137,
"avg_line_length": 32.347457627118644,
"alnum_prop": 0.6418653392716793,
"repo_name": "msteinhoff/foption-bot",
"id": "f99534cc175ea4ee64d35897d8edb4d49acf49d8",
"size": "3841",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python/test/language/bottest.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "400174"
}
],
"symlink_target": ""
} |
from lib.common import helpers
class Module:
def __init__(self, mainMenu, params=[]):
self.info = {
'Name': 'Get-DomainUser',
'Author': ['@harmj0y'],
'Description': ('Query information for a given user or users in the specified domain. Part of PowerView.'),
'Background' : True,
'OutputExtension' : None,
'NeedsAdmin' : False,
'OpsecSafe' : True,
'Language' : 'powershell',
'MinLanguageVersion' : '2',
'Comments': [
'https://github.com/PowerShellMafia/PowerSploit/blob/dev/Recon/'
]
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent' : {
'Description' : 'Agent to run module on.',
'Required' : True,
'Value' : ''
},
'Identity' : {
'Description' : 'A SamAccountName, DistinguishedName, SID, GUID, or a dns host name, wildcards accepted.',
'Required' : False,
'Value' : ''
},
'SPN' : {
'Description' : 'Switch. Only return user objects with non-null service principal names',
'Required' : False,
'Value' : ''
},
'AdminCount' : {
'Description' : 'Switch. Return users with \'(adminCount=1)\' (meaning are/were privileged).',
'Required' : False,
'Value' : ''
},
'AllowDelegation' : {
'Description' : 'Switch. Return user accounts that are not marked as \'sensitive and not allowed for delegation\'',
'Required' : False,
'Value' : ''
},
'TrustedToAuth' : {
'Description' : 'Switch. Return computer objects that are trusted to authenticate for other principals.',
'Required' : False,
'Value' : ''
},
'PreauthNotRequired' : {
'Description' : 'Switch. Return user accounts with "Do not require Kerberos preauthentication" set.',
'Required' : False,
'Value' : ''
},
'DisallowDelegation' : {
'Description' : 'Switch. Return user accounts that are marked as \'sensitive and not allowed for delegation\'',
'Required' : False,
'Value' : ''
},
'Domain' : {
'Description' : 'The domain to use for the query, defaults to the current domain.',
'Required' : False,
'Value' : ''
},
'LDAPFilter' : {
'Description' : 'Specifies an LDAP query string that is used to filter Active Directory objects.',
'Required' : False,
'Value' : ''
},
'Properties' : {
'Description' : 'Specifies the properties of the output object to retrieve from the server.',
'Required' : False,
'Value' : ''
},
'SearchBase' : {
'Description' : 'The LDAP source to search through, e.g. "LDAP://OU=secret,DC=testlab,DC=local" Useful for OU queries.',
'Required' : False,
'Value' : ''
},
'Server' : {
'Description' : 'Specifies an active directory server (domain controller) to bind to',
'Required' : False,
'Value' : ''
},
'SearchScope' : {
'Description' : 'Specifies the scope to search under, Base/OneLevel/Subtree (default of Subtree)',
'Required' : False,
'Value' : ''
},
'ResultPageSize' : {
'Description' : 'Specifies the PageSize to set for the LDAP searcher object.',
'Required' : False,
'Value' : ''
},
'ServerTimeLimit' : {
'Description' : 'Specifies the maximum amount of time the server spends searching. Default of 120 seconds.',
'Required' : False,
'Value' : ''
},
'SecurityMasks' : {
'Description' : 'Specifies an option for examining security information of a directory object. One of "Dacl", "Group", "None", "Owner", "Sacl".',
'Required' : False,
'Value' : ''
},
'Tombstone' : {
'Description' : 'Switch. Specifies that the search should also return deleted/tombstoned objects.',
'Required' : False,
'Value' : 'False'
},
'FindOne' : {
'Description' : 'Only return one result object.',
'Required' : False,
'Value' : 'False'
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self, obfuscate=False, obfuscationCommand=""):
moduleName = self.info["Name"]
# read in the common powerview.ps1 module source code
moduleSource = self.mainMenu.installPath + "/data/module_source/situational_awareness/network/powerview.ps1"
try:
f = open(moduleSource, 'r')
except:
print helpers.color("[!] Could not read module source path at: " + str(moduleSource))
return ""
moduleCode = f.read()
f.close()
# get just the code needed for the specified function
script = helpers.generate_dynamic_powershell_script(moduleCode, moduleName)
script += moduleName + " "
for option,values in self.options.iteritems():
if option.lower() != "agent":
if values['Value'] and values['Value'] != '':
if values['Value'].lower() == "true":
# if we're just adding a switch
script += " -" + str(option)
else:
script += " -" + str(option) + " " + str(values['Value'])
script += ' | Out-String | %{$_ + \"`n\"};"`n'+str(moduleName)+' completed!"'
if obfuscate:
script = helpers.obfuscate(self.mainMenu.installPath, psScript=script, obfuscationCommand=obfuscationCommand)
return script
| {
"content_hash": "aa59837c2359f3c2ad9fcce003ae4f5d",
"timestamp": "",
"source": "github",
"line_count": 176,
"max_line_length": 165,
"avg_line_length": 41.52272727272727,
"alnum_prop": 0.4616858237547893,
"repo_name": "bneg/Empire",
"id": "c031626e1114013041fc375b7d8dfcf490f2913a",
"size": "7308",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "lib/modules/powershell/situational_awareness/network/powerview/get_user.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "1966"
},
{
"name": "Java",
"bytes": "496"
},
{
"name": "Objective-C",
"bytes": "2664"
},
{
"name": "PHP",
"bytes": "2198"
},
{
"name": "PowerShell",
"bytes": "17003288"
},
{
"name": "Python",
"bytes": "2787352"
},
{
"name": "Shell",
"bytes": "10123"
}
],
"symlink_target": ""
} |
import os
import sys
if __name__ == "__main__":
    # Point Django at this project's settings unless the caller already set it.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "akktool.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
        # exceptions on Python 2.
        try:
            import django
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        # Django itself imported fine, so re-raise the original error.
        raise
    # Dispatch to Django's CLI (runserver, migrate, ...).
    execute_from_command_line(sys.argv)
| {
"content_hash": "8d46b7c277248acddb6ba76d6c75b4dd",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 77,
"avg_line_length": 37.285714285714285,
"alnum_prop": 0.6206896551724138,
"repo_name": "HorayNarea/Akktool",
"id": "53164992a83c5689674fa9562f29951b5e1dbc94",
"size": "805",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "487"
},
{
"name": "Python",
"bytes": "9909"
},
{
"name": "Shell",
"bytes": "360"
}
],
"symlink_target": ""
} |
"""Tests for showing timestamps."""
from datetime import MAXYEAR, datetime
import pytest
def max_timestamp_for_datetime():
    """Calculate the latest timestamp Python will allow in a datetime."""
    last_representable = datetime(MAXYEAR, 12, 31, 23, 59, 59)
    return int(last_representable.timestamp())
def min_timestamp_for_datetime():
    """Return the earliest timestamp Python will allow in a datetime."""
    # 1 January year 1, 00:00:00 UTC; computing it via datetime(...).timestamp()
    # raises OverflowError, hence the hardcoded constant.
    earliest = -62135596800
    return earliest
@pytest.mark.parametrize(
    "timestamp, expected_strings",
    [
        (0, ["1 January 1970", "00:00:00 UTC"]),
        (1, ["1 January 1970", "00:00:01 UTC"]),
        (1234566789, ["13 February 2009"]),
        ("-0", ["1 January 1970", "00:00:00 UTC"]),
        (-1, ["31 December 1969", "23:59:59 UTC"]),
        (-1234566789, ["18 November 1930"]),
        (
            max_timestamp_for_datetime(),
            ["31 December {}".format(MAXYEAR), "23:59:59 UTC"],
        ),
        (min_timestamp_for_datetime(), ["1 January", "00:00:00 UTC"]),
    ],
)
def test_timestamp(client, timestamp, expected_strings):
    """A valid timestamp page returns 200 and shows the expected strings."""
    response = client.get("/{}".format(timestamp))
    assert response.status_code == 200
    body = response.get_data(as_text=True)
    for expected_string in expected_strings:
        assert expected_string in body
@pytest.mark.parametrize(
    "timestamp",
    [
        max_timestamp_for_datetime() + 1,
        min_timestamp_for_datetime() - 1,
        9999999999999999,
        99999999999999999,
        999999999999999999,
    ],
)
def test_overflow(client, timestamp):
    """Out-of-range timestamps yield a 404 page that echoes the input."""
    response = client.get("/{}".format(timestamp))
    assert response.status_code == 404
    body = response.get_data(as_text=True)
    assert str(timestamp) in body
| {
"content_hash": "6729d8f61161637f4fb27fdd631fdc7b",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 78,
"avg_line_length": 31.559322033898304,
"alnum_prop": 0.6240601503759399,
"repo_name": "craiga/unixtimesta.mp",
"id": "f2afdf1124fb61f45518d4d505b06b86fb523991",
"size": "1862",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/show_timestamp_tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Gherkin",
"bytes": "710"
},
{
"name": "HTML",
"bytes": "7233"
},
{
"name": "Python",
"bytes": "26770"
},
{
"name": "SCSS",
"bytes": "2971"
},
{
"name": "Shell",
"bytes": "27"
}
],
"symlink_target": ""
} |
"""Script to plot correlation between distances."""
import lds
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
def _plot_eig_vs_ar_distance(hidden_state_dim, num_pairs, xlabel, ylabel,
                             title):
    """Plot the correlation between two LDS distance measures on current axes.

    Samples `num_pairs` random pairs of linear dynamical systems with the
    given hidden dimension, prints the Pearson correlation between the
    eigenvalue distance and the l-2 distance of the expected AR parameters,
    and draws a seaborn regression plot.
    """
    lds_pairs = [(lds.generate_linear_dynamical_system(hidden_state_dim),
                  lds.generate_linear_dynamical_system(hidden_state_dim))
                 for _ in xrange(num_pairs)]
    lds_distances = [
        lds.eig_dist(system1, system2) for (system1, system2) in lds_pairs
    ]
    expected_ar_distances = [
        np.linalg.norm(system1.get_expected_arparams() -
                       system2.get_expected_arparams())
        for (system1, system2) in lds_pairs
    ]
    print(np.corrcoef(lds_distances, expected_ar_distances)[0, 1])
    ax = sns.regplot(x=lds_distances, y=expected_ar_distances)
    ax.set(xlabel=xlabel, ylabel=ylabel, title=title)


sns.set(style='whitegrid')
num_pairs = 100
plt.figure(figsize=(8, 4))

# The two subplots previously duplicated this logic verbatim; they now share
# one helper and differ only in the hidden dimension and labels.
plt.subplot(1, 2, 1)
_plot_eig_vs_ar_distance(
    2, num_pairs,
    xlabel='l-2 distance b/w eigenvalues',
    ylabel='l-2 distance b/w corresponding AR params',
    title='Hidden dim = 2')

plt.subplot(1, 2, 2)
_plot_eig_vs_ar_distance(
    3, num_pairs,
    xlabel='l-2 distance b/w eigenvalues',
    ylabel='',
    title='Hidden dim = 3')

plt.gcf().subplots_adjust(bottom=0.15)
plt.show()
| {
"content_hash": "13e49987e4a6bfc61edf75552bc33abc",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 80,
"avg_line_length": 34.673469387755105,
"alnum_prop": 0.6851088875809299,
"repo_name": "google-research/google-research",
"id": "ae6e25c177db5699c4c3ea3bcbc91b7cbb77910b",
"size": "2307",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "linear_dynamical_systems/plot_dist_correlation.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "9817"
},
{
"name": "C++",
"bytes": "4166670"
},
{
"name": "CMake",
"bytes": "6412"
},
{
"name": "CSS",
"bytes": "27092"
},
{
"name": "Cuda",
"bytes": "1431"
},
{
"name": "Dockerfile",
"bytes": "7145"
},
{
"name": "Gnuplot",
"bytes": "11125"
},
{
"name": "HTML",
"bytes": "77599"
},
{
"name": "ImageJ Macro",
"bytes": "50488"
},
{
"name": "Java",
"bytes": "487585"
},
{
"name": "JavaScript",
"bytes": "896512"
},
{
"name": "Julia",
"bytes": "67986"
},
{
"name": "Jupyter Notebook",
"bytes": "71290299"
},
{
"name": "Lua",
"bytes": "29905"
},
{
"name": "MATLAB",
"bytes": "103813"
},
{
"name": "Makefile",
"bytes": "5636"
},
{
"name": "NASL",
"bytes": "63883"
},
{
"name": "Perl",
"bytes": "8590"
},
{
"name": "Python",
"bytes": "53790200"
},
{
"name": "R",
"bytes": "101058"
},
{
"name": "Roff",
"bytes": "1208"
},
{
"name": "Rust",
"bytes": "2389"
},
{
"name": "Shell",
"bytes": "730444"
},
{
"name": "Smarty",
"bytes": "5966"
},
{
"name": "Starlark",
"bytes": "245038"
}
],
"symlink_target": ""
} |
"""
Django settings for PhotoManagementSystem project.
Generated by 'django-admin startproject' using Django 1.8.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
# Project root: two directory levels above this settings module.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control; it should be
# rotated and loaded from the environment instead of hardcoded here.
SECRET_KEY = '$6#w&i!ng7e34urp0*g2l^1@@y=!bph3!4*#i$o-#fqi^d=$up'

# SECURITY WARNING: don't run with debug turned on in production!
# 'SERVER_SOFTWARE' is present in the SAE (Sina App Engine) runtime
# environment (see the `sae.const` import below), so DEBUG is only
# enabled for local development.
if 'SERVER_SOFTWARE' in os.environ:
    DEBUG = False
else:
    DEBUG = True

ALLOWED_HOSTS = [
    'localhost',
    'pmsys.sinaapp.com',
]

# Application definition

INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'PhotoManager',
)

MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.security.SecurityMiddleware',
)

ROOT_URLCONF = 'PhotoManagementSystem.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [
            './PhotoManager/templates',
        ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'PhotoManagementSystem.wsgi.application'

# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases

# The two triple-quoted blocks below are disabled alternative configurations
# (SQLite-only and MySQL-only) kept for reference; the active configuration
# follows under "SQLite and MySQL".
# SQLite
'''DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}'''

# MySQL
'''if 'SERVER_SOFTWARE' in os.environ:
    from sae.const import (
        MYSQL_HOST, MYSQL_PORT, MYSQL_USER, MYSQL_PASS, MYSQL_DB
    )
else:
    MYSQL_HOST = 'localhost'
    MYSQL_PORT = '3306'
    MYSQL_USER = 'root'
    MYSQL_PASS = 'root'
    MYSQL_DB = 'pmsys'
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': MYSQL_DB,
        'USER': MYSQL_USER,
        'PASSWORD': MYSQL_PASS,
        'HOST': MYSQL_HOST,
        'PORT': MYSQL_PORT,
    }
}'''

# SQLite and MySQL
# Active configuration: MySQL (credentials provided by SAE) when deployed,
# local SQLite otherwise.
if 'SERVER_SOFTWARE' in os.environ:
    from sae.const import (
        MYSQL_HOST, MYSQL_PORT, MYSQL_USER, MYSQL_PASS, MYSQL_DB
    )

    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.mysql',
            'NAME': MYSQL_DB,
            'USER': MYSQL_USER,
            'PASSWORD': MYSQL_PASS,
            'HOST': MYSQL_HOST,
            'PORT': MYSQL_PORT,
        }
    }
else:
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.sqlite3',
            'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
        }
    }

# Login
LOGIN_REDIRECT_URL = '/home/'
LOGIN_URL = '/signin/'
LOGOUT_URL = '/signout/'

# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/

LANGUAGE_CODE = 'zh-hans'

TIME_ZONE = 'Asia/Shanghai'

USE_I18N = False

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/

STATIC_URL = '/static/'
# Backslash replacement keeps the path usable when collectstatic runs on Windows.
STATIC_ROOT = os.path.join(BASE_DIR, 'static').replace('\\', '/')
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, "static_dev"),
)

MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
| {
"content_hash": "9b05fbe07c84bb0960f0e01846898132",
"timestamp": "",
"source": "github",
"line_count": 174,
"max_line_length": 71,
"avg_line_length": 24.586206896551722,
"alnum_prop": 0.6372136512388967,
"repo_name": "39M/PhotoTheater",
"id": "7bdd7ef5625da1621f131177934d86f586fde55d",
"size": "4278",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "PhotoManagementSystem/PhotoManagementSystem/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "752951"
},
{
"name": "HTML",
"bytes": "72442"
},
{
"name": "JavaScript",
"bytes": "2291250"
},
{
"name": "Python",
"bytes": "94702"
}
],
"symlink_target": ""
} |
import os
import sys
if __name__ == "__main__":
    # Point Django at this project's settings unless the caller already set it.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "uidjangotemplate.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
        # exceptions on Python 2.
        try:
            import django
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        # Django itself imported fine, so re-raise the original error.
        raise
    # Dispatch to Django's CLI (runserver, migrate, ...).
    execute_from_command_line(sys.argv)
| {
"content_hash": "ae98de22be6518e897e05bd5a0e933e6",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 80,
"avg_line_length": 37.714285714285715,
"alnum_prop": 0.625,
"repo_name": "cisco-gve/epnm_alarm_report",
"id": "505487e4b83edca329e437a512f1e166eca711d1",
"size": "814",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1125164"
},
{
"name": "HTML",
"bytes": "64143"
},
{
"name": "JavaScript",
"bytes": "67417"
},
{
"name": "Python",
"bytes": "24658"
},
{
"name": "Shell",
"bytes": "180"
}
],
"symlink_target": ""
} |
"""
Models for notifications app.
"""
import json
import jsonpickle
import six
from django.conf import settings
from django.db import models
from django.utils.module_loading import import_string
@six.python_2_unicode_compatible
class SentNotification(models.Model):
    """
    Stores info on the notification that was sent.
    """

    STATUS_PENDING = 0
    STATUS_SUCCESS = 1
    STATUS_FAILED = 2
    STATUS_USER_DISABLED = 3
    STATUSES = ((0, "Pending"), (1, "Success"), (2, "Failed"), (3, "User Disabled"))

    text_content = models.TextField(null=True, blank=True)
    html_content = models.TextField(null=True, blank=True)
    sent_from = models.CharField(max_length=100, null=True, blank=True)
    recipients = models.CharField(
        max_length=2000
    )  # Comma separated list of emails or numbers
    subject = models.CharField(max_length=255, null=True, blank=True)
    extra_data = models.TextField(null=True, blank=True)  # json dictionary
    date_sent = models.DateTimeField()
    status = models.PositiveSmallIntegerField(choices=STATUSES, default=STATUS_PENDING)
    notification_class = models.CharField(max_length=255)
    error_message = models.TextField(null=True, blank=True)
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL, default=None, null=True, on_delete=models.SET_NULL
    )
    attachments = models.TextField(null=True, blank=True)

    def __str__(self):
        return self.notification_class

    def get_recipients(self):
        """
        Return the list of recipients for the notification. Recipient is defined by the notification class.
        """
        return self.recipients.split(",")

    def resend(self):
        """
        Re-sends the notification by calling the notification class' resend method
        """
        target_class = import_string(self.notification_class)
        return target_class.resend(self)

    def get_extra_data(self):
        """
        Return extra data that was saved; an empty dict when none was stored.
        """
        return json.loads(self.extra_data) if self.extra_data else {}

    def get_attachments(self):
        """
        Deserialize and return the stored attachments, or None if absent.
        """
        if not self.attachments:
            return None
        return jsonpickle.loads(self.attachments)
@six.python_2_unicode_compatible
class Notification(models.Model):
    """
    NotificationClasses are created on app init.
    """

    notification_class = models.CharField(max_length=255, unique=True)
    verbose_name = models.CharField(max_length=255, blank=True, null=True)
    can_disable = models.BooleanField(default=True)

    def __str__(self):
        # Prefer the human-readable name; fall back to the dotted class path.
        return self.verbose_name or self.notification_class
class UserNotification(models.Model):
    """
    Add a User Notification record, then add disabled notifications to disable records.
    On your user Admin, add the field user_notification
    """

    # One record per user; the related user's pk doubles as this model's pk.
    user = models.OneToOneField(
        settings.AUTH_USER_MODEL, on_delete=models.CASCADE, primary_key=True
    )
    # Notifications this user has opted out of receiving.
    disabled_notifications = models.ManyToManyField(Notification)
| {
"content_hash": "40c88a24e72b310c2b6e2948c136b9ab",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 107,
"avg_line_length": 30.34313725490196,
"alnum_prop": 0.6759289176090468,
"repo_name": "worthwhile/django-herald",
"id": "3b702e1f21c5b6446349369ad0c0eacdadcf305f",
"size": "3095",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "herald/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "887"
},
{
"name": "Python",
"bytes": "69112"
}
],
"symlink_target": ""
} |
from keywordgroup import KeywordGroup
class _CookieKeywords(KeywordGroup):

    def delete_all_cookies(self):
        """Deletes all cookies."""
        self._current_browser().delete_all_cookies()

    def delete_cookie(self, name):
        """Deletes cookie matching `name`.

        If the cookie is not found, nothing happens.
        """
        self._current_browser().delete_cookie(name)

    def get_cookies(self):
        """Returns all cookies of the current page."""
        browser = self._current_browser()
        pairs = [cookie['name'] + "=" + cookie['value']
                 for cookie in browser.get_cookies()]
        return '; '.join(pairs)

    def get_cookie_value(self, name):
        """Returns value of cookie found with `name`.

        If no cookie is found with `name`, this keyword fails.
        """
        cookie = self._current_browser().get_cookie(name)
        if cookie is None:
            raise ValueError("Cookie with name %s not found." % name)
        return cookie['value']

    def create_cookie(self, cname, cval, cpath='/'):
        """Creates a cookie with `name`, `value` arguments

        Use should be minimized since it weakens test case"""
        new_cookie = {'name': cname, 'value': cval, 'path': cpath}
        self._current_browser().add_cookie(new_cookie)
"content_hash": "b375027803169acc72a8b701319c5309",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 92,
"avg_line_length": 35.69444444444444,
"alnum_prop": 0.6023346303501945,
"repo_name": "hbmartin/robotframework-selenium2library",
"id": "07b33859071c95cfc71a69073552cbe59a2a6dba",
"size": "1285",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/Selenium2Library/keywords/_cookie.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "9719"
},
{
"name": "Python",
"bytes": "240814"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from tapi_server.models.base_model_ import Model
from tapi_server.models.tapi_common_admin_state_pac import TapiCommonAdminStatePac # noqa: F401,E501
from tapi_server.models.tapi_common_administrative_state import TapiCommonAdministrativeState # noqa: F401,E501
from tapi_server.models.tapi_common_capacity import TapiCommonCapacity # noqa: F401,E501
from tapi_server.models.tapi_common_layer_protocol_name import TapiCommonLayerProtocolName # noqa: F401,E501
from tapi_server.models.tapi_common_lifecycle_state import TapiCommonLifecycleState # noqa: F401,E501
from tapi_server.models.tapi_common_local_class import TapiCommonLocalClass # noqa: F401,E501
from tapi_server.models.tapi_common_name_and_value import TapiCommonNameAndValue # noqa: F401,E501
from tapi_server.models.tapi_common_operational_state import TapiCommonOperationalState # noqa: F401,E501
from tapi_server.models.tapi_common_port_direction import TapiCommonPortDirection # noqa: F401,E501
from tapi_server.models.tapi_common_port_role import TapiCommonPortRole # noqa: F401,E501
from tapi_server.models.tapi_common_service_interface_point_ref import TapiCommonServiceInterfacePointRef # noqa: F401,E501
from tapi_server.models.tapi_connectivity_connection_end_point_ref import TapiConnectivityConnectionEndPointRef # noqa: F401,E501
from tapi_server.models.tapi_connectivity_protection_role import TapiConnectivityProtectionRole # noqa: F401,E501
from tapi_server import util
class TapiConnectivityConnectivityServiceEndPoint(Model):
    """NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).

    Do not edit the class manually.
    """

    def __init__(self, operational_state=None, lifecycle_state=None, administrative_state=None, name=None, local_id=None, protection_role=None, role=None, service_interface_point=None, layer_protocol_name=None, layer_protocol_qualifier=None, connection_end_point=None, direction=None, capacity=None):  # noqa: E501
        """TapiConnectivityConnectivityServiceEndPoint - a model defined in OpenAPI

        :param operational_state: The operational_state of this TapiConnectivityConnectivityServiceEndPoint.  # noqa: E501
        :type operational_state: TapiCommonOperationalState
        :param lifecycle_state: The lifecycle_state of this TapiConnectivityConnectivityServiceEndPoint.  # noqa: E501
        :type lifecycle_state: TapiCommonLifecycleState
        :param administrative_state: The administrative_state of this TapiConnectivityConnectivityServiceEndPoint.  # noqa: E501
        :type administrative_state: TapiCommonAdministrativeState
        :param name: The name of this TapiConnectivityConnectivityServiceEndPoint.  # noqa: E501
        :type name: List[TapiCommonNameAndValue]
        :param local_id: The local_id of this TapiConnectivityConnectivityServiceEndPoint.  # noqa: E501
        :type local_id: str
        :param protection_role: The protection_role of this TapiConnectivityConnectivityServiceEndPoint.  # noqa: E501
        :type protection_role: TapiConnectivityProtectionRole
        :param role: The role of this TapiConnectivityConnectivityServiceEndPoint.  # noqa: E501
        :type role: TapiCommonPortRole
        :param service_interface_point: The service_interface_point of this TapiConnectivityConnectivityServiceEndPoint.  # noqa: E501
        :type service_interface_point: TapiCommonServiceInterfacePointRef
        :param layer_protocol_name: The layer_protocol_name of this TapiConnectivityConnectivityServiceEndPoint.  # noqa: E501
        :type layer_protocol_name: TapiCommonLayerProtocolName
        :param layer_protocol_qualifier: The layer_protocol_qualifier of this TapiConnectivityConnectivityServiceEndPoint.  # noqa: E501
        :type layer_protocol_qualifier: str
        :param connection_end_point: The connection_end_point of this TapiConnectivityConnectivityServiceEndPoint.  # noqa: E501
        :type connection_end_point: List[TapiConnectivityConnectionEndPointRef]
        :param direction: The direction of this TapiConnectivityConnectivityServiceEndPoint.  # noqa: E501
        :type direction: TapiCommonPortDirection
        :param capacity: The capacity of this TapiConnectivityConnectivityServiceEndPoint.  # noqa: E501
        :type capacity: TapiCommonCapacity
        """
        # Maps each attribute name to its declared OpenAPI type; used by the
        # generic (de)serialization machinery in the Model base class.
        self.openapi_types = {
            'operational_state': TapiCommonOperationalState,
            'lifecycle_state': TapiCommonLifecycleState,
            'administrative_state': TapiCommonAdministrativeState,
            'name': List[TapiCommonNameAndValue],
            'local_id': str,
            'protection_role': TapiConnectivityProtectionRole,
            'role': TapiCommonPortRole,
            'service_interface_point': TapiCommonServiceInterfacePointRef,
            'layer_protocol_name': TapiCommonLayerProtocolName,
            'layer_protocol_qualifier': str,
            'connection_end_point': List[TapiConnectivityConnectionEndPointRef],
            'direction': TapiCommonPortDirection,
            'capacity': TapiCommonCapacity
        }

        # Maps each Python attribute name to its wire (JSON/YANG) field name.
        self.attribute_map = {
            'operational_state': 'operational-state',
            'lifecycle_state': 'lifecycle-state',
            'administrative_state': 'administrative-state',
            'name': 'name',
            'local_id': 'local-id',
            'protection_role': 'protection-role',
            'role': 'role',
            'service_interface_point': 'service-interface-point',
            'layer_protocol_name': 'layer-protocol-name',
            'layer_protocol_qualifier': 'layer-protocol-qualifier',
            'connection_end_point': 'connection-end-point',
            'direction': 'direction',
            'capacity': 'capacity'
        }

        self._operational_state = operational_state
        self._lifecycle_state = lifecycle_state
        self._administrative_state = administrative_state
        self._name = name
        self._local_id = local_id
        self._protection_role = protection_role
        self._role = role
        self._service_interface_point = service_interface_point
        self._layer_protocol_name = layer_protocol_name
        self._layer_protocol_qualifier = layer_protocol_qualifier
        self._connection_end_point = connection_end_point
        self._direction = direction
        self._capacity = capacity

    @classmethod
    def from_dict(cls, dikt) -> 'TapiConnectivityConnectivityServiceEndPoint':
        """Returns the dict as a model

        :param dikt: A dict.
        :type: dict
        :return: The tapi.connectivity.ConnectivityServiceEndPoint of this TapiConnectivityConnectivityServiceEndPoint.  # noqa: E501
        :rtype: TapiConnectivityConnectivityServiceEndPoint
        """
        return util.deserialize_model(dikt, cls)

    @property
    def operational_state(self):
        """Gets the operational_state of this TapiConnectivityConnectivityServiceEndPoint.

        :return: The operational_state of this TapiConnectivityConnectivityServiceEndPoint.
        :rtype: TapiCommonOperationalState
        """
        return self._operational_state

    @operational_state.setter
    def operational_state(self, operational_state):
        """Sets the operational_state of this TapiConnectivityConnectivityServiceEndPoint.

        :param operational_state: The operational_state of this TapiConnectivityConnectivityServiceEndPoint.
        :type operational_state: TapiCommonOperationalState
        """
        self._operational_state = operational_state

    @property
    def lifecycle_state(self):
        """Gets the lifecycle_state of this TapiConnectivityConnectivityServiceEndPoint.

        :return: The lifecycle_state of this TapiConnectivityConnectivityServiceEndPoint.
        :rtype: TapiCommonLifecycleState
        """
        return self._lifecycle_state

    @lifecycle_state.setter
    def lifecycle_state(self, lifecycle_state):
        """Sets the lifecycle_state of this TapiConnectivityConnectivityServiceEndPoint.

        :param lifecycle_state: The lifecycle_state of this TapiConnectivityConnectivityServiceEndPoint.
        :type lifecycle_state: TapiCommonLifecycleState
        """
        self._lifecycle_state = lifecycle_state

    @property
    def administrative_state(self):
        """Gets the administrative_state of this TapiConnectivityConnectivityServiceEndPoint.

        :return: The administrative_state of this TapiConnectivityConnectivityServiceEndPoint.
        :rtype: TapiCommonAdministrativeState
        """
        return self._administrative_state

    @administrative_state.setter
    def administrative_state(self, administrative_state):
        """Sets the administrative_state of this TapiConnectivityConnectivityServiceEndPoint.

        :param administrative_state: The administrative_state of this TapiConnectivityConnectivityServiceEndPoint.
        :type administrative_state: TapiCommonAdministrativeState
        """
        self._administrative_state = administrative_state

    @property
    def name(self):
        """Gets the name of this TapiConnectivityConnectivityServiceEndPoint.

        List of names. A property of an entity with a value that is unique in some namespace but may change during the life of the entity. A name carries no semantics with respect to the purpose of the entity.  # noqa: E501

        :return: The name of this TapiConnectivityConnectivityServiceEndPoint.
        :rtype: List[TapiCommonNameAndValue]
        """
        return self._name

    @name.setter
    def name(self, name):
        """Sets the name of this TapiConnectivityConnectivityServiceEndPoint.

        List of names. A property of an entity with a value that is unique in some namespace but may change during the life of the entity. A name carries no semantics with respect to the purpose of the entity.  # noqa: E501

        :param name: The name of this TapiConnectivityConnectivityServiceEndPoint.
        :type name: List[TapiCommonNameAndValue]
        """
        self._name = name

    @property
    def local_id(self):
        """Gets the local_id of this TapiConnectivityConnectivityServiceEndPoint.

        none  # noqa: E501

        :return: The local_id of this TapiConnectivityConnectivityServiceEndPoint.
        :rtype: str
        """
        return self._local_id

    @local_id.setter
    def local_id(self, local_id):
        """Sets the local_id of this TapiConnectivityConnectivityServiceEndPoint.

        none  # noqa: E501

        :param local_id: The local_id of this TapiConnectivityConnectivityServiceEndPoint.
        :type local_id: str
        """
        self._local_id = local_id

    @property
    def protection_role(self):
        """Gets the protection_role of this TapiConnectivityConnectivityServiceEndPoint.

        :return: The protection_role of this TapiConnectivityConnectivityServiceEndPoint.
        :rtype: TapiConnectivityProtectionRole
        """
        return self._protection_role

    @protection_role.setter
    def protection_role(self, protection_role):
        """Sets the protection_role of this TapiConnectivityConnectivityServiceEndPoint.

        :param protection_role: The protection_role of this TapiConnectivityConnectivityServiceEndPoint.
        :type protection_role: TapiConnectivityProtectionRole
        """
        self._protection_role = protection_role

    @property
    def role(self):
        """Gets the role of this TapiConnectivityConnectivityServiceEndPoint.

        :return: The role of this TapiConnectivityConnectivityServiceEndPoint.
        :rtype: TapiCommonPortRole
        """
        return self._role

    @role.setter
    def role(self, role):
        """Sets the role of this TapiConnectivityConnectivityServiceEndPoint.

        :param role: The role of this TapiConnectivityConnectivityServiceEndPoint.
        :type role: TapiCommonPortRole
        """
        self._role = role

    @property
    def service_interface_point(self):
        """Gets the service_interface_point of this TapiConnectivityConnectivityServiceEndPoint.

        :return: The service_interface_point of this TapiConnectivityConnectivityServiceEndPoint.
        :rtype: TapiCommonServiceInterfacePointRef
        """
        return self._service_interface_point

    @service_interface_point.setter
    def service_interface_point(self, service_interface_point):
        """Sets the service_interface_point of this TapiConnectivityConnectivityServiceEndPoint.

        :param service_interface_point: The service_interface_point of this TapiConnectivityConnectivityServiceEndPoint.
        :type service_interface_point: TapiCommonServiceInterfacePointRef
        """
        self._service_interface_point = service_interface_point

    @property
    def layer_protocol_name(self):
        """Gets the layer_protocol_name of this TapiConnectivityConnectivityServiceEndPoint.

        :return: The layer_protocol_name of this TapiConnectivityConnectivityServiceEndPoint.
        :rtype: TapiCommonLayerProtocolName
        """
        return self._layer_protocol_name

    @layer_protocol_name.setter
    def layer_protocol_name(self, layer_protocol_name):
        """Sets the layer_protocol_name of this TapiConnectivityConnectivityServiceEndPoint.

        :param layer_protocol_name: The layer_protocol_name of this TapiConnectivityConnectivityServiceEndPoint.
        :type layer_protocol_name: TapiCommonLayerProtocolName
        """
        self._layer_protocol_name = layer_protocol_name

    @property
    def layer_protocol_qualifier(self):
        """Gets the layer_protocol_qualifier of this TapiConnectivityConnectivityServiceEndPoint.

        none  # noqa: E501

        :return: The layer_protocol_qualifier of this TapiConnectivityConnectivityServiceEndPoint.
        :rtype: str
        """
        return self._layer_protocol_qualifier

    @layer_protocol_qualifier.setter
    def layer_protocol_qualifier(self, layer_protocol_qualifier):
        """Sets the layer_protocol_qualifier of this TapiConnectivityConnectivityServiceEndPoint.

        none  # noqa: E501

        :param layer_protocol_qualifier: The layer_protocol_qualifier of this TapiConnectivityConnectivityServiceEndPoint.
        :type layer_protocol_qualifier: str
        """
        self._layer_protocol_qualifier = layer_protocol_qualifier

    @property
    def connection_end_point(self):
        """Gets the connection_end_point of this TapiConnectivityConnectivityServiceEndPoint.

        none  # noqa: E501

        :return: The connection_end_point of this TapiConnectivityConnectivityServiceEndPoint.
        :rtype: List[TapiConnectivityConnectionEndPointRef]
        """
        return self._connection_end_point

    @connection_end_point.setter
    def connection_end_point(self, connection_end_point):
        """Sets the connection_end_point of this TapiConnectivityConnectivityServiceEndPoint.

        none  # noqa: E501

        :param connection_end_point: The connection_end_point of this TapiConnectivityConnectivityServiceEndPoint.
        :type connection_end_point: List[TapiConnectivityConnectionEndPointRef]
        """
        self._connection_end_point = connection_end_point

    @property
    def direction(self):
        """Gets the direction of this TapiConnectivityConnectivityServiceEndPoint.

        :return: The direction of this TapiConnectivityConnectivityServiceEndPoint.
        :rtype: TapiCommonPortDirection
        """
        return self._direction

    @direction.setter
    def direction(self, direction):
        """Sets the direction of this TapiConnectivityConnectivityServiceEndPoint.

        :param direction: The direction of this TapiConnectivityConnectivityServiceEndPoint.
        :type direction: TapiCommonPortDirection
        """
        self._direction = direction

    @property
    def capacity(self):
        """Gets the capacity of this TapiConnectivityConnectivityServiceEndPoint.

        :return: The capacity of this TapiConnectivityConnectivityServiceEndPoint.
        :rtype: TapiCommonCapacity
        """
        return self._capacity

    @capacity.setter
    def capacity(self, capacity):
        """Sets the capacity of this TapiConnectivityConnectivityServiceEndPoint.

        :param capacity: The capacity of this TapiConnectivityConnectivityServiceEndPoint.
        :type capacity: TapiCommonCapacity
        """
        self._capacity = capacity
| {
"content_hash": "a5b24ff702dcef632c2e109b153166aa",
"timestamp": "",
"source": "github",
"line_count": 395,
"max_line_length": 314,
"avg_line_length": 42.05569620253164,
"alnum_prop": 0.7180351553094149,
"repo_name": "karthik-sethuraman/ONFOpenTransport",
"id": "a32391099274102fe14012ed3e23b15a1f3661a6",
"size": "16629",
"binary": false,
"copies": "4",
"ref": "refs/heads/develop",
"path": "RI/flask_server/tapi_server/models/tapi_connectivity_connectivity_service_end_point.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "836"
},
{
"name": "D",
"bytes": "2195"
},
{
"name": "Python",
"bytes": "960828"
},
{
"name": "Shell",
"bytes": "3059"
}
],
"symlink_target": ""
} |
"""
Provides step definitions to:
* run commands, like behave
* create textual files within a working directory
TODO:
matcher that ignores empty lines and whitespace and has contains comparison
"""
from __future__ import absolute_import, print_function
from behave import given, when, then, step, matchers
from behave4cmd0 import command_shell, command_util, pathutil, textutil
from behave4cmd0.pathutil import posixpath_normpath
import contextlib
import difflib
import os
import shutil
from hamcrest import assert_that, equal_to, is_not, contains_string
# -----------------------------------------------------------------------------
# INIT:
# -----------------------------------------------------------------------------
# Register the ":int" step-parameter type so "{name:int}" placeholders in the
# step patterns below are converted to int before the step function is called.
matchers.register_type(int=int)
# Module-wide switch: when True, the diff helpers also print the full
# expected/actual texts on failure.
DEBUG = False
# -----------------------------------------------------------------------------
# UTILITIES:
# -----------------------------------------------------------------------------
@contextlib.contextmanager
def on_assert_failed_print_details(actual, expected):
    """
    Context manager that prints an ndiff of expected vs. actual text when
    the wrapped block raises :exc:`AssertionError`; the error is re-raised.

    .. sourcecode:: python

        with on_assert_failed_print_details(actual_text, expected_text):
            assert actual == expected
    """
    try:
        yield
    except AssertionError:
        delta = difflib.ndiff(expected.splitlines(), actual.splitlines())
        print(u"DIFF (+ ACTUAL, - EXPECTED):\n{0}\n".format(u"\n".join(delta)))
        if DEBUG:
            print(u"expected:\n{0}\n".format(expected))
            print(u"actual:\n{0}\n".format(actual))
        raise
@contextlib.contextmanager
def on_error_print_details(actual, expected):
    """
    Context manager that prints an ndiff of expected vs. actual text when
    ANY exception escapes the wrapped block; the exception is re-raised.

    .. sourcecode:: python

        with on_error_print_details(actual_text, expected_text):
            ...  # Do something
    """
    try:
        yield
    except Exception:
        delta = u"\n".join(difflib.ndiff(expected.splitlines(),
                                         actual.splitlines()))
        print(u"DIFF (+ ACTUAL, - EXPECTED):\n{0}\n".format(delta))
        if DEBUG:
            print(u"expected:\n{0}\n".format(expected))
            print(u"actual:\n{0}".format(actual))
        raise
# -----------------------------------------------------------------------------
# STEPS: WORKING DIR
# -----------------------------------------------------------------------------
@given(u'a new working directory')
def step_a_new_working_directory(context):
    """
    Creates a new, empty working directory
    """
    command_util.ensure_context_attribute_exists(context, "workdir", None)
    # -- Ensure the directory path exists, then wipe it so it starts empty.
    command_util.ensure_workdir_exists(context)
    shutil.rmtree(context.workdir, ignore_errors=True)
@given(u'I use the current directory as working directory')
def step_use_curdir_as_working_directory(context):
    """
    Uses the current directory as working directory
    """
    # NOTE: Does not wipe the directory; only points the context at it.
    context.workdir = os.path.abspath(".")
    command_util.ensure_workdir_exists(context)
# -----------------------------------------------------------------------------
# STEPS: Create files with contents
# -----------------------------------------------------------------------------
@given(u'a file named "{filename}" and encoding="{encoding}" with')
def step_a_file_named_filename_and_encoding_with(context, filename, encoding):
    """Creates a textual file with the content provided as docstring.

    FIX: the step pattern must use the ``{filename}`` placeholder so behave
    can bind the ``filename`` argument; a literal placeholder text here would
    never match and the parameter would never be supplied.

    :param filename: Workdir-relative name of the file to create.
    :param encoding: Encoding used when writing the docstring content.
    """
    __encoding_is_valid = True  # -- Placeholder flag; no real validation yet.
    assert context.text is not None, "ENSURE: multiline text is provided."
    assert not os.path.isabs(filename)
    assert __encoding_is_valid
    command_util.ensure_workdir_exists(context)
    filename2 = os.path.join(context.workdir, filename)
    pathutil.create_textfile_with_contents(filename2, context.text, encoding)
@given(u'a file named "{filename}" with')
def step_a_file_named_filename_with(context, filename):
    """Creates a textual file (UTF-8) with the content provided as docstring.

    FIX: restores the ``{filename}`` placeholder in the step pattern so the
    ``filename`` argument is actually bound by behave.
    """
    step_a_file_named_filename_and_encoding_with(context, filename, "UTF-8")

    # -- SPECIAL CASE: For usage with behave steps.
    if filename.endswith(".feature"):
        command_util.ensure_context_attribute_exists(context, "features", [])
        context.features.append(filename)
@given(u'an empty file named "{filename}"')
def step_an_empty_file_named_filename(context, filename):
    """
    Creates an empty file.

    FIX: restores the ``{filename}`` placeholder in the step pattern so the
    ``filename`` argument is actually bound by behave.
    """
    assert not os.path.isabs(filename)
    command_util.ensure_workdir_exists(context)
    filename2 = os.path.join(context.workdir, filename)
    pathutil.create_textfile_with_contents(filename2, "")
# -----------------------------------------------------------------------------
# STEPS: Run commands
# -----------------------------------------------------------------------------
@when(u'I run "{command}"')
@when(u'I run `{command}`')
def step_i_run_command(context, command):
    """
    Run a command as subprocess, collect its output and returncode.
    """
    command_util.ensure_workdir_exists(context)
    # Result (returncode + output) is stored for the "then" assertion steps.
    context.command_result = command_shell.run(command, cwd=context.workdir)
    command_util.workdir_save_coverage_files(context.workdir)
    if False and DEBUG:  # -- Deliberately disabled diagnostic output.
        print(u"run_command: {0}".format(command))
        print(u"run_command.output {0}".format(context.command_result.output))
@when(u'I successfully run "{command}"')
@when(u'I successfully run `{command}`')
def step_i_successfully_run_command(context, command):
    # Run the command, then require a zero returncode.
    step_i_run_command(context, command)
    step_it_should_pass(context)
@then(u'it should fail with result "{result:int}"')
def step_it_should_fail_with_result(context, result):
    # The returncode must match the expected value, which itself must be
    # non-zero (otherwise this would not be a failure).
    assert_that(context.command_result.returncode, equal_to(result))
    assert_that(result, is_not(equal_to(0)))
@then(u'the command should fail with returncode="{result:int}"')
def step_it_should_fail_with_returncode(context, result):
    # Same contract as step_it_should_fail_with_result, alternate wording.
    assert_that(context.command_result.returncode, equal_to(result))
    assert_that(result, is_not(equal_to(0)))
@then(u'the command returncode is "{result:int}"')
def step_the_command_returncode_is(context, result):
    # Exact returncode check; zero and non-zero are both acceptable here.
    assert_that(context.command_result.returncode, equal_to(result))
@then(u'the command returncode is non-zero')
def step_the_command_returncode_is_nonzero(context):
    # Any failure code is acceptable; only success (0) is rejected.
    assert_that(context.command_result.returncode, is_not(equal_to(0)))
@then(u'it should pass')
def step_it_should_pass(context):
    # On mismatch, the command output is shown as the assertion reason.
    assert_that(context.command_result.returncode, equal_to(0),
                context.command_result.output)
@then(u'it should fail')
def step_it_should_fail(context):
    # On mismatch, the command output is shown as the assertion reason.
    assert_that(context.command_result.returncode, is_not(equal_to(0)),
                context.command_result.output)
@then(u'it should pass with')
def step_it_should_pass_with(context):
    '''
    Requires a zero returncode AND that the multiline docstring text occurs
    in the command output.

    EXAMPLE:
        ...
        when I run "behave ..."
        then it should pass with:
            """
            TEXT
            """
    '''
    assert context.text is not None, "ENSURE: multiline text is provided."
    step_command_output_should_contain(context)
    assert_that(context.command_result.returncode, equal_to(0),
                context.command_result.output)
@then(u'it should fail with')
def step_it_should_fail_with(context):
    '''
    Requires a non-zero returncode AND that the multiline docstring text
    occurs in the command output.

    EXAMPLE:
        ...
        when I run "behave ..."
        then it should fail with:
            """
            TEXT
            """
    '''
    assert context.text is not None, "ENSURE: multiline text is provided."
    step_command_output_should_contain(context)
    assert_that(context.command_result.returncode, is_not(equal_to(0)))
# -----------------------------------------------------------------------------
# STEPS FOR: Output Comparison
# -----------------------------------------------------------------------------
@then(u'the command output should contain "{text}"')
def step_command_output_should_contain_text(context, text):
    '''
    Verifies that the last command's output contains the given text.
    Supports the "{__WORKDIR__}" and "{__CWD__}" placeholders.

    EXAMPLE:
        ...
        Then the command output should contain "TEXT"
    '''
    expected = text
    uses_placeholders = ("{__WORKDIR__}" in expected) or ("{__CWD__}" in expected)
    if uses_placeholders:
        expected = textutil.template_substitute(
            text,
            __WORKDIR__=posixpath_normpath(context.workdir),
            __CWD__=posixpath_normpath(os.getcwd()))
    output = context.command_result.output
    with on_assert_failed_print_details(output, expected):
        textutil.assert_normtext_should_contain(output, expected)
@then(u'the command output should not contain "{text}"')
def step_command_output_should_not_contain_text(context, text):
    '''
    Verifies that the last command's output does NOT contain the given text.
    Supports the "{__WORKDIR__}" and "{__CWD__}" placeholders.

    EXAMPLE:
        ...
        then the command output should not contain "TEXT"
    '''
    unexpected = text
    uses_placeholders = ("{__WORKDIR__}" in text) or ("{__CWD__}" in text)
    if uses_placeholders:
        unexpected = textutil.template_substitute(
            text,
            __WORKDIR__=posixpath_normpath(context.workdir),
            __CWD__=posixpath_normpath(os.getcwd()))
    output = context.command_result.output
    with on_assert_failed_print_details(output, unexpected):
        textutil.assert_normtext_should_not_contain(output, unexpected)
@then(u'the command output should contain "{text}" {count:d} times')
def step_command_output_should_contain_text_multiple_times(context, text, count):
    '''
    EXAMPLE:
        ...
        Then the command output should contain "TEXT" 3 times
    '''
    assert count >= 0
    expected_text = text
    # Substitute workdir/cwd placeholders before comparing.
    if "{__WORKDIR__}" in expected_text or "{__CWD__}" in expected_text:
        expected_text = textutil.template_substitute(text,
            __WORKDIR__ = posixpath_normpath(context.workdir),
            __CWD__ = posixpath_normpath(os.getcwd())
        )
    actual_output = context.command_result.output
    with on_assert_failed_print_details(actual_output, expected_text):
        textutil.assert_normtext_should_contain_multiple_times(actual_output,
                                                               expected_text,
                                                               count)
@then(u'the command output should contain exactly "{text}"')
def step_command_output_should_contain_exactly_text(context, text):
    """
    Verifies that the command output of the last command contains the
    expected text exactly (no whitespace normalization).

    .. code-block:: gherkin

        When I run "echo Hello"
        Then the command output should contain "Hello"
    """
    expected_text = text
    if "{__WORKDIR__}" in text or "{__CWD__}" in text:
        expected_text = textutil.template_substitute(text,
            __WORKDIR__ = posixpath_normpath(context.workdir),
            __CWD__ = posixpath_normpath(os.getcwd())
        )
    actual_output = context.command_result.output
    textutil.assert_text_should_contain_exactly(actual_output, expected_text)
@then(u'the command output should not contain exactly "{text}"')
def step_command_output_should_not_contain_exactly_text(context, text):
    """Verifies that the exact text does NOT occur in the command output.

    Supports the "{__WORKDIR__}" and "{__CWD__}" placeholders.
    """
    expected_text = text
    if "{__WORKDIR__}" in text or "{__CWD__}" in text:
        expected_text = textutil.template_substitute(text,
            __WORKDIR__ = posixpath_normpath(context.workdir),
            __CWD__ = posixpath_normpath(os.getcwd())
        )
    actual_output = context.command_result.output
    textutil.assert_text_should_not_contain_exactly(actual_output, expected_text)
@then(u'the command output should contain')
def step_command_output_should_contain(context):
    '''
    Multiline variant; the expected text is taken from the step's docstring.

    EXAMPLE:
        ...
        when I run "behave ..."
        then it should pass
        and the command output should contain:
            """
            TEXT
            """
    '''
    assert context.text is not None, "REQUIRE: multi-line text"
    step_command_output_should_contain_text(context, context.text)
@then(u'the command output should not contain')
def step_command_output_should_not_contain(context):
    '''
    Multiline variant; the unexpected text is taken from the step's docstring.

    EXAMPLE:
        ...
        when I run "behave ..."
        then it should pass
        and the command output should not contain:
            """
            TEXT
            """
    '''
    assert context.text is not None, "REQUIRE: multi-line text"
    # NOTE: Unlike the positive variant, surrounding whitespace is stripped.
    step_command_output_should_not_contain_text(context, context.text.strip())
@then(u'the command output should contain {count:d} times')
def step_command_output_should_contain_multiple_times(context, count):
    '''
    Multiline variant; the expected text is taken from the step's docstring.

    EXAMPLE:
        ...
        when I run "behave ..."
        then it should pass
        and the command output should contain 2 times:
            """
            TEXT
            """
    '''
    assert context.text is not None, "REQUIRE: multi-line text"
    step_command_output_should_contain_text_multiple_times(context,
                                                           context.text, count)
@then(u'the command output should contain exactly')
def step_command_output_should_contain_exactly_with_multiline_text(context):
    # Multiline variant of the exact-containment check.
    assert context.text is not None, "REQUIRE: multi-line text"
    step_command_output_should_contain_exactly_text(context, context.text)
@then(u'the command output should not contain exactly')
def step_command_output_should_contain_not_exactly_with_multiline_text(context):
    # Multiline variant of the exact non-containment check.
    assert context.text is not None, "REQUIRE: multi-line text"
    step_command_output_should_not_contain_exactly_text(context, context.text)
# -----------------------------------------------------------------------------
# STEPS FOR: Directories
# -----------------------------------------------------------------------------
@step(u'I remove the directory "{directory}"')
def step_remove_directory(context, directory):
    """Remove ``directory`` (relative paths resolve against the workdir)
    and verify afterwards that it no longer exists."""
    if os.path.isabs(directory):
        target = directory
    else:
        target = os.path.join(context.workdir, os.path.normpath(directory))
    if os.path.isdir(target):
        shutil.rmtree(target, ignore_errors=True)
    assert_that(not os.path.isdir(target))
@given(u'I ensure that the directory "{directory}" does not exist')
def step_given_the_directory_should_not_exist(context, directory):
    # Delegates to the removal step (no-op if the directory does not exist).
    step_remove_directory(context, directory)
@given(u'a directory named "{path}"')
def step_directory_named_dirname(context, path):
    """Create directory ``path`` (and missing parents) below the workdir."""
    assert context.workdir, "REQUIRE: context.workdir"
    path_ = os.path.join(context.workdir, os.path.normpath(path))
    if not os.path.exists(path_):
        os.makedirs(path_)
    assert os.path.isdir(path_)
@then(u'the directory "{directory}" should exist')
def step_the_directory_should_exist(context, directory):
    """Assert that ``directory`` exists (relative paths resolve against the workdir)."""
    path_ = directory
    if not os.path.isabs(directory):
        path_ = os.path.join(context.workdir, os.path.normpath(directory))
    assert_that(os.path.isdir(path_))
@then(u'the directory "{directory}" should not exist')
def step_the_directory_should_not_exist(context, directory):
    """Assert that ``directory`` does not exist (relative paths resolve against the workdir)."""
    path_ = directory
    if not os.path.isabs(directory):
        path_ = os.path.join(context.workdir, os.path.normpath(directory))
    assert_that(not os.path.isdir(path_))
@step(u'the directory "{directory}" exists')
def step_directory_exists(context, directory):
    """
    Verifies that a directory exists.

    .. code-block:: gherkin

        Given the directory "abc/" exists
        When the directory "abc/" exists
    """
    step_the_directory_should_exist(context, directory)
@step(u'the directory "{directory}" does not exist')
def step_directory_named_does_not_exist(context, directory):
    """
    Verifies that a directory does not exist.

    .. code-block:: gherkin

        Given the directory "abc/" does not exist
        When the directory "abc/" does not exist
    """
    step_the_directory_should_not_exist(context, directory)
# -----------------------------------------------------------------------------
# FILE STEPS:
# -----------------------------------------------------------------------------
@step(u'a file named "{filename}" exists')
def step_file_named_filename_exists(context, filename):
    """
    Verifies that a file with this filename exists.

    FIX: restores the ``{filename}`` placeholder in the step pattern so the
    ``filename`` argument is actually bound by behave.

    .. code-block:: gherkin

        Given a file named "abc.txt" exists
        When a file named "abc.txt" exists
    """
    step_file_named_filename_should_exist(context, filename)
@step(u'a file named "{filename}" does not exist')
def step_file_named_filename_does_not_exist(context, filename):
    """
    Verifies that a file with this filename does not exist.

    FIX: restores the ``{filename}`` placeholder in the step pattern so the
    ``filename`` argument is actually bound by behave.

    .. code-block:: gherkin

        Given a file named "abc.txt" does not exist
        When a file named "abc.txt" does not exist
    """
    step_file_named_filename_should_not_exist(context, filename)
@then(u'a file named "{filename}" should exist')
def step_file_named_filename_should_exist(context, filename):
    """Assert that ``filename`` exists (resolved against the workdir) and is
    a regular file.  FIX: restores the ``{filename}`` placeholder."""
    command_util.ensure_workdir_exists(context)
    filename_ = pathutil.realpath_with_context(filename, context)
    assert_that(os.path.exists(filename_) and os.path.isfile(filename_))
@then(u'a file named "{filename}" should not exist')
def step_file_named_filename_should_not_exist(context, filename):
    """Assert that ``filename`` does not exist (resolved against the
    workdir).  FIX: restores the ``{filename}`` placeholder."""
    command_util.ensure_workdir_exists(context)
    filename_ = pathutil.realpath_with_context(filename, context)
    assert_that(not os.path.exists(filename_))
# -----------------------------------------------------------------------------
# STEPS FOR FILE CONTENTS:
# -----------------------------------------------------------------------------
@then(u'the file "{filename}" should contain "{text}"')
def step_file_should_contain_text(context, filename, text):
    """Assert that the file's contents contain ``text`` (normalized-text
    comparison).  Supports "{__WORKDIR__}" / "{__CWD__}" placeholders.

    FIX: restores the ``{filename}`` placeholder in the step pattern so the
    ``filename`` argument is actually bound by behave.
    """
    expected_text = text
    if "{__WORKDIR__}" in text or "{__CWD__}" in text:
        expected_text = textutil.template_substitute(text,
            __WORKDIR__ = posixpath_normpath(context.workdir),
            __CWD__ = posixpath_normpath(os.getcwd())
        )
    file_contents = pathutil.read_file_contents(filename, context=context)
    file_contents = file_contents.rstrip()
    with on_assert_failed_print_details(file_contents, expected_text):
        textutil.assert_normtext_should_contain(file_contents, expected_text)
@then(u'the file "{filename}" should not contain "{text}"')
def step_file_should_not_contain_text(context, filename, text):
    """Expect that the file's (normalized) contents do not contain `text`."""
    file_contents = pathutil.read_file_contents(filename, context=context)
    file_contents = file_contents.rstrip()
    textutil.assert_normtext_should_not_contain(file_contents, text)
    # XXX assert_that(file_contents, is_not(contains_string(text)))
@then(u'the file "{filename}" should contain')
def step_file_should_contain_multiline_text(context, filename):
    """Multiline variant: expected text comes from the step's doc-string."""
    assert context.text is not None, "REQUIRE: multiline text"
    step_file_should_contain_text(context, filename, context.text)
@then(u'the file "{filename}" should not contain')
def step_file_should_not_contain_multiline_text(context, filename):
    """Multiline variant: expected text comes from the step's doc-string."""
    assert context.text is not None, "REQUIRE: multiline text"
    step_file_should_not_contain_text(context, filename, context.text)
# -----------------------------------------------------------------------------
# ENVIRONMENT VARIABLES
# -----------------------------------------------------------------------------
@step(u'I set the environment variable "{env_name}" to "{env_value}"')
def step_I_set_the_environment_variable_to(context, env_name, env_value):
    """Set a process environment variable and record it on the context."""
    if not hasattr(context, "environ"):
        context.environ = {}
    # Mirror the value in the scenario bookkeeping and the real environment.
    for environ in (context.environ, os.environ):
        environ[env_name] = env_value
@step(u'I remove the environment variable "{env_name}"')
def step_I_remove_the_environment_variable(context, env_name):
    """Remove an environment variable from the process environment and from
    the scenario context.

    Never fails, even when the variable is currently not set.
    """
    if not hasattr(context, "environ"):
        context.environ = {}
    # pop() with a default instead of the old set-then-del dance: same net
    # effect, without first writing a dummy "" value into the real env.
    context.environ.pop(env_name, None)
    os.environ.pop(env_name, None)
| {
"content_hash": "5c8926b919569f94d2944a4642e143e0",
"timestamp": "",
"source": "github",
"line_count": 521,
"max_line_length": 81,
"avg_line_length": 37.96928982725528,
"alnum_prop": 0.6127792943079567,
"repo_name": "Abdoctor/behave",
"id": "fd4c830a8e5c4e371217598a7a43f5157c1d088f",
"size": "19799",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "behave4cmd0/command_steps.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "479"
},
{
"name": "Cucumber",
"bytes": "674035"
},
{
"name": "Python",
"bytes": "919726"
},
{
"name": "Shell",
"bytes": "1030"
}
],
"symlink_target": ""
} |
import sys
import DefaultTable
import array
from fontTools import ttLib
from fontTools.misc.textTools import safeEval
class table__c_v_t(DefaultTable.DefaultTable):
	"""TrueType ``cvt `` (Control Value) table: a sequence of signed
	16-bit values, exposed with a list-like interface.
	"""
	def decompile(self, data, ttFont):
		# 'h' = signed 16-bit. Table data is big-endian, so byte-swap
		# after reading on little-endian hosts.
		values = array.array("h")
		values.fromstring(data)
		if sys.byteorder <> "big":
			values.byteswap()
		self.values = values
	def compile(self, ttFont):
		# Work on a copy so byte-swapping does not mutate self.values.
		values = self.values[:]
		if sys.byteorder <> "big":
			values.byteswap()
		return values.tostring()
	def toXML(self, writer, ttFont):
		# One <cv value=... index=.../> element per control value.
		for i in range(len(self.values)):
			value = self.values[i]
			writer.simpletag("cv", value=value, index=i)
			writer.newline()
	def fromXML(self, (name, attrs, content), ttFont):
		# NOTE: Python 2 tuple-parameter syntax; this module predates Py3.
		if not hasattr(self, "values"):
			self.values = array.array("h")
		if name == "cv":
			index = safeEval(attrs["index"])
			value = safeEval(attrs["value"])
			# Zero-pad the array so that `index` becomes addressable.
			for i in range(1 + index - len(self.values)):
				self.values.append(0)
			self.values[index] = value
	def __len__(self):
		return len(self.values)
	def __getitem__(self, index):
		return self.values[index]
	def __setitem__(self, index, value):
		self.values[index] = value
	def __delitem__(self, index):
		del self.values[index]
| {
"content_hash": "1f626e1a414b7c7ee904258d8640015f",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 51,
"avg_line_length": 24.183673469387756,
"alnum_prop": 0.6717299578059072,
"repo_name": "shadowmint/nwidget",
"id": "be08ca3ae565eb03990b953414db5d4098ce487d",
"size": "1185",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib/fonttools-2.3/Lib/fontTools/ttLib/tables/_c_v_t.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "11298"
},
{
"name": "JavaScript",
"bytes": "17394"
},
{
"name": "PHP",
"bytes": "2190"
},
{
"name": "Python",
"bytes": "9815941"
},
{
"name": "Shell",
"bytes": "10521"
}
],
"symlink_target": ""
} |
import sys
# html.escape replaced xml.sax.saxutils.escape as the canonical HTML escaper
# in Python 3.2; fall back for older interpreters.
if sys.version_info[:2] < (3, 2):
    from xml.sax.saxutils import escape
else:
    from html import escape
# Sample data for main(): multi-gold-medal Olympians.
WINNERS = ("Nikolai Andrianov", "Matt Biondi", "Bjørn Dæhlie",
           "Birgit Fischer", "Sawao Kato", "Larisa Latynina", "Carl Lewis",
           "Michael Phelps", "Mark Spitz", "Jenny Thompson")
def main():
    """Print WINNERS as HTML tables, then as text tables, for 2..5 rows."""
    for tabulator in (html_tabulator, text_tabulator):
        layout = Layout(tabulator)
        for rows in range(2, 6):
            print(layout.tabulate(rows, WINNERS))
class Layout:
    """Strategy holder: delegates table rendering to a tabulator callable."""
    def __init__(self, tabulator):
        # tabulator: callable(rows, items) -> str
        self.tabulator = tabulator
    def tabulate(self, rows, items):
        return self.tabulator(rows, items)
def html_tabulator(rows, items):
    """Render *items* as an HTML table filled row by row.

    The column count is ceil(len(items) / rows); a partially filled final
    row is closed before the table ends.  Cell text is HTML-escaped.
    """
    columns = -(-len(items) // rows)  # ceiling division
    pieces = ['<table border="1">\n']
    slot = 0
    for item in items:
        if slot == 0:
            pieces.append("<tr>")
        pieces.append("<td>{}</td>".format(escape(str(item))))
        slot = (slot + 1) % columns
        if slot == 0:
            pieces.append("</tr>\n")
    if not pieces[-1].endswith("\n"):
        # The last row was only partially filled; close it.
        pieces.append("</tr>\n")
    pieces.append("</table>\n")
    return "".join(pieces)
def text_tabulator(rows, items):
    """Render *items* (strings) as an ASCII table with ceil(len/rows) columns.

    A partially filled final row is padded with empty cells; a fully empty
    trailing row is dropped.  Each completed row is followed by a divider
    rule so the output looks like: +---+ / | a | / +---+ ...
    """
    columns, extra = divmod(len(items), rows)
    if extra:
        columns += 1
    padding = rows * columns - len(items)
    if padding == columns:
        padding = 0  # an entirely blank row would add nothing
    width = 0
    for item in items:
        width = max(width, len(item))
    divider = "+" + ("-" * (width + 2) + "+") * columns + "\n"
    table = [divider]
    column = 0
    for item in items + ("",) * padding:
        if column == 0:
            table.append("|")
        table.append(" {:<{}} |".format(item, width))
        column += 1
        if column == columns:
            table.append("\n")
            # BUGFIX: the divider must be emitted only when a row completes;
            # emitting it after every cell corrupts multi-column layouts.
            table.append(divider)
            column = 0
    return "".join(table)
# Run the demos only when executed as a script, not on import.
if __name__ == "__main__":
    main()
| {
"content_hash": "2593a411055b9e13ed32f4d367128b14",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 72,
"avg_line_length": 27.658227848101266,
"alnum_prop": 0.54279176201373,
"repo_name": "nwiizo/workspace_2017",
"id": "7b3af895d4d63c260134d44e23d2e40812b45d97",
"size": "2805",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pipng/tabulator3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "173"
},
{
"name": "C++",
"bytes": "7105"
},
{
"name": "CSS",
"bytes": "50021"
},
{
"name": "Go",
"bytes": "112005"
},
{
"name": "HTML",
"bytes": "66435"
},
{
"name": "JavaScript",
"bytes": "73266"
},
{
"name": "Makefile",
"bytes": "1227"
},
{
"name": "PHP",
"bytes": "3916"
},
{
"name": "PowerShell",
"bytes": "277598"
},
{
"name": "Python",
"bytes": "11925958"
},
{
"name": "Ruby",
"bytes": "3779"
},
{
"name": "Rust",
"bytes": "1484076"
},
{
"name": "Shell",
"bytes": "86558"
}
],
"symlink_target": ""
} |
from django.db import migrations, models
import django.db.models.deletion
import geotrek.authent.models
class Migration(migrations.Migration):
    """Make the `structure` FK on FileType and Organism nullable/optional,
    defaulting to the default structure (see geotrek.authent.models)."""
    dependencies = [
        ('common', '0002_auto_20170323_1433'),
    ]
    operations = [
        migrations.AlterField(
            model_name='filetype',
            name='structure',
            field=models.ForeignKey(blank=True, db_column='structure', default=geotrek.authent.models.default_structure_pk, null=True, on_delete=django.db.models.deletion.CASCADE, to='authent.Structure', verbose_name='Related structure'),
        ),
        migrations.AlterField(
            model_name='organism',
            name='structure',
            field=models.ForeignKey(blank=True, db_column='structure', default=geotrek.authent.models.default_structure_pk, null=True, on_delete=django.db.models.deletion.CASCADE, to='authent.Structure', verbose_name='Related structure'),
        ),
    ]
| {
"content_hash": "5605716a70575d39317d712cfb7b403f",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 238,
"avg_line_length": 40.78260869565217,
"alnum_prop": 0.6748400852878464,
"repo_name": "GeotrekCE/Geotrek-admin",
"id": "a31d18ae2517147e65284876fc354793598b76cd",
"size": "989",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "geotrek/common/migrations/0003_auto_20180608_1236.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "46138"
},
{
"name": "Dockerfile",
"bytes": "1816"
},
{
"name": "HTML",
"bytes": "274524"
},
{
"name": "JavaScript",
"bytes": "231326"
},
{
"name": "Makefile",
"bytes": "1909"
},
{
"name": "PLpgSQL",
"bytes": "78024"
},
{
"name": "Python",
"bytes": "3456569"
},
{
"name": "SCSS",
"bytes": "7179"
},
{
"name": "Shell",
"bytes": "14369"
}
],
"symlink_target": ""
} |
__author__ = 'api.roman.public@gmail.com (Roman Nurik)'
import gdata.maps.client
import gdata.client
import gdata.sample_util
import gdata.data
import atom.data
class MapsExample:
  """Console demo of the Google Maps Data API using the gdata client.

  Python 2 code (print statements); network access and ClientLogin/OAuth
  credentials are required to actually run it.
  """
  def __init__(self):
    """Creates a GDataService and provides ClientLogin auth details to it."""
    # Authenticate using ClientLogin, AuthSub, or OAuth.
    self.client = gdata.maps.client.MapsClient()
    #self.client.http_client.debug = True
    gdata.sample_util.authorize_client(
        self.client, service='local', source='MapsData_Python_Sample-2.0',
        scopes=['http://maps.google.com/maps/feeds/'])
  def PrintAllMaps(self):
    """Prints a list of all the user's maps."""
    # Request the feed.
    feed = self.client.get_maps()
    # Print the results.
    print feed.title.text
    for entry in feed.entry:
      print "\t%s (map id=%s)" % (entry.title.text, entry.get_map_id())
    print
  def CreateMap(self, title, description, is_unlisted):
    """Creates a new map."""
    return self.client.create_map(title, description, unlisted=is_unlisted)
  def CreateFeature(self, map_id, title, content):
    """Adds a feature with the given title and content to the map."""
    return self.client.add_feature(map_id, title, content)
  def PrintAllFeatures(self, map_id):
    """Displays all features in a map."""
    # Request the feed.
    feed = self.client.get_features(map_id)
    # Print the results.
    print feed.title.text
    for entry in feed.entry:
      if not entry.title.text:
        print "\tNo Title"
      else:
        print "\t%s (feature id=%s)" % (entry.title.text.encode('utf-8'),
                                        entry.get_feature_id())
    print
  def UpdateMapTitle(self, entry_to_update, new_title):
    """Updates the title of the given entry.
    If the insertion is successful, the updated feature will be returned.
    """
    # Set the new title in the Entry object
    entry_to_update.title = atom.data.Title(type='text', text=new_title)
    return self.client.update(entry_to_update)
  def DeleteFeature(self, feature_entry):
    """Removes the feature specified by the given edit_link_href."""
    self.client.delete(feature_entry)
  def DeleteMap(self, map_entry):
    """Removes the map specified by the given edit_link_href."""
    self.client.delete(map_entry)
  def run(self):
    """Runs each of the example methods defined above, demonstrating how to
    interface with the Maps Data service.
    """
    # Demonstrate retrieving a list of the user's maps.
    self.PrintAllMaps()
    # Demonstrate how to create an unlisted map.
    unlisted_map = self.CreateMap('Whoa an unlisted map', 'a description',
                                  is_unlisted=True)
    print 'Successfully created unlisted map: %s' % unlisted_map.title.text
    # Delete the unlisted map.
    self.client.delete(unlisted_map)
    # Demonstrate how to publish a public map.
    public_map = self.CreateMap('Some cool new public map', 'a description',
                                is_unlisted=False)
    # NOTE(review): message says "unlisted" but this map is public.
    print "Successfully created unlisted map: %s" % public_map.title.text
    # Demonstrate updating a map's title.
    print "Now updating the title of the map we just created:"
    public_map = self.UpdateMapTitle(public_map, 'GData sample public map')
    print "Successfully changed the map's title to: %s" % public_map.title.text
    # Demonstrate how to retrieve the features for a map.
    # Get the map ID and build the feature feed URI for the specified map
    map_id = public_map.get_map_id()
    print "Now adding a feature to the map titled: %s" % public_map.title.text
    feature = self.CreateFeature(map_id, "A point feature",
        '<Placemark><description>Hello there!</description>'
        '<Point><coordinates>-122,37</coordinates></Point></Placemark>')
    print ("Successfully created feature '%s' on the map titled '%s'"
           % (feature.title.text, public_map.title.text))
    feature_id = feature.get_feature_id()
    print "Now printing all features"
    self.PrintAllFeatures(map_id)
    # Delete the feature we just added
    print "Now deleting the feature we just added"
    self.DeleteFeature(feature)
    print "Successfully deleted feature."
    self.PrintAllFeatures(map_id)
    # Demonstrate deleting maps.
    print "Now deleting the map titled: %s" % public_map.title.text
    self.DeleteMap(public_map)
    print "Successfully deleted map."
    self.PrintAllMaps()
def main():
  """Entry point: build a MapsExample client and exercise the demo flow.

  NOTE: It is recommended that you run this sample using a test account.
  """
  MapsExample().run()
# Run the sample only when invoked directly.
if __name__ == '__main__':
  main()
| {
"content_hash": "c37449972b6d58b0daf4b0d204b69815",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 79,
"avg_line_length": 32.689655172413794,
"alnum_prop": 0.6609704641350211,
"repo_name": "dekom/threepress-bookworm-read-only",
"id": "a636989ca9c21f947ab4bdc01990d4f2caa4f0cc",
"size": "5342",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "bookworm/gdata/samples/maps/MapsExample.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "89161"
},
{
"name": "Java",
"bytes": "6443"
},
{
"name": "JavaScript",
"bytes": "6632"
},
{
"name": "OpenEdge ABL",
"bytes": "125979"
},
{
"name": "Python",
"bytes": "4448604"
},
{
"name": "Shell",
"bytes": "2936"
}
],
"symlink_target": ""
} |
"""Utility functions used by values.py and ps_values.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.distribute import distribute_lib
from tensorflow.python.distribute import distribution_strategy_context as ds_context
from tensorflow.python.distribute import reduce_util
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.saved_model import save_context
from tensorflow.python.saved_model import save_options
from tensorflow.python.training.saving import saveable_object
def write_object_proto(var, proto, options):
  """Update a SavedObject proto for the caller.

  If a DistributedVariable object supports this method, it will be called when
  saving with a pre-built `SavedObject` proto representing the object, plus an
  instance of `SaveOptions`. This method is then free to modify that proto
  instance.

  `DistributedVariable` with `AUTO` or `ON_WRITE` synchronization optionally
  write out information about their components to the
  `experimental_distributed_variable_components` field of a
  `SavedVariable` (depending on the `SaveOptions` variable policy).

  Args:
    var: The DistributedVariable object.
    proto: A pre-built `SavedObject` proto for this object. It is assumed this
      will be a `SavedVariable` instance.
    options: A `SaveOptions` instance.
  """
  policy = options.experimental_variable_policy
  if policy._expand_distributed_variables():  # pylint: disable=protected-access
    for component in var.values:
      component_proto = (
          proto.variable.experimental_distributed_variable_components.add())
      # Drop the ":0" output suffix from the component's tensor name.
      component_proto.name = component.name.split(":")[0]
      component_proto.device = component.device
def get_on_write_saveable(var, primary_var, name):
  """Return the saveable spec for AUTO and ON_WRITE variables."""

  def tensor():
    # Deferred read: evaluated at save time only, so no work happens when we
    # are restoring instead of saving.
    if context.executing_eagerly() and not primary_var.is_initialized():
      # A SaveSpec tensor value of `None` indicates that the variable is
      # uninitialized.
      return None
    return var.distribute_strategy.extended.read_var(var)

  spec = saveable_object.SaveSpec(
      tensor=tensor,
      slice_spec="",
      name=name,
      dtype=var.dtype,
      device=primary_var.device)
  return tensor, [spec]
def get_on_write_restore_ops(var, tensor):
  """Return restore ops for AUTO and ON_WRITE variables."""
  packed_var = var._packed_variable  # pylint: disable=protected-access
  if packed_var is None:
    # One assignment per component, each pinned to its own device.
    targets = [(v.device, v) for v in var.values]
  else:
    targets = [(d, packed_var) for d in packed_var.devices]
  return control_flow_ops.group(
      tuple(assign_on_device(device, v, tensor) for device, v in targets))
def get_on_read_saveable(var, primary_var, name):
  """Return saveables for ON_READ variable."""

  def tensor():
    # Deferred: the cross-replica value is computed only at save time, not
    # when we are restoring instead of saving.
    return var._get_cross_replica()  # pylint: disable=protected-access

  return tensor, [
      saveable_object.SaveSpec(
          tensor=tensor,
          slice_spec="",
          name=name,
          dtype=var.dtype,
          device=primary_var.device)
  ]
def get_on_read_restore_ops(var, tensor, aggregation):
  """Return restore ops for ON_READ variables."""
  # A SUM-aggregated variable was saved as the total over replicas; give each
  # replica an equal share so that re-aggregating reproduces the saved total.
  if aggregation == vs.VariableAggregation.SUM:
    num_replicas = var.distribute_strategy.num_replicas_in_sync
    tensor = math_ops.cast(tensor / num_replicas, var.dtype)
  return control_flow_ops.group(
      tuple(assign_on_device(v.device, v, tensor) for v in var.values))
# Utility function that indicates if you are in an UpdateContext when running
# in a replica fn.
def in_replica_update_context():
  # An update-replica id is only set while inside `StrategyExtended.update`.
  return distribute_lib.get_update_replica_id() is not None
def on_write_assign(var, value, use_locking=False, name=None, read_value=True):
  """Assign `value` to `var` through the distributed `_update` path."""
  return var._update(  # pylint: disable=protected-access
      update_fn=lambda v, *args, **kwargs: v.assign(*args, **kwargs),
      value=value,
      use_locking=use_locking,
      name=name,
      read_value=read_value)
def on_write_assign_add(var, value, use_locking=False, name=None,
                        read_value=True):
  """Add `value` to `var` through the distributed `_update` path."""
  return var._update(  # pylint: disable=protected-access
      update_fn=lambda v, *args, **kwargs: v.assign_add(*args, **kwargs),
      value=value,
      use_locking=use_locking,
      name=name,
      read_value=read_value)
def on_write_assign_sub(var, value, use_locking=False, name=None,
                        read_value=True):
  """Subtract `value` from `var` through the distributed `_update` path."""
  return var._update(  # pylint: disable=protected-access
      update_fn=lambda v, *args, **kwargs: v.assign_sub(*args, **kwargs),
      value=value,
      use_locking=use_locking,
      name=name,
      read_value=read_value)
def assign_on_each_device(var, assign_func, value, read_value):
  """Run `assign_func` against every component of `var`, one per device."""
  packed = var._packed_variable  # pylint: disable=protected-access
  if packed is None:
    per_device_ops = tuple(
        assign_func(v.device, v, value) for v in var._values)  # pylint: disable=protected-access
  else:
    per_device_ops = tuple(
        assign_func(d, packed, value) for d in var._devices)  # pylint: disable=protected-access
  update = control_flow_ops.group(per_device_ops)
  if not read_value:
    return update
  # Make sure the read observes the assignments above.
  with ops.control_dependencies([update] if update else []):
    return var.read_value()
def on_read_assign_sub_cross_replica(var, value, read_value=True):
  """Subtract from a SyncOnRead variable, cross-replica context only."""
  with ds_context.enter_or_assert_strategy(var.distribute_strategy):
    if not ds_context.in_cross_replica_context():
      return None
    if var.aggregation == vs.VariableAggregation.SUM:
      raise ValueError(
          "SyncOnReadVariable does not support `assign_sub` in "
          "cross-replica context when aggregation is set to "
          "`tf.VariableAggregation.SUM`.")
    return assign_on_each_device(var, assign_sub_on_device,
                                 value, read_value)
def on_read_assign_add_cross_replica(var, value, read_value=True):
  """Add to a SyncOnRead variable, cross-replica context only."""
  with ds_context.enter_or_assert_strategy(var.distribute_strategy):
    if not ds_context.in_cross_replica_context():
      return None
    if var.aggregation == vs.VariableAggregation.SUM:
      raise ValueError(
          "SyncOnReadVariable does not support `assign_add` in "
          "cross-replica context when aggregation is set to "
          "`tf.VariableAggregation.SUM`.")
    return assign_on_each_device(var, assign_add_on_device,
                                 value, read_value)
def on_read_assign_cross_replica(var, value, read_value=True):
  """Assign `value` to a SyncOnRead variable from cross-replica context."""
  with ds_context.enter_or_assert_strategy(var.distribute_strategy):
    if not ds_context.in_cross_replica_context():
      return None
    tensor = value
    # A SUM-aggregated variable stores the total across replicas; hand each
    # replica an equal share so the sum round-trips through save/restore.
    if var.aggregation == vs.VariableAggregation.SUM:
      strategy = var._distribute_strategy  # pylint: disable=protected-access
      tensor = math_ops.cast(tensor / strategy.num_replicas_in_sync,
                             var.dtype)
    return assign_on_each_device(var, assign_on_device, tensor, read_value)
def scatter_sub(var, sparse_delta, use_locking=False, name=None):
  """Apply sparse subtraction to `var` via the distributed update path."""
  return var._update(  # pylint: disable=protected-access
      update_fn=lambda v, *args, **kwargs: v.scatter_sub(*args, **kwargs),
      value=sparse_delta,
      use_locking=use_locking,
      name=name)
def scatter_add(var, sparse_delta, use_locking=False, name=None):
  """Apply sparse addition to `var` via the distributed update path."""
  return var._update(  # pylint: disable=protected-access
      update_fn=lambda v, *args, **kwargs: v.scatter_add(*args, **kwargs),
      value=sparse_delta,
      use_locking=use_locking,
      name=name)
def scatter_mul(var, sparse_delta, use_locking=False, name=None):
  """Apply sparse multiplication to `var` via the distributed update path."""
  return var._update(  # pylint: disable=protected-access
      update_fn=lambda v, *args, **kwargs: v.scatter_mul(*args, **kwargs),
      value=sparse_delta,
      use_locking=use_locking,
      name=name)
def scatter_div(var, sparse_delta, use_locking=False, name=None):
  """Apply sparse division to `var` via the distributed update path."""
  return var._update(  # pylint: disable=protected-access
      update_fn=lambda v, *args, **kwargs: v.scatter_div(*args, **kwargs),
      value=sparse_delta,
      use_locking=use_locking,
      name=name)
def scatter_min(var, sparse_delta, use_locking=False, name=None):
  """Apply sparse element-wise minimum via the distributed update path."""
  return var._update(  # pylint: disable=protected-access
      update_fn=lambda v, *args, **kwargs: v.scatter_min(*args, **kwargs),
      value=sparse_delta,
      use_locking=use_locking,
      name=name)
def scatter_max(var, sparse_delta, use_locking=False, name=None):
  """Apply sparse element-wise maximum via the distributed update path."""
  return var._update(  # pylint: disable=protected-access
      update_fn=lambda v, *args, **kwargs: v.scatter_max(*args, **kwargs),
      value=sparse_delta,
      use_locking=use_locking,
      name=name)
def scatter_update(var, sparse_delta, use_locking=False, name=None):
  """Apply a sparse update to `var` via the distributed update path."""
  return var._update(  # pylint: disable=protected-access
      update_fn=lambda v, *args, **kwargs: v.scatter_update(*args, **kwargs),
      value=sparse_delta,
      use_locking=use_locking,
      name=name)
def get_current_replica_id_as_int():
  """Return the current replica ID as a Python int, or `None` if unknown."""
  replica_context = ds_context.get_replica_context()
  if not replica_context:
    # Inside a `strategy.update(...)` call there is no replica context; the
    # update-replica id is tracked separately.
    return distribute_lib.get_update_replica_id()
  replica_id = replica_context._replica_id  # pylint: disable=protected-access
  if isinstance(replica_id, int):
    return replica_id
  # Tensor-valued id: fold to a constant (may yield None if not constant).
  return tensor_util.constant_value(replica_id)
def assign_on_device(device, variable, tensor):
  # Pin the assignment op to `device` so each component updates locally.
  with ops.device(device):
    return variable.assign(tensor)
def assign_add_on_device(device, variable, tensor):
  # Pin the add-assignment op to `device`.
  with ops.device(device):
    return variable.assign_add(tensor)
def assign_sub_on_device(device, variable, tensor):
  # Pin the subtract-assignment op to `device`.
  with ops.device(device):
    return variable.assign_sub(tensor)
def assert_replica_context(strategy):
  """Raise RuntimeError unless called in `strategy`'s own replica context."""
  replica_context = ds_context.get_replica_context()
  # Both failure modes (no replica context at all, or a replica context that
  # belongs to a different strategy) raise the same error.
  if not replica_context or replica_context.strategy is not strategy:
    raise RuntimeError(
        "Replica-local variables may only be assigned in a replica context.")
def apply_aggregation(strategy, value, aggregation, destinations):
  """Combine a per-replica `value` per `aggregation` onto `destinations`."""
  if aggregation == vs.VariableAggregation.ONLY_FIRST_REPLICA:
    # No reduction: broadcast the first replica's value everywhere.
    first = strategy.experimental_local_results(value)[0]
    return strategy.extended.broadcast_to(first, destinations=destinations)
  reduce_op = reduce_util.ReduceOp.from_variable_aggregation(aggregation)
  return strategy.extended.reduce_to(reduce_op, value, destinations)
# Error template raised when a synchronized variable is updated in replica
# context without an explicit `aggregation` argument; `{variable_type}` is
# filled in by the caller.
aggregation_error_msg = (
    "You must specify an aggregation method to update a "
    "{variable_type} in Replica Context. You can do so by passing "
    "an explicit value for argument `aggregation` to tf.Variable(..)."
    "e.g. `tf.Variable(..., aggregation=tf.VariableAggregation.SUM)`"
    "`tf.VariableAggregation` lists the possible aggregation methods."
    "This is required because {variable_type} should always be "
    "kept in sync. When updating them or assigning to them in a "
    "replica context, we automatically try to aggregate the values "
    "before updating the variable. For this aggregation, we need to "
    "know the aggregation method. "
    "Another alternative is to not try to update such "
    "{variable_type} in replica context, but in cross replica "
    "context. You can enter cross replica context by calling "
    "`tf.distribute.get_replica_context().merge_call(merge_fn, ..)`."
    "Inside `merge_fn`, you can then update the {variable_type} "
    "using `tf.distribute.StrategyExtended.update()`.")
# Error template for scatter_* ops on unsupported variable/aggregation
# combinations; `{op_name}` and `{aggregation}` are filled in by the caller.
scatter_error_msg = ("{op_name} is only supported for mirrored "
                     "variable (variable created within certain "
                     "`tf.distribute.Strategy` scope) with NONE or "
                     "`ONLY_FIRST_REPLICA` aggregation, got: {aggregation}.")
def is_saving_non_distributed():
  """Returns whether we're saving a non-distributed version of the model.

  It returns True iff we are in saving context and are saving a
  non-distributed version of the model, i.e. the variable policy of the
  active `SaveOptions` is anything other than EXPAND_DISTRIBUTED_VARIABLES
  (for example NONE).

  Returns:
    A boolean.
  """
  if not save_context.in_save_context():
    return False
  options = save_context.get_save_options()
  return (options.experimental_variable_policy !=
          save_options.VariablePolicy.EXPAND_DISTRIBUTED_VARIABLES)
def mark_as_unsaveable():
  """Marks the function as unsaveable if not inside save context."""
  # Recording on the graph (rather than raising here) lets tracing continue;
  # the error surfaces only if the user later tries to save this function.
  if ops.inside_function() and not save_context.in_save_context():
    ops.get_default_graph().mark_as_unsaveable("""
ConcreteFunction that uses distributed variables in certain way cannot be saved.
If you're saving with
tf.saved_model.save(..., signatures=f.get_concrete_function())
do
@tf.function(input_signature=...)
def f_with_input_signature():
  ...
tf.saved_model.save(..., signatures=f_with_input_signature)`
instead.""")
| {
"content_hash": "d92d8a56eb615e299816d62d21139234",
"timestamp": "",
"source": "github",
"line_count": 379,
"max_line_length": 115,
"avg_line_length": 37.25857519788918,
"alnum_prop": 0.6903193824799944,
"repo_name": "frreiss/tensorflow-fred",
"id": "369e2435d9bca44cc37b57ce62eb8988d61c6369",
"size": "14810",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "tensorflow/python/distribute/values_util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "6729"
},
{
"name": "Batchfile",
"bytes": "49527"
},
{
"name": "C",
"bytes": "871761"
},
{
"name": "C#",
"bytes": "8562"
},
{
"name": "C++",
"bytes": "79093233"
},
{
"name": "CMake",
"bytes": "6500"
},
{
"name": "Dockerfile",
"bytes": "110545"
},
{
"name": "Go",
"bytes": "1852128"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "961600"
},
{
"name": "Jupyter Notebook",
"bytes": "549457"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "1644156"
},
{
"name": "Makefile",
"bytes": "62398"
},
{
"name": "Objective-C",
"bytes": "116558"
},
{
"name": "Objective-C++",
"bytes": "303063"
},
{
"name": "PHP",
"bytes": "20523"
},
{
"name": "Pascal",
"bytes": "3982"
},
{
"name": "Pawn",
"bytes": "18876"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "40003007"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Roff",
"bytes": "2472"
},
{
"name": "Ruby",
"bytes": "7464"
},
{
"name": "Shell",
"bytes": "681596"
},
{
"name": "Smarty",
"bytes": "34740"
},
{
"name": "Swift",
"bytes": "62814"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
} |
from django.contrib import admin
import djangotoolbox.fields as toolbox
from django import forms
from django.forms import ModelForm
from django.shortcuts import get_object_or_404, render_to_response, redirect
from django.db import models
from django.contrib import messages
from django.utils.html import escape
try: from django.utils import simplejson as json
except ImportError: import json
import datetime
from models import *
try:
from google.appengine.api.taskqueue import Task
except:
from google.appengine.api.labs.taskqueue import Task
class ProstheticForm(ModelForm):
    """Admin form for Prosthetic: fills the `classname` choices dynamically
    from the registered prosthetic classes."""
    class Meta:
        model = Prosthetic
    def __init__(self, *args, **kwargs):
        super(ProstheticForm, self).__init__(*args, **kwargs)
        # imported at call time, not module level, to avoid an import loop
        from introspection import prosthetic_class_lookup
        self.fields["classname"].choices = [ [ k, k ] for k in prosthetic_class_lookup ]
class ProstheticAdmin(admin.ModelAdmin):
    """Admin for Prosthetic; uses ProstheticForm for the classname choices."""
    list_display = ( "name", "server", "classname", "show_on_homepage" )
    readonly_fields = ( "created", "blog_keyword", )
    form = ProstheticForm
class AccessTokenAdmin(admin.ModelAdmin):
    """Admin for weavr access tokens, with bulk enable/disable/run actions.

    NOTE: Python 2 code (`except Exception, e`, `has_key`, `unicode`).
    """
    list_display = ( "weavr_name", "prosthetic_desc", "oauth_key", "last_run", "last_success","enabled", "revoked", )
    actions = [ "enable_token", "disable_token", "run_token" ]
    list_filter = ["prosthetic", "enabled", "revoked", "last_success"]
    fieldsets = [
        ("Weavr", {
            "fields":[ "weavr_name", "weavr_url", ("prosthetic", "oauth_key", "oauth_secret"), ],
        }),
        ("State", {
            "fields":[ "revoked", "enabled", "data"],
        }),
        ("History", {
            "fields":["last_run", "last_success", "last_result", "pretty_historical_results"],
        }),
    ]
    readonly_fields = [ "oauth_key", "oauth_secret", "prosthetic", "weavr_url", "weavr_name", "pretty_historical_results", "last_success", "last_result" ]
    def changelist_view(self, request, extra_context=None):
        # Default the change list to enabled + non-revoked tokens.
        # don't apply changes if we're coming from an existing admin page, thus
        # allowing us to select 'ALL'
        if not request.META.get("QUERY_STRING", None):
            if not request.GET.has_key('revoked__exact'):
                q = request.GET.copy()
                q['revoked__exact'] = '0'
                request.GET = q
                request.META['QUERY_STRING'] = request.GET.urlencode()
            if not request.GET.has_key('enabled__exact'):
                q = request.GET.copy()
                q['enabled__exact'] = '1'
                request.GET = q
                request.META['QUERY_STRING'] = request.GET.urlencode()
        return super(AccessTokenAdmin,self).changelist_view(request, extra_context=extra_context)
    def pretty_historical_results(self, obj):
        # Render the JSON-encoded run history as "<timestamp>: <result>" lines.
        if not obj.historical_results:
            return "No history"
        try:
            results = json.loads(obj.historical_results)
            def pretty_row(r):
                # r is a (unix-timestamp, result-text) pair.
                return "<b>%s:</b> %s"%(datetime.datetime.utcfromtimestamp(r[0]).strftime("%Y-%m-%d %H:%M"), escape(r[1]))
            return "<br>".join(map(pretty_row, results))
        except Exception, e:
            return "Error inflating results: %s (tried to inflate '%s')"%(e, obj.historical_results)
    pretty_historical_results.short_description = "History"
    pretty_historical_results.allow_tags = True
    def prosthetic_desc(self, obj):
        # Human-readable column for the related prosthetic.
        return unicode(obj.prosthetic)
    def disable_token(modeladmin, request, queryset):
        # Bulk admin action: mark the selected tokens as disabled.
        for q in queryset:
            q.enabled = False
            q.save()
    disable_token.short_description = "Disable Token"
    def enable_token(modeladmin, request, queryset):
        # Bulk admin action: mark the selected tokens as enabled.
        for q in queryset:
            q.enabled = True
            q.save()
    enable_token.short_description = "Enable Token"
    def run_token(modeladmin, request, queryset):
        # Bulk admin action: queue an immediate forced run for each token.
        for token in queryset:
            task = Task(url='/runner/run_task/', method='POST', params={'token': token.oauth_key, "force":"true"})
            task.add('default')
    run_token.short_description = "Force run of this token"
class RequestTokenAdmin(admin.ModelAdmin):
    """Simple listing for pending OAuth request tokens."""
    list_display = ( "oauth_key", "prosthetic", "created", )
# Register the model admins with the default admin site.
admin.site.register(Prosthetic, ProstheticAdmin)
admin.site.register(AccessToken, AccessTokenAdmin)
admin.site.register(RequestToken, RequestTokenAdmin)
def admin_action(request, key):
    """Handle the enable/disable/force-run buttons posted from the admin UI,
    then bounce back to the token's admin change page."""
    access_token = get_object_or_404(AccessToken, id=key)
    posted = request.POST
    if "enable" in posted:
        access_token.enabled = True
        access_token.save()
        messages.add_message(request, messages.SUCCESS, 'token enabled')
    if "disable" in posted:
        access_token.enabled = False
        access_token.save()
        messages.add_message(request, messages.SUCCESS, 'token disabled')
    if "force" in posted:
        # Queue an immediate forced run on the default task queue.
        queued = Task(url='/runner/run_task/', method='POST',
                      params={'token': access_token.oauth_key, "force": "true"})
        queued.add('default')
        messages.add_message(request, messages.SUCCESS, 'run queued')
    return redirect("/admin/webapp/accesstoken/%s/" % key)
def view_blog(request, key):
    """Redirect to the blog-filter URL of the token identified by ``key``.

    Fix: the original called ``token.blog_filter_url()`` twice (once to log,
    once to redirect); compute it once so the logged URL is always the URL
    actually redirected to.
    """
    token = get_object_or_404(AccessToken, id=key)
    url = token.blog_filter_url()
    logging.info(url)
    return redirect(url)
| {
"content_hash": "2e181ace33864b8f1ec293a9e93e9273",
"timestamp": "",
"source": "github",
"line_count": 156,
"max_line_length": 154,
"avg_line_length": 34.25,
"alnum_prop": 0.6253041362530414,
"repo_name": "philterphactory/prosthetic-runner",
"id": "3f80b26ef7de64dc230f52230e4bea5d17cb7250",
"size": "6682",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "webapp/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "120189"
},
{
"name": "Python",
"bytes": "261678"
},
{
"name": "Shell",
"bytes": "2450"
}
],
"symlink_target": ""
} |
import json
import time
import uuid
import boto3
import sure # noqa # pylint: disable=unused-import
from moto import mock_sqs, mock_lambda, mock_logs
from tests.test_awslambda.test_lambda import get_test_zip_file1, get_role_name
@mock_logs
@mock_lambda
@mock_sqs
def test_invoke_function_from_sqs_exception():
    """Wire an SQS queue to a Lambda and wait for the handler's log output.

    Creates the queue, the function, and the event-source mapping, drops one
    message on the queue, then polls the function's CloudWatch log group for
    up to ~30 seconds looking for the "custom log event" marker line.  Fails
    outright if the marker never shows up.
    """
    logs_conn = boto3.client("logs", region_name="us-east-1")
    sqs = boto3.resource("sqs", region_name="us-east-1")
    queue = sqs.create_queue(QueueName="test-sqs-queue1")
    conn = boto3.client("lambda", region_name="us-east-1")
    func = conn.create_function(
        FunctionName="testFunction",
        Runtime="python2.7",
        Role=get_role_name(),
        Handler="lambda_function.lambda_handler",
        Code={"ZipFile": get_test_zip_file1()},
        Description="test lambda function",
        Timeout=3,
        MemorySize=128,
        Publish=True,
    )
    mapping = conn.create_event_source_mapping(
        EventSourceArn=queue.attributes["QueueArn"], FunctionName=func["FunctionArn"]
    )
    assert mapping["EventSourceArn"] == queue.attributes["QueueArn"]
    assert mapping["State"] == "Enabled"
    queue.send_messages(
        Entries=[
            {
                "Id": "1",
                "MessageBody": json.dumps({"uuid": str(uuid.uuid4()), "test": "test"}),
            }
        ]
    )
    started_at = time.time()
    while (time.time() - started_at) < 30:
        described = logs_conn.describe_log_streams(logGroupName="/aws/lambda/testFunction")
        log_streams = described.get("logStreams")
        if not log_streams:
            # Stream not created yet; keep polling.
            time.sleep(1)
            continue
        assert len(log_streams) >= 1
        fetched = logs_conn.get_log_events(
            logGroupName="/aws/lambda/testFunction",
            logStreamName=log_streams[0]["logStreamName"],
        )
        for event in fetched.get("events"):
            if "custom log event" in event["message"]:
                return
        time.sleep(1)
    assert False, "Test Failed"
| {
"content_hash": "1d8dafef47b8e2c0c136be09cb0d6299",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 88,
"avg_line_length": 30.196969696969695,
"alnum_prop": 0.6156547917711992,
"repo_name": "spulec/moto",
"id": "f9e6da573ce1719aa6150c4974e96d5d1023dfe2",
"size": "1993",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_sqs/test_integration.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "255"
},
{
"name": "HTML",
"bytes": "5983"
},
{
"name": "Java",
"bytes": "1688"
},
{
"name": "JavaScript",
"bytes": "1424"
},
{
"name": "Jinja",
"bytes": "2502"
},
{
"name": "Makefile",
"bytes": "2284"
},
{
"name": "Python",
"bytes": "14737868"
},
{
"name": "Ruby",
"bytes": "188"
},
{
"name": "Scala",
"bytes": "782"
},
{
"name": "Shell",
"bytes": "5515"
}
],
"symlink_target": ""
} |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: add the auto-updating ``last_update`` column
    to the ``sell_cart`` table, and drop it again on rollback."""

    def forwards(self, orm):
        # Adding field 'Cart.last_update'
        # The fixed datetime default only backfills rows that already exist;
        # keep_default=False removes it from the schema immediately after.
        db.add_column('sell_cart', 'last_update', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, default=datetime.datetime(2012, 2, 16, 10, 56, 1, 834657), blank=True), keep_default=False)

    def backwards(self, orm):
        # Deleting field 'Cart.last_update'
        db.delete_column('sell_cart', 'last_update')

    # South's frozen ORM snapshot: auto-generated model definitions used to
    # build the ``orm`` object passed to forwards/backwards.  Do not hand-edit.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'market.marketplace': {
            'Meta': {'object_name': 'MarketPlace'},
            'base_domain': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}),
            'charge_on_card_as': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255'}),
            'contact_email': ('django.db.models.fields.EmailField', [], {'default': "'contact@yourstore.com'", 'max_length': '75'}),
            'contact_phone': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '92'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '92', 'db_index': 'True'}),
            'template_prefix': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '92', 'db_index': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '92'})
        },
        'sell.cart': {
            'Meta': {'object_name': 'Cart'},
            'bidder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_update': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'shippingdata': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['sell.ShippingData']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
            'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shops.Shop']"})
        },
        'sell.cartitem': {
            'Meta': {'object_name': 'CartItem'},
            'cart': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sell.Cart']"}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'price': ('django.db.models.fields.DecimalField', [], {'max_digits': '11', 'decimal_places': '2'}),
            'qty': ('django.db.models.fields.IntegerField', [], {})
        },
        'sell.payment': {
            'Meta': {'object_name': 'Payment'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'sell': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['sell.Sell']", 'unique': 'True'}),
            'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shops.Shop']"}),
            'state_actual': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'payment_history'", 'unique': 'True', 'null': 'True', 'to': "orm['sell.PaymentHistory']"}),
            'total': ('django.db.models.fields.DecimalField', [], {'default': "'0.0'", 'max_digits': '11', 'decimal_places': '2'})
        },
        'sell.paymenthistory': {
            'Meta': {'object_name': 'PaymentHistory'},
            'date_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'payment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sell.Payment']"}),
            'state': ('django.db.models.fields.CharField', [], {'max_length': '2'})
        },
        'sell.sell': {
            'Meta': {'object_name': 'Sell'},
            'bidder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
            'cancel': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'closed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'date_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'payment_method': ('django.db.models.fields.CharField', [], {'default': "'Manual Payment'", 'max_length': '255'}),
            'shippingdata': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['sell.ShippingData']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
            'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shops.Shop']", 'null': 'True'}),
            'total': ('django.db.models.fields.DecimalField', [], {'default': "'0'", 'max_digits': '11', 'decimal_places': '2'}),
            'total_shipping': ('django.db.models.fields.DecimalField', [], {'default': "'0'", 'max_digits': '11', 'decimal_places': '2'}),
            'total_taxes': ('django.db.models.fields.DecimalField', [], {'default': "'0'", 'max_digits': '11', 'decimal_places': '2'}),
            'total_without_taxes': ('django.db.models.fields.DecimalField', [], {'default': "'0'", 'max_digits': '11', 'decimal_places': '2'})
        },
        'sell.sellitem': {
            'Meta': {'object_name': 'SellItem'},
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'price': ('django.db.models.fields.DecimalField', [], {'max_digits': '11', 'decimal_places': '2'}),
            'qty': ('django.db.models.fields.IntegerField', [], {}),
            'sell': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sell.Sell']"})
        },
        'sell.shipping': {
            'Meta': {'object_name': 'Shipping'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'sell': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['sell.Sell']", 'unique': 'True'}),
            'shipping_service': ('django.db.models.fields.CharField', [], {'default': "'Other'", 'max_length': '255'}),
            'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shops.Shop']"}),
            'state_actual': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'shipping_history'", 'unique': 'True', 'null': 'True', 'to': "orm['sell.ShippingHistory']"}),
            'tracking_number': ('django.db.models.fields.CharField', [], {'default': "'--'", 'max_length': '255'})
        },
        'sell.shippingdata': {
            'Meta': {'object_name': 'ShippingData'},
            'city': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
            'country': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
            'state': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
            'street_address': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
            'zip': ('django.db.models.fields.CharField', [], {'max_length': '30'})
        },
        'sell.shippinghistory': {
            'Meta': {'object_name': 'ShippingHistory'},
            'date_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'shipping': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sell.Shipping']"}),
            'state': ('django.db.models.fields.CharField', [], {'max_length': '2'})
        },
        'shops.shop': {
            'Meta': {'object_name': 'Shop'},
            'active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
            'admin': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'bids': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'date_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_date_to_change_layout': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'last_date_to_post': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'location': ('django.db.models.fields.CharField', [], {'default': "'39.29038,-76.61219'", 'max_length': '255'}),
            'marketplace': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['market.MarketPlace']"}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
            'views': ('django.db.models.fields.IntegerField', [], {'default': '0'})
        }
    }

    complete_apps = ['sell']
| {
"content_hash": "5b28207878d1766154bc343ce057cb04",
"timestamp": "",
"source": "github",
"line_count": 169,
"max_line_length": 205,
"avg_line_length": 76.66863905325444,
"alnum_prop": 0.5444933240719302,
"repo_name": "StephenPower/CollectorCity-Market-Place",
"id": "c6ef13c722f46b11f9ec4c491e05e781c7292874",
"size": "12975",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "stores/apps/sell/migrations/0008_auto__add_field_cart_last_update.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "796501"
},
{
"name": "Python",
"bytes": "1860719"
},
{
"name": "Shell",
"bytes": "1174"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function, unicode_literals
from azure.mgmt.web import WebSiteManagementClient
from ..azure_common import BaseTest, arm_template, cassette_name
from c7n_azure.session import Session
from mock import patch
from c7n.exceptions import PolicyValidationError
from c7n.utils import local_session
class AppServicePlanTest(BaseTest):
    """Tests for the ``azure.appserviceplan`` resource and its resize-plan
    action: schema validation, resizing by explicit size/count, resizing
    from a resource tag, and skipping Consumption (serverless) plans."""

    def setUp(self):
        super(AppServicePlanTest, self).setUp()
        self.session = local_session(Session)
        self.client = local_session(Session).client(
            'azure.mgmt.web.WebSiteManagementClient') # type: WebSiteManagementClient
        # Patch target for AppServicePlansOperations.update; the module path
        # embeds the API version, so derive it from the live client.
        self.update_mock_path =\
            'azure.mgmt.web.v{}.operations._app_service_plans_operations.' \
            'AppServicePlansOperations.update'\
            .format(self.client._get_api_version('app_service_plans').replace('-', '_'))

    def test_app_service_plan_schema_validate(self):
        """offhour/onhour filters plus resize-plan validate; a resize-plan
        action with neither ``size`` nor ``count`` is rejected."""
        with self.sign_out_patch():
            p = self.load_policy({
                'name': 'test-azure-appserviceplan-win',
                'resource': 'azure.appserviceplan',
                'filters': [
                    {'type': 'offhour',
                     'default_tz': "pt",
                     'offhour': 18,
                     'tag': 'schedule'},
                    {'type': 'onhour',
                     'default_tz': "pt",
                     'onhour': 18,
                     'tag': 'schedule'}],
                'actions': [
                    {'type': 'resize-plan',
                     'size': 'F1'}],
            }, validate=True)
            self.assertTrue(p)

            # size and count are missing
            with self.assertRaises(PolicyValidationError):
                self.load_policy({
                    'name': 'test-azure-appserviceplan',
                    'resource': 'azure.appserviceplan',
                    'actions': [
                        {'type': 'resize-plan'}
                    ]
                }, validate=True)

    @arm_template('appserviceplan.json')
    @cassette_name('window_plans')
    def test_resize_plan_win(self):
        """Explicit size+count updates the Windows plan's sku name/tier/capacity."""
        with patch(self.update_mock_path) as update_mock:
            p = self.load_policy({
                'name': 'test-azure-appserviceplan-win',
                'resource': 'azure.appserviceplan',
                'filters': [
                    {'type': 'value',
                     'key': 'name',
                     'op': 'eq',
                     'value_type': 'normalize',
                     'value': 'cctest-appserviceplan-win'},
                    {'type': 'value',
                     'key': 'sku.name',
                     'op': 'eq',
                     'value': 'S1'}
                ],
                'actions': [
                    {'type': 'resize-plan',
                     'size': 'B1',
                     'count': 2}]
            }, validate=True)
            resources = p.run()
            self.assertEqual(1, len(resources))

            # Inspect the mocked update() call: args[1] is the plan name,
            # args[2] the updated plan model carrying the new sku.
            name, args, kwargs = update_mock.mock_calls[0]
            self.assertEqual('cctest-appserviceplan-win', args[1])
            self.assertEqual('B1', args[2].sku.name)
            self.assertEqual('BASIC', args[2].sku.tier)
            self.assertEqual(2, args[2].sku.capacity)

    @arm_template('appserviceplan-linux.json')
    @cassette_name('linux_plans')
    def test_resize_plan_linux(self):
        """Same as the Windows case, against the Linux plan with count=3."""
        with patch(self.update_mock_path) as update_mock:
            p = self.load_policy({
                'name': 'test-azure-appserviceplan-linux',
                'resource': 'azure.appserviceplan',
                'filters': [
                    {'type': 'value',
                     'key': 'name',
                     'op': 'eq',
                     'value_type': 'normalize',
                     'value': 'cctest-appserviceplan-linux'},
                    {'type': 'value',
                     'key': 'sku.name',
                     'op': 'eq',
                     'value': 'S1'}
                ],
                'actions': [
                    {'type': 'resize-plan',
                     'size': 'B1',
                     'count': 3}]
            }, validate=True)
            resources = p.run()
            self.assertEqual(1, len(resources))

            name, args, kwargs = update_mock.mock_calls[0]
            self.assertEqual('cctest-appserviceplan-linux', args[1])
            self.assertEqual('B1', args[2].sku.name)
            self.assertEqual('BASIC', args[2].sku.tier)
            self.assertEqual(3, args[2].sku.capacity)

    @arm_template('appserviceplan.json')
    @cassette_name('window_plans')
    def test_resize_plan_from_resource_tag(self):
        """The target size may come from a tag on the resource itself."""
        with patch(self.update_mock_path) as update_mock:
            p = self.load_policy({
                'name': 'test-azure-appserviceplan',
                'resource': 'azure.appserviceplan',
                'filters': [
                    {'type': 'value',
                     'key': 'name',
                     'op': 'eq',
                     'value_type': 'normalize',
                     'value': 'cctest-appserviceplan-win'}],
                'actions': [
                    {'type': 'resize-plan',
                     'size': {
                         'type': 'resource',
                         'key': 'tags.sku'
                     }}],
            })
            resources = p.run()
            self.assertEqual(1, len(resources))

            name, args, kwargs = update_mock.mock_calls[0]
            self.assertEqual('cctest-appserviceplan-win', args[1])
            self.assertEqual('B1', args[2].sku.name)
            self.assertEqual('BASIC', args[2].sku.tier)

    @arm_template('appserviceplan.json')
    @patch('c7n_azure.resources.appserviceplan.ResizePlan.log.info')
    @cassette_name('window_plans')
    def test_resize_consumption_win(self, logger):
        """Consumption (serverless) Windows plans are skipped with a log line."""
        p = self.load_policy({
            'name': 'test-azure-consumption-win',
            'resource': 'azure.appserviceplan',
            'filters': [
                {'type': 'value',
                 'key': 'name',
                 'op': 'eq',
                 'value_type': 'normalize',
                 'value': 'cctest-consumption-win'}
            ],
            'actions': [
                {'type': 'resize-plan',
                 'size': 'F1'}]
        }, validate=True)
        p.run()
        logger.assert_any_call(
            'Skipping cctest-consumption-win, '
            'because this App Service Plan is for Consumption Azure Functions.')

    @arm_template('appserviceplan-linux.json')
    @patch('c7n_azure.resources.appserviceplan.ResizePlan.log.info')
    @cassette_name('linux_plans')
    def test_resize_consumption_linux(self, logger):
        """Consumption Linux plans are likewise skipped with a log line."""
        p = self.load_policy({
            'name': 'test-azure-appserviceplan-linux',
            'resource': 'azure.appserviceplan',
            'filters': [
                {'resourceGroup': 'test_appserviceplan-linux'},
                {'type': 'value',
                 'key': 'name',
                 'op': 'ne',
                 'value_type': 'normalize',
                 'value': 'cctest-appserviceplan-linux'}
            ],
            'actions': [
                {'type': 'resize-plan',
                 'size': 'F1'}]
        }, validate=True)
        resources = p.run()
        self.assertEqual(1, len(resources))
        logger.assert_any_call(
            'Skipping {}, because this App Service Plan is for Consumption Azure Functions.'.format(
                resources[0]['name']
            ))

    @arm_template('appserviceplan.json')
    @cassette_name('window_plans')
    def test_resize_plan_win_only_count(self):
        """Only ``count`` given: capacity changes, sku name/tier stay put."""
        with patch(self.update_mock_path) as update_mock:
            p = self.load_policy({
                'name': 'test-azure-appserviceplan-win',
                'resource': 'azure.appserviceplan',
                'filters': [
                    {'type': 'value',
                     'key': 'name',
                     'op': 'eq',
                     'value_type': 'normalize',
                     'value': 'cctest-appserviceplan-win'},
                    {'type': 'value',
                     'key': 'sku.name',
                     'op': 'eq',
                     'value': 'S1'}
                ],
                'actions': [
                    {'type': 'resize-plan',
                     'count': 3}]
            }, validate=True)
            resources = p.run()
            self.assertEqual(1, len(resources))

            name, args, kwargs = update_mock.mock_calls[0]
            self.assertEqual('cctest-appserviceplan-win', args[1])
            self.assertEqual('S1', args[2].sku.name)
            self.assertEqual('Standard', args[2].sku.tier)
            self.assertEqual(3, args[2].sku.capacity)
| {
"content_hash": "2a802d30f6f098ac6986ef5e1ecf9ad2",
"timestamp": "",
"source": "github",
"line_count": 227,
"max_line_length": 100,
"avg_line_length": 38.66519823788546,
"alnum_prop": 0.4748775207929817,
"repo_name": "kapilt/cloud-custodian",
"id": "3443a836ad32ecf26a12a40835a6b1e75be25785",
"size": "9367",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/c7n_azure/tests_azure/tests_resources/test_app_service_plan.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "8163"
},
{
"name": "Go",
"bytes": "146630"
},
{
"name": "HTML",
"bytes": "31"
},
{
"name": "Makefile",
"bytes": "9971"
},
{
"name": "PowerShell",
"bytes": "1804"
},
{
"name": "Python",
"bytes": "5354902"
},
{
"name": "Shell",
"bytes": "13032"
},
{
"name": "Smarty",
"bytes": "359"
}
],
"symlink_target": ""
} |
import httplib
from datetime import datetime, timedelta
from mock import MagicMock, Mock, patch
from tests.unit import unittest
from tests.unit import AWSMockServiceTestCase
import boto.ec2
from boto.regioninfo import RegionInfo
from boto.ec2.blockdevicemapping import BlockDeviceType, BlockDeviceMapping
from boto.ec2.connection import EC2Connection
from boto.ec2.snapshot import Snapshot
from boto.ec2.reservedinstance import ReservedInstancesConfiguration
class TestEC2ConnectionBase(AWSMockServiceTestCase):
    """Shared fixture: an EC2Connection wired to the mocked AWS service."""
    connection_class = EC2Connection

    def setUp(self):
        super(TestEC2ConnectionBase, self).setUp()
        # Alias the mocked service connection under the name the tests use.
        self.ec2 = self.service_connection
class TestReservedInstanceOfferings(TestEC2ConnectionBase):
    """DescribeReservedInstancesOfferings: response unmarshalling and
    request-parameter serialization."""

    def default_body(self):
        # Canned XML: two offerings — the first with recurring charges and
        # pricing details, the second with an empty pricingDetailsSet — plus
        # a nextToken to populate the pagination field.
        return """
        <DescribeReservedInstancesOfferingsResponse>
            <requestId>d3253568-edcf-4897-9a3d-fb28e0b3fa38</requestId>
            <reservedInstancesOfferingsSet>
                <item>
                    <reservedInstancesOfferingId>2964d1bf71d8</reservedInstancesOfferingId>
                    <instanceType>c1.medium</instanceType>
                    <availabilityZone>us-east-1c</availabilityZone>
                    <duration>94608000</duration>
                    <fixedPrice>775.0</fixedPrice>
                    <usagePrice>0.0</usagePrice>
                    <productDescription>product description</productDescription>
                    <instanceTenancy>default</instanceTenancy>
                    <currencyCode>USD</currencyCode>
                    <offeringType>Heavy Utilization</offeringType>
                        <recurringCharges>
                            <item>
                                <frequency>Hourly</frequency>
                                <amount>0.095</amount>
                            </item>
                        </recurringCharges>
                    <marketplace>false</marketplace>
                        <pricingDetailsSet>
                            <item>
                                <price>0.045</price>
                                <count>1</count>
                            </item>
                        </pricingDetailsSet>
                </item>
                <item>
                    <reservedInstancesOfferingId>2dce26e46889</reservedInstancesOfferingId>
                    <instanceType>c1.medium</instanceType>
                    <availabilityZone>us-east-1c</availabilityZone>
                    <duration>94608000</duration>
                    <fixedPrice>775.0</fixedPrice>
                    <usagePrice>0.0</usagePrice>
                    <productDescription>Linux/UNIX</productDescription>
                    <instanceTenancy>default</instanceTenancy>
                    <currencyCode>USD</currencyCode>
                    <offeringType>Heavy Utilization</offeringType>
                    <recurringCharges>
                        <item>
                            <frequency>Hourly</frequency>
                            <amount>0.035</amount>
                        </item>
                    </recurringCharges>
                    <marketplace>false</marketplace>
                    <pricingDetailsSet/>
                </item>
            </reservedInstancesOfferingsSet>
            <nextToken>next_token</nextToken>
        </DescribeReservedInstancesOfferingsResponse>
        """

    def test_get_reserved_instance_offerings(self):
        """Every field of the first offering is unmarshalled as expected.

        NOTE: numeric-looking prices/amounts stay strings; duration is int.
        """
        self.set_http_response(status_code=200)
        response = self.ec2.get_all_reserved_instances_offerings()
        self.assertEqual(len(response), 2)
        instance = response[0]
        self.assertEqual(instance.id, '2964d1bf71d8')
        self.assertEqual(instance.instance_type, 'c1.medium')
        self.assertEqual(instance.availability_zone, 'us-east-1c')
        self.assertEqual(instance.duration, 94608000)
        self.assertEqual(instance.fixed_price, '775.0')
        self.assertEqual(instance.usage_price, '0.0')
        self.assertEqual(instance.description, 'product description')
        self.assertEqual(instance.instance_tenancy, 'default')
        self.assertEqual(instance.currency_code, 'USD')
        self.assertEqual(instance.offering_type, 'Heavy Utilization')
        self.assertEqual(len(instance.recurring_charges), 1)
        self.assertEqual(instance.recurring_charges[0].frequency, 'Hourly')
        self.assertEqual(instance.recurring_charges[0].amount, '0.095')
        self.assertEqual(len(instance.pricing_details), 1)
        self.assertEqual(instance.pricing_details[0].price, '0.045')
        self.assertEqual(instance.pricing_details[0].count, '1')

    def test_get_reserved_instance_offerings_params(self):
        """Each keyword maps onto its request parameter; booleans are
        lower-cased and numbers stringified on the wire."""
        self.set_http_response(status_code=200)
        self.ec2.get_all_reserved_instances_offerings(
            reserved_instances_offering_ids=['id1','id2'],
            instance_type='t1.micro',
            availability_zone='us-east-1',
            product_description='description',
            instance_tenancy='dedicated',
            offering_type='offering_type',
            include_marketplace=False,
            min_duration=100,
            max_duration=1000,
            max_instance_count=1,
            next_token='next_token',
            max_results=10
        )
        self.assert_request_parameters({
            'Action': 'DescribeReservedInstancesOfferings',
            'ReservedInstancesOfferingId.1': 'id1',
            'ReservedInstancesOfferingId.2': 'id2',
            'InstanceType': 't1.micro',
            'AvailabilityZone': 'us-east-1',
            'ProductDescription': 'description',
            'InstanceTenancy': 'dedicated',
            'OfferingType': 'offering_type',
            'IncludeMarketplace': 'false',
            'MinDuration': '100',
            'MaxDuration': '1000',
            'MaxInstanceCount': '1',
            'NextToken': 'next_token',
            'MaxResults': '10',},
            ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
                                  'SignatureVersion', 'Timestamp', 'Version'])
class TestPurchaseReservedInstanceOffering(TestEC2ConnectionBase):
    """PurchaseReservedInstancesOffering: limit-price serialization."""

    def default_body(self):
        # Empty (self-closing) success element; only the request matters here.
        return """<PurchaseReservedInstancesOffering />"""

    def test_serialized_api_args(self):
        """An (amount, currency) tuple becomes the two LimitPrice.* params."""
        self.set_http_response(status_code=200)
        self.ec2.purchase_reserved_instance_offering(
            'offering_id', 1, (100.0, 'USD'))
        self.assert_request_parameters(
            {
                'Action': 'PurchaseReservedInstancesOffering',
                'InstanceCount': 1,
                'ReservedInstancesOfferingId': 'offering_id',
                'LimitPrice.Amount': '100.0',
                'LimitPrice.CurrencyCode': 'USD',
            },
            ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
                                  'SignatureVersion', 'Timestamp',
                                  'Version'])
class TestCreateImage(TestEC2ConnectionBase):
    """CreateImage: request serialization with and without a block-device map."""

    def default_body(self):
        # Minimal successful CreateImage response.
        return """<CreateImageResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
            <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
            <imageId>ami-4fa54026</imageId>
</CreateImageResponse>"""

    def test_minimal(self):
        """Only InstanceId and Name are sent when nothing else is given."""
        self.set_http_response(status_code=200)
        self.ec2.create_image(
            'instance_id', 'name')
        self.assert_request_parameters(
            {
                'Action': 'CreateImage',
                'InstanceId': 'instance_id',
                'Name': 'name',
            },
            ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
                                  'SignatureVersion', 'Timestamp',
                                  'Version'])

    def test_block_device_mapping(self):
        """A mapping entry expands into BlockDeviceMapping.N.* parameters."""
        self.set_http_response(status_code=200)
        device_map = BlockDeviceMapping()
        device_map['test'] = BlockDeviceType()
        self.ec2.create_image(
            'instance_id', 'name', block_device_mapping=device_map)
        self.assert_request_parameters(
            {
                'Action': 'CreateImage',
                'InstanceId': 'instance_id',
                'Name': 'name',
                'BlockDeviceMapping.1.DeviceName': 'test',
                'BlockDeviceMapping.1.Ebs.DeleteOnTermination': 'false',
            },
            ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
                                  'SignatureVersion', 'Timestamp',
                                  'Version'])
class TestCancelReservedInstancesListing(TestEC2ConnectionBase):
    """CancelReservedInstancesListing: unmarshalling of the cancellation
    record, its per-state instance counts, and its price schedules."""

    def default_body(self):
        # Canned response: one cancelled listing with four state counters
        # and a five-term price schedule.
        return """
        <CancelReservedInstancesListingResponse>
            <requestId>request_id</requestId>
            <reservedInstancesListingsSet>
                <item>
                    <reservedInstancesListingId>listing_id</reservedInstancesListingId>
                    <reservedInstancesId>instance_id</reservedInstancesId>
                    <createDate>2012-07-12T16:55:28.000Z</createDate>
                    <updateDate>2012-07-12T16:55:28.000Z</updateDate>
                    <status>cancelled</status>
                    <statusMessage>CANCELLED</statusMessage>
                    <instanceCounts>
                        <item>
                            <state>Available</state>
                            <instanceCount>0</instanceCount>
                        </item>
                        <item>
                            <state>Sold</state>
                            <instanceCount>0</instanceCount>
                        </item>
                        <item>
                            <state>Cancelled</state>
                            <instanceCount>1</instanceCount>
                        </item>
                        <item>
                            <state>Pending</state>
                            <instanceCount>0</instanceCount>
                        </item>
                    </instanceCounts>
                    <priceSchedules>
                        <item>
                            <term>5</term>
                            <price>166.64</price>
                            <currencyCode>USD</currencyCode>
                            <active>false</active>
                        </item>
                        <item>
                            <term>4</term>
                            <price>133.32</price>
                            <currencyCode>USD</currencyCode>
                            <active>false</active>
                        </item>
                        <item>
                            <term>3</term>
                            <price>99.99</price>
                            <currencyCode>USD</currencyCode>
                            <active>false</active>
                        </item>
                        <item>
                            <term>2</term>
                            <price>66.66</price>
                            <currencyCode>USD</currencyCode>
                            <active>false</active>
                        </item>
                        <item>
                            <term>1</term>
                            <price>33.33</price>
                            <currencyCode>USD</currencyCode>
                            <active>false</active>
                        </item>
                    </priceSchedules>
                    <tagSet/>
                    <clientToken>XqJIt1342112125076</clientToken>
                </item>
            </reservedInstancesListingsSet>
        </CancelReservedInstancesListingResponse>
        """

    def test_reserved_instances_listing(self):
        """Status, counts, and schedules come back typed as shown below
        (term is int, price stays a string, active is bool)."""
        self.set_http_response(status_code=200)
        response = self.ec2.cancel_reserved_instances_listing()
        self.assertEqual(len(response), 1)
        cancellation = response[0]
        self.assertEqual(cancellation.status, 'cancelled')
        self.assertEqual(cancellation.status_message, 'CANCELLED')
        self.assertEqual(len(cancellation.instance_counts), 4)
        first = cancellation.instance_counts[0]
        self.assertEqual(first.state, 'Available')
        self.assertEqual(first.instance_count, 0)
        self.assertEqual(len(cancellation.price_schedules), 5)
        schedule = cancellation.price_schedules[0]
        self.assertEqual(schedule.term, 5)
        self.assertEqual(schedule.price, '166.64')
        self.assertEqual(schedule.currency_code, 'USD')
        self.assertEqual(schedule.active, False)
class TestCreateReservedInstancesListing(TestEC2ConnectionBase):
    """CreateReservedInstancesListing: unmarshalling of the created listing
    and serialization of the PriceSchedules.* request parameters."""

    def default_body(self):
        # Canned response: one active listing, 11 price-schedule terms with
        # only the longest (term 11) currently active.
        return """
        <CreateReservedInstancesListingResponse>
            <requestId>request_id</requestId>
            <reservedInstancesListingsSet>
                <item>
                    <reservedInstancesListingId>listing_id</reservedInstancesListingId>
                    <reservedInstancesId>instance_id</reservedInstancesId>
                    <createDate>2012-07-17T17:11:09.449Z</createDate>
                    <updateDate>2012-07-17T17:11:09.468Z</updateDate>
                    <status>active</status>
                    <statusMessage>ACTIVE</statusMessage>
                    <instanceCounts>
                        <item>
                            <state>Available</state>
                            <instanceCount>1</instanceCount>
                        </item>
                        <item>
                            <state>Sold</state>
                            <instanceCount>0</instanceCount>
                        </item>
                        <item>
                            <state>Cancelled</state>
                            <instanceCount>0</instanceCount>
                        </item>
                        <item>
                            <state>Pending</state>
                            <instanceCount>0</instanceCount>
                        </item>
                    </instanceCounts>
                    <priceSchedules>
                        <item>
                            <term>11</term>
                            <price>2.5</price>
                            <currencyCode>USD</currencyCode>
                            <active>true</active>
                        </item>
                        <item>
                            <term>10</term>
                            <price>2.5</price>
                            <currencyCode>USD</currencyCode>
                            <active>false</active>
                        </item>
                        <item>
                            <term>9</term>
                            <price>2.5</price>
                            <currencyCode>USD</currencyCode>
                            <active>false</active>
                        </item>
                        <item>
                            <term>8</term>
                            <price>2.0</price>
                            <currencyCode>USD</currencyCode>
                            <active>false</active>
                        </item>
                        <item>
                            <term>7</term>
                            <price>2.0</price>
                            <currencyCode>USD</currencyCode>
                            <active>false</active>
                        </item>
                        <item>
                            <term>6</term>
                            <price>2.0</price>
                            <currencyCode>USD</currencyCode>
                            <active>false</active>
                        </item>
                        <item>
                            <term>5</term>
                            <price>1.5</price>
                            <currencyCode>USD</currencyCode>
                            <active>false</active>
                        </item>
                        <item>
                            <term>4</term>
                            <price>1.5</price>
                            <currencyCode>USD</currencyCode>
                            <active>false</active>
                        </item>
                        <item>
                            <term>3</term>
                            <price>0.7</price>
                            <currencyCode>USD</currencyCode>
                            <active>false</active>
                        </item>
                        <item>
                            <term>2</term>
                            <price>0.7</price>
                            <currencyCode>USD</currencyCode>
                            <active>false</active>
                        </item>
                        <item>
                            <term>1</term>
                            <price>0.1</price>
                            <currencyCode>USD</currencyCode>
                            <active>false</active>
                        </item>
                    </priceSchedules>
                    <tagSet/>
                    <clientToken>myIdempToken1</clientToken>
                </item>
            </reservedInstancesListingsSet>
        </CreateReservedInstancesListingResponse>
        """

    def test_create_reserved_instances_listing(self):
        """The (price, term) tuples are sent as indexed PriceSchedules
        parameters and the created listing is unmarshalled in full."""
        self.set_http_response(status_code=200)
        response = self.ec2.create_reserved_instances_listing(
            'instance_id', 1, [('2.5', 11), ('2.0', 8)], 'client_token')
        self.assertEqual(len(response), 1)
        cancellation = response[0]
        self.assertEqual(cancellation.status, 'active')
        self.assertEqual(cancellation.status_message, 'ACTIVE')
        self.assertEqual(len(cancellation.instance_counts), 4)
        first = cancellation.instance_counts[0]
        self.assertEqual(first.state, 'Available')
        self.assertEqual(first.instance_count, 1)
        self.assertEqual(len(cancellation.price_schedules), 11)
        schedule = cancellation.price_schedules[0]
        self.assertEqual(schedule.term, 11)
        self.assertEqual(schedule.price, '2.5')
        self.assertEqual(schedule.currency_code, 'USD')
        self.assertEqual(schedule.active, True)
        self.assert_request_parameters({
            'Action': 'CreateReservedInstancesListing',
            'ReservedInstancesId': 'instance_id',
            'InstanceCount': '1',
            'ClientToken': 'client_token',
            'PriceSchedules.0.Price': '2.5',
            'PriceSchedules.0.Term': '11',
            'PriceSchedules.1.Price': '2.0',
            'PriceSchedules.1.Term': '8',},
            ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
                                  'SignatureVersion', 'Timestamp',
                                  'Version'])
class TestDescribeSpotInstanceRequests(TestEC2ConnectionBase):
    """DescribeSpotInstanceRequests response parsing."""

    def default_body(self):
        # Canned response: a single fulfilled one-time spot request.
        return """
<DescribeSpotInstanceRequestsResponse>
<requestId>requestid</requestId>
<spotInstanceRequestSet>
<item>
<spotInstanceRequestId>sir-id</spotInstanceRequestId>
<spotPrice>0.003000</spotPrice>
<type>one-time</type>
<state>active</state>
<status>
<code>fulfilled</code>
<updateTime>2012-10-19T18:09:26.000Z</updateTime>
<message>Your Spot request is fulfilled.</message>
</status>
<launchGroup>mylaunchgroup</launchGroup>
<launchSpecification>
<imageId>ami-id</imageId>
<keyName>mykeypair</keyName>
<groupSet>
<item>
<groupId>sg-id</groupId>
<groupName>groupname</groupName>
</item>
</groupSet>
<instanceType>t1.micro</instanceType>
<monitoring>
<enabled>false</enabled>
</monitoring>
</launchSpecification>
<instanceId>i-id</instanceId>
<createTime>2012-10-19T18:07:05.000Z</createTime>
<productDescription>Linux/UNIX</productDescription>
<launchedAvailabilityZone>us-east-1d</launchedAvailabilityZone>
</item>
</spotInstanceRequestSet>
</DescribeSpotInstanceRequestsResponse>
"""

    # Renamed from ``test_describe_spot_instance_requets`` (typo); the
    # ``test_`` prefix is preserved so the runner still discovers it.
    def test_describe_spot_instance_requests(self):
        self.set_http_response(status_code=200)
        response = self.ec2.get_all_spot_instance_requests()
        self.assertEqual(len(response), 1)
        spotrequest = response[0]
        self.assertEqual(spotrequest.id, 'sir-id')
        self.assertEqual(spotrequest.price, 0.003)
        self.assertEqual(spotrequest.type, 'one-time')
        self.assertEqual(spotrequest.state, 'active')
        # Elements absent from the XML must parse to None.
        self.assertEqual(spotrequest.fault, None)
        self.assertEqual(spotrequest.valid_from, None)
        self.assertEqual(spotrequest.valid_until, None)
        self.assertEqual(spotrequest.launch_group, 'mylaunchgroup')
        self.assertEqual(spotrequest.launched_availability_zone, 'us-east-1d')
        self.assertEqual(spotrequest.product_description, 'Linux/UNIX')
        self.assertEqual(spotrequest.availability_zone_group, None)
        self.assertEqual(spotrequest.create_time,
                         '2012-10-19T18:07:05.000Z')
        self.assertEqual(spotrequest.instance_id, 'i-id')
        launch_spec = spotrequest.launch_specification
        self.assertEqual(launch_spec.key_name, 'mykeypair')
        self.assertEqual(launch_spec.instance_type, 't1.micro')
        self.assertEqual(launch_spec.image_id, 'ami-id')
        self.assertEqual(launch_spec.placement, None)
        self.assertEqual(launch_spec.kernel, None)
        self.assertEqual(launch_spec.ramdisk, None)
        self.assertEqual(launch_spec.monitored, False)
        self.assertEqual(launch_spec.subnet_id, None)
        self.assertEqual(launch_spec.block_device_mapping, None)
        self.assertEqual(launch_spec.instance_profile, None)
        self.assertEqual(launch_spec.ebs_optimized, False)
        status = spotrequest.status
        self.assertEqual(status.code, 'fulfilled')
        self.assertEqual(status.update_time, '2012-10-19T18:09:26.000Z')
        self.assertEqual(status.message, 'Your Spot request is fulfilled.')
class TestCopySnapshot(TestEC2ConnectionBase):
    """CopySnapshot call result and request serialization."""

    def default_body(self):
        return """
<CopySnapshotResponse xmlns="http://ec2.amazonaws.com/doc/2012-12-01/">
<requestId>request_id</requestId>
<snapshotId>snap-copied-id</snapshotId>
</CopySnapshotResponse>
"""

    def test_copy_snapshot(self):
        self.set_http_response(status_code=200)
        copied = self.ec2.copy_snapshot('us-west-2', 'snap-id', 'description')
        self.assertEqual(copied, 'snap-copied-id')
        expected = {
            'Action': 'CopySnapshot',
            'Description': 'description',
            'SourceRegion': 'us-west-2',
            'SourceSnapshotId': 'snap-id',
        }
        self.assert_request_parameters(
            expected,
            ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
                                  'SignatureVersion', 'Timestamp',
                                  'Version'])
class TestCopyImage(TestEC2ConnectionBase):
    """CopyImage call with and without the optional image name."""

    def default_body(self):
        return """
<CopyImageResponse xmlns="http://ec2.amazonaws.com/doc/2013-07-15/">
<requestId>request_id</requestId>
<imageId>ami-copied-id</imageId>
</CopyImageResponse>
"""

    def _check_params(self, extra):
        """Verify the serialized CopyImage request plus *extra* params."""
        params = {
            'Action': 'CopyImage',
            'Description': 'description',
            'SourceRegion': 'us-west-2',
            'SourceImageId': 'ami-id',
            'ClientToken': 'client-token',
        }
        params.update(extra)
        self.assert_request_parameters(
            params,
            ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
                                  'SignatureVersion', 'Timestamp',
                                  'Version'])

    def test_copy_image(self):
        self.set_http_response(status_code=200)
        copied = self.ec2.copy_image('us-west-2', 'ami-id',
                                     'name', 'description', 'client-token')
        self.assertEqual(copied.image_id, 'ami-copied-id')
        self._check_params({'Name': 'name'})

    def test_copy_image_without_name(self):
        # Name is optional; when omitted it must not appear on the wire.
        self.set_http_response(status_code=200)
        copied = self.ec2.copy_image('us-west-2', 'ami-id',
                                     description='description',
                                     client_token='client-token')
        self.assertEqual(copied.image_id, 'ami-copied-id')
        self._check_params({})
class TestAccountAttributes(TestEC2ConnectionBase):
    """DescribeAccountAttributes response parsing."""

    def default_body(self):
        # Canned response: four attributes, one of which
        # (supported-platforms) carries two values.
        return """
<DescribeAccountAttributesResponse xmlns="http://ec2.amazonaws.com/doc/2012-12-01/">
<requestId>6d042e8a-4bc3-43e8-8265-3cbc54753f14</requestId>
<accountAttributeSet>
<item>
<attributeName>vpc-max-security-groups-per-interface</attributeName>
<attributeValueSet>
<item>
<attributeValue>5</attributeValue>
</item>
</attributeValueSet>
</item>
<item>
<attributeName>max-instances</attributeName>
<attributeValueSet>
<item>
<attributeValue>50</attributeValue>
</item>
</attributeValueSet>
</item>
<item>
<attributeName>supported-platforms</attributeName>
<attributeValueSet>
<item>
<attributeValue>EC2</attributeValue>
</item>
<item>
<attributeValue>VPC</attributeValue>
</item>
</attributeValueSet>
</item>
<item>
<attributeName>default-vpc</attributeName>
<attributeValueSet>
<item>
<attributeValue>none</attributeValue>
</item>
</attributeValueSet>
</item>
</accountAttributeSet>
</DescribeAccountAttributesResponse>
"""

    def test_describe_account_attributes(self):
        self.set_http_response(status_code=200)
        parsed = self.ec2.describe_account_attributes()
        # One parsed object per <item>; attribute values come back as
        # lists of strings.
        self.assertEqual(len(parsed), 4)
        self.assertEqual(parsed[0].attribute_name,
                         'vpc-max-security-groups-per-interface')
        self.assertEqual(parsed[0].attribute_values,
                         ['5'])
        self.assertEqual(parsed[-1].attribute_name,
                         'default-vpc')
        self.assertEqual(parsed[-1].attribute_values,
                         ['none'])
class TestDescribeVPCAttribute(TestEC2ConnectionBase):
    """DescribeVpcAttribute parsing and request serialization."""

    def default_body(self):
        return """
<DescribeVpcAttributeResponse xmlns="http://ec2.amazonaws.com/doc/2013-02-01/">
<requestId>request_id</requestId>
<vpcId>vpc-id</vpcId>
<enableDnsHostnames>
<value>false</value>
</enableDnsHostnames>
</DescribeVpcAttributeResponse>
"""

    def test_describe_vpc_attribute(self):
        self.set_http_response(status_code=200)
        attribute = self.ec2.describe_vpc_attribute('vpc-id',
                                                    'enableDnsHostnames')
        self.assertEqual(attribute.vpc_id, 'vpc-id')
        # <value>false</value> must surface as a Python False.
        self.assertFalse(attribute.enable_dns_hostnames)
        expected = {
            'Action': 'DescribeVpcAttribute',
            'VpcId': 'vpc-id',
            'Attribute': 'enableDnsHostnames',
        }
        self.assert_request_parameters(
            expected,
            ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
                                  'SignatureVersion', 'Timestamp',
                                  'Version'])
class TestGetAllNetworkInterfaces(TestEC2ConnectionBase):
    """DescribeNetworkInterfaces response parsing."""

    def default_body(self):
        # NOTE: the trailing backslash inside the triple-quoted string is a
        # line continuation that joins the xmlns URL across two source lines.
        return """
<DescribeNetworkInterfacesResponse xmlns="http://ec2.amazonaws.com/\
doc/2013-06-15/">
<requestId>fc45294c-006b-457b-bab9-012f5b3b0e40</requestId>
<networkInterfaceSet>
<item>
<networkInterfaceId>eni-0f62d866</networkInterfaceId>
<subnetId>subnet-c53c87ac</subnetId>
<vpcId>vpc-cc3c87a5</vpcId>
<availabilityZone>ap-southeast-1b</availabilityZone>
<description/>
<ownerId>053230519467</ownerId>
<requesterManaged>false</requesterManaged>
<status>in-use</status>
<macAddress>02:81:60:cb:27:37</macAddress>
<privateIpAddress>10.0.0.146</privateIpAddress>
<sourceDestCheck>true</sourceDestCheck>
<groupSet>
<item>
<groupId>sg-3f4b5653</groupId>
<groupName>default</groupName>
</item>
</groupSet>
<attachment>
<attachmentId>eni-attach-6537fc0c</attachmentId>
<instanceId>i-22197876</instanceId>
<instanceOwnerId>053230519467</instanceOwnerId>
<deviceIndex>5</deviceIndex>
<status>attached</status>
<attachTime>2012-07-01T21:45:27.000Z</attachTime>
<deleteOnTermination>true</deleteOnTermination>
</attachment>
<tagSet/>
<privateIpAddressesSet>
<item>
<privateIpAddress>10.0.0.146</privateIpAddress>
<primary>true</primary>
</item>
<item>
<privateIpAddress>10.0.0.148</privateIpAddress>
<primary>false</primary>
</item>
<item>
<privateIpAddress>10.0.0.150</privateIpAddress>
<primary>false</primary>
</item>
</privateIpAddressesSet>
</item>
</networkInterfaceSet>
</DescribeNetworkInterfacesResponse>"""

    def test_attachment_has_device_index(self):
        self.set_http_response(status_code=200)
        parsed = self.ec2.get_all_network_interfaces()
        # <deviceIndex>5</deviceIndex> must be exposed on the attachment.
        self.assertEqual(5, parsed[0].attachment.device_index)
class TestGetAllImages(TestEC2ConnectionBase):
    """DescribeImages response parsing, including billing products and
    block device mappings."""

    def default_body(self):
        return """
<DescribeImagesResponse xmlns="http://ec2.amazonaws.com/doc/2013-02-01/">
<requestId>e32375e8-4ac3-4099-a8bf-3ec902b9023e</requestId>
<imagesSet>
<item>
<imageId>ami-abcd1234</imageId>
<imageLocation>111111111111/windows2008r2-hvm-i386-20130702</imageLocation>
<imageState>available</imageState>
<imageOwnerId>111111111111</imageOwnerId>
<isPublic>false</isPublic>
<architecture>i386</architecture>
<imageType>machine</imageType>
<platform>windows</platform>
<viridianEnabled>true</viridianEnabled>
<name>Windows Test</name>
<description>Windows Test Description</description>
<billingProducts>
<item>
<billingProduct>bp-6ba54002</billingProduct>
</item>
</billingProducts>
<rootDeviceType>ebs</rootDeviceType>
<rootDeviceName>/dev/sda1</rootDeviceName>
<blockDeviceMapping>
<item>
<deviceName>/dev/sda1</deviceName>
<ebs>
<snapshotId>snap-abcd1234</snapshotId>
<volumeSize>30</volumeSize>
<deleteOnTermination>true</deleteOnTermination>
<volumeType>standard</volumeType>
</ebs>
</item>
<item>
<deviceName>xvdb</deviceName>
<virtualName>ephemeral0</virtualName>
</item>
<item>
<deviceName>xvdc</deviceName>
<virtualName>ephemeral1</virtualName>
</item>
<item>
<deviceName>xvdd</deviceName>
<virtualName>ephemeral2</virtualName>
</item>
<item>
<deviceName>xvde</deviceName>
<virtualName>ephemeral3</virtualName>
</item>
</blockDeviceMapping>
<virtualizationType>hvm</virtualizationType>
<hypervisor>xen</hypervisor>
</item>
</imagesSet>
</DescribeImagesResponse>"""

    def test_get_all_images(self):
        # assertEquals is a deprecated alias (removed in Python 3.12);
        # use assertEqual.  Bind parsed[0] once for readability.
        self.set_http_response(status_code=200)
        parsed = self.ec2.get_all_images()
        self.assertEqual(1, len(parsed))
        image = parsed[0]
        self.assertEqual("ami-abcd1234", image.id)
        self.assertEqual("111111111111/windows2008r2-hvm-i386-20130702",
                         image.location)
        self.assertEqual("available", image.state)
        self.assertEqual("111111111111", image.ownerId)
        self.assertEqual("111111111111", image.owner_id)
        self.assertEqual(False, image.is_public)
        self.assertEqual("i386", image.architecture)
        self.assertEqual("machine", image.type)
        # Fields absent from the XML must parse to None.
        self.assertEqual(None, image.kernel_id)
        self.assertEqual(None, image.ramdisk_id)
        self.assertEqual(None, image.owner_alias)
        self.assertEqual("windows", image.platform)
        self.assertEqual("Windows Test", image.name)
        self.assertEqual("Windows Test Description", image.description)
        self.assertEqual("ebs", image.root_device_type)
        self.assertEqual("/dev/sda1", image.root_device_name)
        self.assertEqual("hvm", image.virtualization_type)
        self.assertEqual("xen", image.hypervisor)
        self.assertEqual(None, image.instance_lifecycle)
        # 1 billing product parsed into a list
        self.assertEqual(1, len(image.billing_products))
        self.assertEqual("bp-6ba54002", image.billing_products[0])
        # Just verify length, there is already a block_device_mapping test
        self.assertEqual(5, len(image.block_device_mapping))
        # TODO: No tests for product codes?
class TestModifyInterfaceAttribute(TestEC2ConnectionBase):
    """ModifyNetworkInterfaceAttribute: request serialization and input
    validation for each supported attribute name."""

    def default_body(self):
        # The trailing backslash joins the element and its xmlns attribute
        # across two source lines inside the triple-quoted string.
        return """
<ModifyNetworkInterfaceAttributeResponse \
xmlns="http://ec2.amazonaws.com/doc/2013-06-15/">
<requestId>657a4623-5620-4232-b03b-427e852d71cf</requestId>
<return>true</return>
</ModifyNetworkInterfaceAttributeResponse>
"""

    def test_modify_description(self):
        self.set_http_response(status_code=200)
        self.ec2.modify_network_interface_attribute('id', 'description', 'foo')
        self.assert_request_parameters({
            'Action': 'ModifyNetworkInterfaceAttribute',
            'NetworkInterfaceId': 'id',
            'Description.Value': 'foo'},
            ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
                                  'SignatureVersion', 'Timestamp',
                                  'Version'])

    def test_modify_source_dest_check_bool(self):
        # Boolean input is serialized as the lowercase string 'true'.
        self.set_http_response(status_code=200)
        self.ec2.modify_network_interface_attribute('id', 'sourceDestCheck',
                                                    True)
        self.assert_request_parameters({
            'Action': 'ModifyNetworkInterfaceAttribute',
            'NetworkInterfaceId': 'id',
            'SourceDestCheck.Value': 'true'},
            ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
                                  'SignatureVersion', 'Timestamp',
                                  'Version'])

    def test_modify_source_dest_check_str(self):
        # A pre-stringified 'true' is passed through unchanged.
        self.set_http_response(status_code=200)
        self.ec2.modify_network_interface_attribute('id', 'sourceDestCheck',
                                                    'true')
        self.assert_request_parameters({
            'Action': 'ModifyNetworkInterfaceAttribute',
            'NetworkInterfaceId': 'id',
            'SourceDestCheck.Value': 'true'},
            ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
                                  'SignatureVersion', 'Timestamp',
                                  'Version'])

    def test_modify_source_dest_check_invalid(self):
        # Non-bool, non-str values are rejected.
        self.set_http_response(status_code=200)
        with self.assertRaises(ValueError):
            self.ec2.modify_network_interface_attribute('id',
                                                        'sourceDestCheck',
                                                        123)

    def test_modify_delete_on_termination_str(self):
        self.set_http_response(status_code=200)
        self.ec2.modify_network_interface_attribute('id',
                                                    'deleteOnTermination',
                                                    True, attachment_id='bar')
        self.assert_request_parameters({
            'Action': 'ModifyNetworkInterfaceAttribute',
            'NetworkInterfaceId': 'id',
            'Attachment.AttachmentId': 'bar',
            'Attachment.DeleteOnTermination': 'true'},
            ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
                                  'SignatureVersion', 'Timestamp',
                                  'Version'])

    def test_modify_delete_on_termination_bool(self):
        self.set_http_response(status_code=200)
        self.ec2.modify_network_interface_attribute('id',
                                                    'deleteOnTermination',
                                                    'false',
                                                    attachment_id='bar')
        self.assert_request_parameters({
            'Action': 'ModifyNetworkInterfaceAttribute',
            'NetworkInterfaceId': 'id',
            'Attachment.AttachmentId': 'bar',
            'Attachment.DeleteOnTermination': 'false'},
            ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
                                  'SignatureVersion', 'Timestamp',
                                  'Version'])

    def test_modify_delete_on_termination_invalid(self):
        self.set_http_response(status_code=200)
        with self.assertRaises(ValueError):
            self.ec2.modify_network_interface_attribute('id',
                                                        'deleteOnTermination',
                                                        123,
                                                        attachment_id='bar')

    def test_modify_group_set_list(self):
        # groupSet values are serialized as 1-indexed SecurityGroupId params.
        self.set_http_response(status_code=200)
        self.ec2.modify_network_interface_attribute('id', 'groupSet',
                                                    ['sg-1', 'sg-2'])
        self.assert_request_parameters({
            'Action': 'ModifyNetworkInterfaceAttribute',
            'NetworkInterfaceId': 'id',
            'SecurityGroupId.1': 'sg-1',
            'SecurityGroupId.2': 'sg-2'},
            ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
                                  'SignatureVersion', 'Timestamp',
                                  'Version'])

    def test_modify_group_set_invalid(self):
        # NOTE(review): assertRaisesRegexp is the Python 2 spelling; it was
        # renamed assertRaisesRegex in 3.2 — left as-is for py2 compatibility.
        self.set_http_response(status_code=200)
        with self.assertRaisesRegexp(TypeError, 'iterable'):
            self.ec2.modify_network_interface_attribute('id', 'groupSet',
                                                        False)

    def test_modify_attr_invalid(self):
        self.set_http_response(status_code=200)
        with self.assertRaisesRegexp(ValueError, 'Unknown attribute'):
            self.ec2.modify_network_interface_attribute('id', 'invalid', 0)
class TestConnectToRegion(unittest.TestCase):
    """boto.ec2.connect_to_region for a known AWS region, a custom
    RegionInfo, and an unknown region name."""

    def setUp(self):
        self.https_connection = Mock(spec=httplib.HTTPSConnection)
        self.https_connection_factory = (
            Mock(return_value=self.https_connection), ())

    def test_aws_region(self):
        # list(...) instead of .keys()[0]: dict views are not indexable on
        # Python 3; list() over the mapping works on both 2 and 3.
        region = list(boto.ec2.RegionData)[0]
        self.ec2 = boto.ec2.connect_to_region(region,
            https_connection_factory=self.https_connection_factory,
            aws_access_key_id='aws_access_key_id',
            aws_secret_access_key='aws_secret_access_key'
        )
        self.assertEqual(boto.ec2.RegionData[region], self.ec2.host)

    def test_non_aws_region(self):
        # An explicit RegionInfo overrides the region-name lookup.
        self.ec2 = boto.ec2.connect_to_region('foo',
            https_connection_factory=self.https_connection_factory,
            aws_access_key_id='aws_access_key_id',
            aws_secret_access_key='aws_secret_access_key',
            region=RegionInfo(name='foo', endpoint='https://foo.com/bar')
        )
        self.assertEqual('https://foo.com/bar', self.ec2.host)

    def test_missing_region(self):
        # Unknown region names (with no RegionInfo) yield None.
        self.ec2 = boto.ec2.connect_to_region('foo',
            https_connection_factory=self.https_connection_factory,
            aws_access_key_id='aws_access_key_id',
            aws_secret_access_key='aws_secret_access_key'
        )
        self.assertEqual(None, self.ec2)
class TestTrimSnapshots(TestEC2ConnectionBase):
    """
    Test snapshot trimming functionality by ensuring that expected calls
    are made when given a known set of volume snapshots.
    """
    def _get_snapshots(self):
        """
        Generate a list of fake snapshots with names and dates.
        """
        snaps = []
        # Generate some dates offset by days, weeks, months.
        # This is to validate the various types of snapshot logic handled by
        # ``trim_snapshots``.
        now = datetime.now()
        dates = [
            now,
            now - timedelta(days=1),
            now - timedelta(days=2),
            now - timedelta(days=7),
            now - timedelta(days=14),
            # We want to simulate 30/60/90-day snapshots, but February is
            # short (only 28 days), so we decrease the delta by 2 days apiece.
            # This prevents the ``delete_snapshot`` code below from being
            # called, since they don't fall outside the allowed timeframes
            # for the snapshots.
            datetime(now.year, now.month, 1) - timedelta(days=28),
            datetime(now.year, now.month, 1) - timedelta(days=58),
            datetime(now.year, now.month, 1) - timedelta(days=88)
        ]
        for date in dates:
            # Create a fake snapshot for each date
            snap = Snapshot(self.ec2)
            snap.tags['Name'] = 'foo'
            # Times are expected to be ISO8601 strings
            snap.start_time = date.strftime('%Y-%m-%dT%H:%M:%S.000Z')
            snaps.append(snap)
        return snaps

    def test_trim_defaults(self):
        """
        Test trimming snapshots with the default arguments, which should
        keep all monthly backups forever. The result of this test should
        be that nothing is deleted.
        """
        # Setup mocks: save the real methods so they can be restored after
        # the assertions (the connection object is shared by the fixture).
        orig = {
            'get_all_snapshots': self.ec2.get_all_snapshots,
            'delete_snapshot': self.ec2.delete_snapshot
        }
        snaps = self._get_snapshots()
        self.ec2.get_all_snapshots = MagicMock(return_value=snaps)
        self.ec2.delete_snapshot = MagicMock()
        # Call the tested method
        self.ec2.trim_snapshots()
        # Assertions
        self.assertEqual(True, self.ec2.get_all_snapshots.called)
        self.assertEqual(False, self.ec2.delete_snapshot.called)
        # Restore
        self.ec2.get_all_snapshots = orig['get_all_snapshots']
        self.ec2.delete_snapshot = orig['delete_snapshot']

    def test_trim_months(self):
        """
        Test trimming monthly snapshots and ensure that older months
        get deleted properly. The result of this test should be that
        the two oldest snapshots get deleted.
        """
        # Setup mocks
        orig = {
            'get_all_snapshots': self.ec2.get_all_snapshots,
            'delete_snapshot': self.ec2.delete_snapshot
        }
        snaps = self._get_snapshots()
        self.ec2.get_all_snapshots = MagicMock(return_value=snaps)
        self.ec2.delete_snapshot = MagicMock()
        # Call the tested method: keep only one monthly backup, so the two
        # oldest month-boundary snapshots fall outside the window.
        self.ec2.trim_snapshots(monthly_backups=1)
        # Assertions
        self.assertEqual(True, self.ec2.get_all_snapshots.called)
        self.assertEqual(2, self.ec2.delete_snapshot.call_count)
        # Restore
        self.ec2.get_all_snapshots = orig['get_all_snapshots']
        self.ec2.delete_snapshot = orig['delete_snapshot']
class TestModifyReservedInstances(TestEC2ConnectionBase):
    """ModifyReservedInstances request serialization and response value."""

    def default_body(self):
        return """<ModifyReservedInstancesResponse xmlns='http://ec2.amazonaws.com/doc/2013-08-15/'>
<requestId>bef729b6-0731-4489-8881-2258746ae163</requestId>
<reservedInstancesModificationId>rimod-3aae219d-3d63-47a9-a7e9-e764example</reservedInstancesModificationId>
</ModifyReservedInstancesResponse>"""

    def test_serialized_api_args(self):
        self.set_http_response(status_code=200)
        target = ReservedInstancesConfiguration(
            availability_zone='us-west-2c',
            platform='EC2-VPC',
            instance_count=3,
        )
        response = self.ec2.modify_reserved_instances(
            'a-token-goes-here',
            reserved_instance_ids=['2567o137-8a55-48d6-82fb-7258506bb497'],
            target_configurations=[target],
        )
        expected = {
            'Action': 'ModifyReservedInstances',
            'ClientToken': 'a-token-goes-here',
            'ReservedInstancesConfigurationSetItemType.0.AvailabilityZone': 'us-west-2c',
            'ReservedInstancesConfigurationSetItemType.0.InstanceCount': 3,
            'ReservedInstancesConfigurationSetItemType.0.Platform': 'EC2-VPC',
            'ReservedInstancesId.1': '2567o137-8a55-48d6-82fb-7258506bb497',
        }
        self.assert_request_parameters(
            expected,
            ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
                                  'SignatureVersion', 'Timestamp', 'Version'])
        self.assertEqual(response, 'rimod-3aae219d-3d63-47a9-a7e9-e764example')
class TestDescribeReservedInstancesModifications(TestEC2ConnectionBase):
    """DescribeReservedInstancesModifications: request serialization and
    full parsing of a single modification record."""

    def default_body(self):
        return """<DescribeReservedInstancesModificationsResponse xmlns='http://ec2.amazonaws.com/doc/2013-08-15/'>
<requestId>eb4a6e3c-3689-445c-b536-19e38df35898</requestId>
<reservedInstancesModificationsSet>
<item>
<reservedInstancesModificationId>rimod-49b9433e-fdc7-464a-a6e5-9dabcexample</reservedInstancesModificationId>
<reservedInstancesSet>
<item>
<reservedInstancesId>2567o137-8a55-48d6-82fb-7258506bb497</reservedInstancesId>
</item>
</reservedInstancesSet>
<modificationResultSet>
<item>
<reservedInstancesId>9d5cb137-5d65-4479-b4ac-8c337example</reservedInstancesId>
<targetConfiguration>
<availabilityZone>us-east-1b</availabilityZone>
<platform>EC2-VPC</platform>
<instanceCount>1</instanceCount>
</targetConfiguration>
</item>
</modificationResultSet>
<createDate>2013-09-02T21:20:19.637Z</createDate>
<updateDate>2013-09-02T21:38:24.143Z</updateDate>
<effectiveDate>2013-09-02T21:00:00.000Z</effectiveDate>
<status>fulfilled</status>
<clientToken>token-f5b56c05-09b0-4d17-8d8c-c75d8a67b806</clientToken>
</item>
</reservedInstancesModificationsSet>
</DescribeReservedInstancesModificationsResponse>"""

    def test_serialized_api_args(self):
        self.set_http_response(status_code=200)
        response = self.ec2.describe_reserved_instances_modifications(
            reserved_instances_modification_ids=[
                '2567o137-8a55-48d6-82fb-7258506bb497'
            ],
            filters={
                'status': 'processing',
            }
        )
        # Filters are serialized as 1-indexed Filter.N.Name/Value pairs.
        self.assert_request_parameters({
            'Action': 'DescribeReservedInstancesModifications',
            'Filter.1.Name': 'status',
            'Filter.1.Value.1': 'processing',
            'ReservedInstancesModificationId.1': '2567o137-8a55-48d6-82fb-7258506bb497'
        }, ignore_params_values=[
            'AWSAccessKeyId', 'SignatureMethod',
            'SignatureVersion', 'Timestamp',
            'Version'
        ])
        # Make sure the response was parsed correctly.
        self.assertEqual(
            response[0].modification_id,
            'rimod-49b9433e-fdc7-464a-a6e5-9dabcexample'
        )
        # Date strings are parsed into datetime objects (ms -> microseconds).
        self.assertEqual(
            response[0].create_date,
            datetime(2013, 9, 2, 21, 20, 19, 637000)
        )
        self.assertEqual(
            response[0].update_date,
            datetime(2013, 9, 2, 21, 38, 24, 143000)
        )
        self.assertEqual(
            response[0].effective_date,
            datetime(2013, 9, 2, 21, 0, 0, 0)
        )
        self.assertEqual(
            response[0].status,
            'fulfilled'
        )
        # No <statusMessage> element in the body, so this must be None.
        self.assertEqual(
            response[0].status_message,
            None
        )
        self.assertEqual(
            response[0].client_token,
            'token-f5b56c05-09b0-4d17-8d8c-c75d8a67b806'
        )
        self.assertEqual(
            response[0].reserved_instances[0].id,
            '2567o137-8a55-48d6-82fb-7258506bb497'
        )
        # Target configuration fields are flattened onto the result object.
        self.assertEqual(
            response[0].modification_results[0].availability_zone,
            'us-east-1b'
        )
        self.assertEqual(
            response[0].modification_results[0].platform,
            'EC2-VPC'
        )
        self.assertEqual(
            response[0].modification_results[0].instance_count,
            1
        )
        self.assertEqual(len(response), 1)
class TestRegisterImage(TestEC2ConnectionBase):
    """RegisterImage serialization for several optional-argument combos."""

    def default_body(self):
        return """
<RegisterImageResponse xmlns="http://ec2.amazonaws.com/doc/2013-08-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<imageId>ami-1a2b3c4d</imageId>
</RegisterImageResponse>
"""

    def _register_and_verify(self, call_kwargs, extra_params):
        """Call register_image with *call_kwargs* and check the wire params
        (the common base params plus *extra_params*)."""
        self.set_http_response(status_code=200)
        self.ec2.register_image('name', 'description', **call_kwargs)
        params = {
            'Action': 'RegisterImage',
            'ImageLocation': 's3://foo',
            'Name': 'name',
            'Description': 'description',
        }
        params.update(extra_params)
        self.assert_request_parameters(
            params,
            ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
                                  'SignatureVersion', 'Timestamp', 'Version'])

    def test_vm_type_default(self):
        # No virtualization type given: none appears in the request.
        self._register_and_verify({'image_location': 's3://foo'}, {})

    def test_vm_type_hvm(self):
        self._register_and_verify(
            {'image_location': 's3://foo', 'virtualization_type': 'hvm'},
            {'VirtualizationType': 'hvm'})

    def test_sriov_net_support_simple(self):
        self._register_and_verify(
            {'image_location': 's3://foo', 'sriov_net_support': 'simple'},
            {'SriovNetSupport': 'simple'})
class TestTerminateInstances(TestEC2ConnectionBase):
    """TerminateInstances responses using a non-standard ``shutdownState``
    element (rather than ``currentState``) must still parse without
    raising."""

    def default_body(self):
        return """<?xml version="1.0" ?>
<TerminateInstancesResponse xmlns="http://ec2.amazonaws.com/doc/2013-07-15/">
<requestId>req-59a9ad52-0434-470c-ad48-4f89ded3a03e</requestId>
<instancesSet>
<item>
<instanceId>i-000043a2</instanceId>
<shutdownState>
<code>16</code>
<name>running</name>
</shutdownState>
<previousState>
<code>16</code>
<name>running</name>
</previousState>
</item>
</instancesSet>
</TerminateInstancesResponse>
"""

    def test_terminate_bad_response(self):
        # No explicit assertions: the test passes as long as parsing the
        # response body above does not raise.
        self.set_http_response(status_code=200)
        self.ec2.terminate_instances('foo')
class TestDescribeInstances(TestEC2ConnectionBase):
    """DescribeInstances request serialization."""

    def default_body(self):
        return """
<DescribeInstancesResponse>
</DescribeInstancesResponse>
"""

    def _verify(self, expected):
        """Check the serialized request against *expected* params."""
        self.assert_request_parameters(
            expected,
            ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
                                  'SignatureVersion', 'Timestamp', 'Version'])

    def test_default_behavior(self):
        self.set_http_response(status_code=200)
        self.ec2.get_all_instances()
        self._verify({'Action': 'DescribeInstances'})

    def test_max_results(self):
        self.set_http_response(status_code=200)
        self.ec2.get_all_instances(max_results=10)
        self._verify({'Action': 'DescribeInstances', 'MaxResults': 10})
class TestDescribeTags(TestEC2ConnectionBase):
    """DescribeTags request serialization."""

    def default_body(self):
        return """
<DescribeTagsResponse>
</DescribeTagsResponse>
"""

    def _verify(self, expected):
        """Check the serialized request against *expected* params."""
        self.assert_request_parameters(
            expected,
            ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
                                  'SignatureVersion', 'Timestamp', 'Version'])

    def test_default_behavior(self):
        self.set_http_response(status_code=200)
        self.ec2.get_all_tags()
        self._verify({'Action': 'DescribeTags'})

    def test_max_results(self):
        self.set_http_response(status_code=200)
        self.ec2.get_all_tags(max_results=10)
        self._verify({'Action': 'DescribeTags', 'MaxResults': 10})
class TestSignatureAlteration(TestEC2ConnectionBase):
    """Required auth capability: 'ec2' by default, SigV4 for cn-north-1."""

    def test_unchanged(self):
        capability = self.service_connection._required_auth_capability()
        self.assertEqual(capability, ['ec2'])

    def test_switched(self):
        cn_region = RegionInfo(
            name='cn-north-1',
            endpoint='ec2.cn-north-1.amazonaws.com.cn',
            connection_cls=EC2Connection,
        )
        connection = self.connection_class(
            aws_access_key_id='less',
            aws_secret_access_key='more',
            region=cn_region,
        )
        self.assertEqual(connection._required_auth_capability(), ['hmac-v4'])
class TestAssociateAddress(TestEC2ConnectionBase):
    """Successful AssociateAddress calls: boolean and object variants."""

    def default_body(self):
        return """
<AssociateAddressResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<return>true</return>
<associationId>eipassoc-fc5ca095</associationId>
</AssociateAddressResponse>
"""

    def test_associate_address(self):
        self.set_http_response(status_code=200)
        succeeded = self.ec2.associate_address(instance_id='i-1234',
                                               public_ip='192.0.2.1')
        self.assertEqual(succeeded, True)

    def test_associate_address_object(self):
        self.set_http_response(status_code=200)
        association = self.ec2.associate_address_object(
            instance_id='i-1234', public_ip='192.0.2.1')
        self.assertEqual(association.association_id, 'eipassoc-fc5ca095')
class TestAssociateAddressFail(TestEC2ConnectionBase):
    """AssociateAddress error responses must come back as False."""

    def default_body(self):
        return """
<Response>
<Errors>
<Error>
<Code>InvalidInstanceID.NotFound</Code>
<Message>The instance ID 'i-4cbc822a' does not exist</Message>
</Error>
</Errors>
<RequestID>ea966190-f9aa-478e-9ede-cb5432daacc0</RequestID>
<StatusCode>Failure</StatusCode>
</Response>
"""

    def test_associate_address(self):
        self.set_http_response(status_code=200)
        succeeded = self.ec2.associate_address(instance_id='i-1234',
                                               public_ip='192.0.2.1')
        self.assertEqual(succeeded, False)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "31b399a789e6e035343fc2dd7a553804",
"timestamp": "",
"source": "github",
"line_count": 1410,
"max_line_length": 121,
"avg_line_length": 42.08510638297872,
"alnum_prop": 0.5299966295921806,
"repo_name": "kaushik94/boto",
"id": "c68f29332a3425738904542fde9b86eb4516d07a",
"size": "59362",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "tests/unit/ec2/test_connection.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
"""Utils UTs."""
from unittest import main
from b3j0f.utils.ut import UTCase
from .base import Schema
from ..utils import (
DynamicValue, data2schema, ThisSchema, validate, updatecontent,
RegisteredSchema, dump, RefSchema, AnySchema, data2schemacls,
datatype2schemacls
)
from ..elementary import (
StringSchema, IntegerSchema, TypeSchema, FloatSchema, BooleanSchema
)
from ..registry import registercls, unregistercls
from six import string_types
from numbers import Number
class UpdateContentTest(UTCase):
    """updatecontent must convert plain class attributes into schemas,
    both when called directly and when used as a class decorator."""

    def _assert(self, schemacls):
        """Check that each attribute became the matching elementary schema."""
        expectations = (
            ('a', IntegerSchema),
            ('b', FloatSchema),
            ('c', StringSchema),
            ('d', TypeSchema),
            ('e', AnySchema),
        )
        for attr, expected_cls in expectations:
            self.assertIsInstance(getattr(schemacls, attr), expected_cls)

    def test_object(self):
        class TestSchema(object):
            a = 1
            b = 2.
            c = str()
            d = object
            e = None

        updatecontent(TestSchema, updateparents=False)
        self._assert(TestSchema)

    def test_schema(self):
        class TestSchema(Schema):
            a = 1
            b = 2.
            c = str()
            d = object
            e = None

        updatecontent(TestSchema, updateparents=False)
        self._assert(TestSchema)

    def test_object_decorator(self):
        @updatecontent(updateparents=False)
        class TestSchema(object):
            a = 1
            b = 2.
            c = str()
            d = object
            e = None

        self._assert(TestSchema)

    def test_schema_decorator(self):
        @updatecontent(updateparents=False)
        class TestSchema(Schema):
            a = 1
            b = 2.
            c = str()
            d = object
            e = None

        self._assert(TestSchema)
class DumpTest(UTCase):
    """dump() serialization of schemas into plain dictionaries."""

    @staticmethod
    def _base_fields(schema):
        """Expected dump of a bare schema's standard fields."""
        return {
            'default': schema.default,
            'name': '',
            'uuid': schema.uuid,
            'nullable': schema.nullable,
            'required': schema.required,
            'version': schema.version,
            'doc': schema.doc,
        }

    def test_dump(self):
        schema = Schema()
        self.assertEqual(dump(schema), self._base_fields(schema))

    def test_dumped_content(self):
        class TestSchema(RegisteredSchema):
            a = Schema(default=Schema())
            b = Schema()

        schema = TestSchema()
        expected = self._base_fields(schema)
        # 'a' has a schema default, so it is dumped recursively — with its
        # own default collapsed to None; 'b' has no value and dumps as None.
        inner = self._base_fields(schema.a)
        inner['default'] = None
        inner['uuid'] = schema.a.uuid
        inner['nullable'] = schema.a.nullable
        inner['required'] = schema.a.required
        inner['version'] = schema.a.version
        inner['doc'] = schema.a.doc
        expected['a'] = inner
        expected['b'] = None
        self.assertEqual(dump(schema), expected)
class ValidateTest(UTCase):
    """Behaviour of :func:`validate` with respect to the ``nullable`` flag."""
    def test_validate(self):
        """A nullable schema accepts None; a non-nullable one rejects it."""
        subject = Schema()
        # Nullable by default: both None and arbitrary data pass.
        validate(subject, None)
        validate(subject, 1)
        # Forbidding None must make validation fail on None...
        subject.nullable = False
        self.assertRaises(ValueError, validate, subject, None)
        # ...and restoring nullability must make it pass again.
        subject.nullable = True
        validate(subject, None)
class ThisSchemaTest(UTCase):
    """Tests for ``ThisSchema`` placeholders referring to the enclosing class."""
    def test_error(self):
        """Defining a ThisSchema with a default on an auto-updating class
        raises NameError (asserted below)."""
        def definition():
            class Test(RegisteredSchema):
                test = ThisSchema(default='test', nullable=False)
                def __init__(self, *args, **kwargs):
                    super(Test, self).__init__(*args, **kwargs)
        self.assertRaises(NameError, definition)
    def test_error_deco(self):
        """Same failure when updatecontent is applied as a decorator."""
        def definition():
            @updatecontent
            class Test(Schema):
                __update_content__ = False
                test = ThisSchema(default='test', nullable=False)
                def __init__(self, *args, **kwargs):
                    super(Test, self).__init__(*args, **kwargs)
        self.assertRaises(NameError, definition)
    def test(self):
        """updatecontent replaces the ThisSchema attribute with a Test instance."""
        class Test(Schema):
            __update_content__ = False
            test = ThisSchema(test='test', nullable=False)
            def __init__(self, *args, **kwargs):
                super(Test, self).__init__(*args, **kwargs)
        self.assertIsInstance(Test.test, ThisSchema)
        updatecontent(Test)
        self.assertIsInstance(Test.test, Test)
        self.assertEqual(Test.test._test_, 'test')
        self.assertFalse(Test.test.nullable)
    def test_params(self):
        """ThisSchema records its positional and keyword construction params."""
        this = ThisSchema(1, 2, a=3, b=4)
        self.assertEqual(this.args, (1, 2))
        self.assertEqual(this.kwargs, {'a': 3, 'b': 4})
    def test_default(self):
        """A ThisSchema default materializes as an instance of the class;
        an explicit None default stays None."""
        class TestSchema(RegisteredSchema):
            default = ThisSchema()
        schema = TestSchema()
        self.assertIsInstance(schema.default, TestSchema)
        schema = TestSchema(default=None)
        self.assertIsNone(schema._default_)
        self.assertIsNone(schema.default)
class Data2SchemaTest(UTCase):
    """Tests for ``data2schema`` and its ``_force``/``_besteffort`` switches."""
    class BaseTest(Schema):
        # Schema fixture whose constructor stores the given default verbatim.
        def __init__(self, default=None, *args, **kwargs):
            super(Data2SchemaTest.BaseTest, self).__init__(*args, **kwargs)
            self.default = default
    class Test(BaseTest):
        pass
    def setUp(self):
        # Register the fixture so data2schema can resolve BaseTest instances.
        registercls(
            schemacls=Data2SchemaTest.BaseTest,
            data_types=[Data2SchemaTest.BaseTest]
        )
    def tearDown(self):
        unregistercls(Data2SchemaTest.BaseTest)
        unregistercls(Data2SchemaTest.Test)
        unregistercls(object)
    def test_default(self):
        """Unregistered data converts to None by default."""
        self.assertIsNone(data2schema(object()))
    def test_default_force(self):
        """_force=True builds a schema even for otherwise unresolvable data."""
        res = data2schema(_data=map, _force=True, name='test')
        self.assertIsNotNone(res)
        self.assertEqual(res.name, 'test')
    def test_default_besteffort(self):
        """With _besteffort=False an unregistered object is not converted."""
        self.assertIsNone(data2schema(object(), _besteffort=False))
    def test_dynamicvalue(self):
        """DynamicValue data is resolved from its computed value."""
        res = data2schema(DynamicValue(lambda: ''), name='test', _force=True)
        self.assertIsNotNone(res)
        self.assertEqual(res.name, 'test')
    def test_registered(self):
        """A registered subclass instance converts; the data becomes the default."""
        test = Data2SchemaTest.Test()
        res = data2schema(_data=test)
        self.assertEqual(res.default, test)
    def test_registered_besteffort(self):
        """_besteffort=False skips the subclass-lookup: no conversion."""
        test = Data2SchemaTest.Test()
        res = data2schema(_data=test, _besteffort=False)
        self.assertIsNone(res)
    def test_w_attrs(self):
        """Instance attributes are carried over onto the generated schema."""
        class Test(object):
            pass
        test = Test()
        test.test = 1
        res = data2schema(_data=test, _force=True)
        self.assertTrue(hasattr(res, 'test'))
class RefSchemaTest(UTCase):
    """Tests for RefSchema: validation is delegated to the referenced schema."""
    def setUp(self):
        # Two throwaway schemas with incompatible _validate implementations.
        class NumberSchema(Schema):
            default = 0
            def _validate(self, data, *args, **kwargs):
                if not isinstance(data, Number):
                    raise TypeError()
        self.numberschema = NumberSchema()
        # NOTE: shadows the imported StringSchema within this method's scope.
        class StringSchema(Schema):
            def _validate(self, data, *args, **kwargs):
                if not isinstance(data, string_types):
                    raise TypeError()
        self.stringschema = StringSchema()
    def test_default_noref(self):
        """Without a ref, assigning a default is accepted."""
        schema = RefSchema()
        schema.default = 0
    def test_default(self):
        """With a ref, defaults are validated against the referenced schema."""
        schema = RefSchema(ref=self.numberschema, default=1)
        self.assertEqual(schema.default, 1)
        schema.default = 0
        self.assertEqual(schema.default, 0)
        self.assertRaises(TypeError, setattr, schema, 'default', '')
    def test_owner(self):
        """_validate accepts an explicit owner schema for validation."""
        schema = RefSchema()
        schema._validate(0, owner=self.numberschema)
    def test_ref(self):
        """The ref supplies the default; re-assigning an incompatible ref raises."""
        schema = RefSchema(ref=self.numberschema)
        self.assertEqual(schema.default, self.numberschema.default)
        self.assertRaises(TypeError, setattr, schema, 'ref', self.stringschema)
class Data2SchemaClsTest(UTCase):
    """Tests for ``data2schemacls``: building a schema class from sample data."""
    def test_dict(self):
        """A dict's keys become typed schema attributes with matching defaults."""
        data = {
            'a': 1,
            'b': True
        }
        schemacls = data2schemacls(_data=data, name='test')
        self.assertIsInstance(schemacls.a, IntegerSchema)
        self.assertEqual(schemacls.a.default, 1)
        self.assertIsInstance(schemacls.b, BooleanSchema)
        self.assertEqual(schemacls.b.default, True)
        self.assertIsInstance(schemacls.name, StringSchema)
        self.assertEqual(schemacls.name.default, 'test')
        validate(schemacls(), data)
    def test_object(self):
        """A class's attributes map the same way; the schema validates both
        the class and its instances."""
        class Test(object):
            a = 1
            b = True
        schemacls = data2schemacls(_data=Test, name='test')
        self.assertIsInstance(schemacls.a, IntegerSchema)
        self.assertEqual(schemacls.a.default, 1)
        self.assertIsInstance(schemacls.b, BooleanSchema)
        self.assertEqual(schemacls.b.default, True)
        self.assertIsInstance(schemacls.name, StringSchema)
        self.assertEqual(schemacls.name.default, 'test')
        validate(schemacls(), Test)
        validate(schemacls(), Test())
class DataType2Schemacls(UTCase):
    """Tests for ``datatype2schemacls`` across different class shapes."""
    def test_namespace(self):
        """Plain (namespace-style) class: methods are carried over."""
        class A:
            def test(self):
                pass
        schemacls = datatype2schemacls(A)
        validate(schemacls(), A())
        self.assertTrue(hasattr(schemacls, 'test'))
    def test_cls(self):
        """New-style class."""
        class A(object):
            def test(self):
                pass
        schemacls = datatype2schemacls(A)
        validate(schemacls(), A())
        self.assertTrue(hasattr(schemacls, 'test'))
    def test_slots(self):
        """Class using __slots__."""
        class A(object):
            __slots__ = ['A']
            def test(self):
                pass
        schemacls = datatype2schemacls(A)
        validate(schemacls(), A())
        self.assertTrue(hasattr(schemacls, 'test'))
if __name__ == '__main__':
    # Run the test suite (``main`` is ``unittest.main`` imported above).
    main()
| {
"content_hash": "11beed037640e1c6101fae9333630e3b",
"timestamp": "",
"source": "github",
"line_count": 446,
"max_line_length": 79,
"avg_line_length": 22.8542600896861,
"alnum_prop": 0.5620523888943393,
"repo_name": "b3j0f/schema",
"id": "8c9b8f5593ed36d184f4fee4f5171ae773ad6dd1",
"size": "11534",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "b3j0f/schema/test/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "130356"
}
],
"symlink_target": ""
} |
"""Options for BigMLer linear regression
"""
def get_linear_regression_options(defaults=None):
    """Adding arguments for the linear regression subcommand.

    Returns a dict mapping each command-line flag to the keyword arguments
    used to register it; *defaults* (an optional dict, e.g. read from user
    configuration) pre-populates each option's default value.
    """
    if defaults is None:
        defaults = {}
    options = {
        # Input fields to include in the linear regression.
        # (typo fix: flag was previously misspelled '--inear-fields',
        # while its dest was already 'linear_fields')
        '--linear-fields': {
            "action": 'store',
            "dest": 'linear_fields',
            "default": defaults.get('linear_fields', None),
            "help": ("Comma-separated list of input fields"
                     " (predictors) to create the linear regression.")},
        # If a BigML linear regression is provided, the script will
        # use it to generate predictions
        '--linear-regression': {
            'action': 'store',
            'dest': 'linear_regression',
            'default': defaults.get('linear_regression', None),
            'help': "BigML linear regression Id."},
        # The path to a file containing linear regression ids.
        '--linear-regressions': {
            'action': 'store',
            'dest': 'linear_regressions',
            'default': defaults.get('linear_regressions', None),
            'help': ("Path to a file containing linearregression/ids."
                     " One linearregression"
                     " per line (e.g., "
                     "linearregression/50a206a8035d0706dc000376"
                     ").")},
        # If a BigML json file containing a linear regression
        # structure is provided,
        # the script will use it.
        '--linear-file': {
            'action': 'store',
            'dest': 'linear_file',
            'default': defaults.get('linear_file', None),
            'help': "BigML linear regression JSON structure file."},
        # Does not create a linear regression just a dataset.
        '--no-linear-regression': {
            'action': 'store_true',
            'dest': 'no_linear_regression',
            'default': defaults.get('no_linear_regression', False),
            'help': "Do not create a linear regression."},
        # The path to a file containing linear regression attributes.
        '--linear-regression-attributes': {
            'action': 'store',
            'dest': 'linear_regression_attributes',
            'default': defaults.get('linear_regression_attributes', None),
            'help': ("Path to a json file describing linear regression"
                     " attributes.")},
        # Create a linear regression, not just a dataset.
        # (negates --no-linear-regression: both share the same dest)
        '--no-no-linear-regression': {
            'action': 'store_false',
            'dest': 'no_linear_regression',
            'default': defaults.get('no_linear_regression', False),
            'help': "Create a linear regression."}}
    return options
| {
"content_hash": "1252beaaa1fc73fed7535e6a32277e32",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 74,
"avg_line_length": 38.30555555555556,
"alnum_prop": 0.5514865844815083,
"repo_name": "jaor/bigmler",
"id": "f37f84acfeb99f5092c8d175243cf2c411809bf3",
"size": "3360",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "bigmler/options/linearregression.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "26465"
},
{
"name": "JavaScript",
"bytes": "73784"
},
{
"name": "Jupyter Notebook",
"bytes": "802"
},
{
"name": "Python",
"bytes": "2081730"
},
{
"name": "R",
"bytes": "71763"
}
],
"symlink_target": ""
} |
import os, sys, inspect
# Make the package root (two directory levels up) importable before pulling
# in pyeq2 -- presumably this script lives in pyeq2/Examples/Complex.
if os.path.join(sys.path[0][:sys.path[0].rfind(os.sep)], '../..') not in sys.path:
    sys.path.append(os.path.join(sys.path[0][:sys.path[0].rfind(os.sep)], '../..'))
import pyeq2, pyeq2.ExtendedVersionHandlers
# NOTE: Python 2 script (uses print statements).
# Walks every module in pyeq2.Models_2D, and for every equation class prints
# the display name of each applicable extended version, skipping combinations
# the class opts out of via its autoGenerate* flags.
if __name__ == "__main__":
    for submodule in inspect.getmembers(pyeq2.Models_2D):
        if inspect.ismodule(submodule[1]):
            for equationClass in inspect.getmembers(submodule[1]):
                if inspect.isclass(equationClass[1]):
                    for extendedVersionName in pyeq2.ExtendedVersionHandlers.extendedVersionHandlerNameList:
                        # Each guard skips an extended form the class does not auto-generate.
                        if (-1 != extendedVersionName.find('Offset')) and (equationClass[1].autoGenerateOffsetForm == False):
                            continue
                        if (-1 != extendedVersionName.find('Reciprocal')) and (equationClass[1].autoGenerateReciprocalForm == False):
                            continue
                        if (-1 != extendedVersionName.find('Inverse')) and (equationClass[1].autoGenerateInverseForms == False):
                            continue
                        if (-1 != extendedVersionName.find('Growth')) and (equationClass[1].autoGenerateGrowthAndDecayForms == False):
                            continue
                        if (-1 != extendedVersionName.find('Decay')) and (equationClass[1].autoGenerateGrowthAndDecayForms == False):
                            continue
                        # 'SSQABS' selects the sum-of-squared-absolute fitting target.
                        equation = equationClass[1]('SSQABS', extendedVersionName)
                        print '2D ' + submodule[0] + ' --- ' + equation.GetDisplayName()
print 'Done.' | {
"content_hash": "395bcc1a961865ea06f72230ed01a6b6",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 134,
"avg_line_length": 56.13333333333333,
"alnum_prop": 0.5564133016627079,
"repo_name": "JMoravec/unkRadnet",
"id": "82855143fa8c962425d5d41f49adb5551c14a438",
"size": "1796",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fitToCurve/pyeq2/Examples/Complex/ListAllExtendedVersionsOfEquations_2D.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "6107"
},
{
"name": "Logos",
"bytes": "132148"
},
{
"name": "M",
"bytes": "832584"
},
{
"name": "Matlab",
"bytes": "401"
},
{
"name": "Python",
"bytes": "2747757"
},
{
"name": "Shell",
"bytes": "418"
}
],
"symlink_target": ""
} |
from flask import Blueprint
# Blueprint holding the application's main routes.
main = Blueprint('main', __name__)
# Imported for side effects only (presumably they register handlers on
# ``main``); placed after the Blueprint definition to avoid a circular import.
from . import errors
from . import views
| {
"content_hash": "7084bb8613f62d25144c34831f8207c7",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 34,
"avg_line_length": 17.666666666666668,
"alnum_prop": 0.7169811320754716,
"repo_name": "lvhuiyang/cxcy-ims",
"id": "19559f85510ed221f36b749fd447a6dc09fa2bb4",
"size": "106",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/main/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "3394"
},
{
"name": "HTML",
"bytes": "89877"
},
{
"name": "JavaScript",
"bytes": "1609"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "74812"
}
],
"symlink_target": ""
} |
import os
import pkg_resources
def add_plugin(egg_info_dir, plugin_name):
    """
    Add the plugin to the given distribution (or spec), in
    .egg-info/paster_plugins.txt

    The file (and its parent directory) is created if missing; if
    ``plugin_name`` is already listed, the file is left untouched.
    """
    fn = os.path.join(egg_info_dir, 'paster_plugins.txt')
    if not os.path.exists(fn):
        lines = []
    else:
        # ``with`` guarantees the handle is closed even if reading fails
        # (the previous version leaked the handle on exception).
        with open(fn) as f:
            lines = [l.strip() for l in f.readlines() if l.strip()]
    if plugin_name in lines:
        # Nothing to do
        return
    lines.append(plugin_name)
    if not os.path.exists(os.path.dirname(fn)):
        os.makedirs(os.path.dirname(fn))
    with open(fn, 'w') as f:
        for line in lines:
            f.write(line)
            f.write('\n')
def remove_plugin(egg_info_dir, plugin_name):
    """
    Remove the plugin from the given distribution (or spec), in
    .egg-info/paster_plugins.txt.  Raises ValueError if the
    plugin is not in the file, or if the file does not exist.
    """
    fn = os.path.join(egg_info_dir, 'paster_plugins.txt')
    if not os.path.exists(fn):
        raise ValueError(
            "Cannot remove plugin from %s; file does not exist"
            % fn)
    f = open(fn)
    lines = [l.strip() for l in f.readlines() if l.strip()]
    f.close()
    # Case-insensitive match; ``line`` keeps the stored spelling so the
    # exact entry can be removed below.
    for line in lines:
        # What about version specs?
        if line.lower() == plugin_name.lower():
            break
    else:
        raise ValueError(
            "Plugin %s not found in file %s (from: %s)"
            % (plugin_name, fn, lines))
    lines.remove(line)
    # NOTE: Python 2 print statement -- this module predates Python 3.
    print 'writing', lines
    f = open(fn, 'w')
    for line in lines:
        f.write(line)
        f.write('\n')
    f.close()
def find_egg_info_dir(dir):
    """Walk upward from *dir* and return the first ``*.egg-info`` entry found.

    Returns the full path of the egg-info directory, or None when the
    top-most directory is reached (or a directory is unreadable) without
    a match.
    """
    current = dir
    while True:
        try:
            entries = os.listdir(current)
        except OSError:
            # Probably permission denied or something
            return None
        for entry in entries:
            if entry.endswith('.egg-info'):
                return os.path.join(current, entry)
        parent = os.path.dirname(current)
        if parent == current:
            # Reached the top-most directory without finding anything.
            return None
        current = parent
def resolve_plugins(plugin_list):
    """Resolve *plugin_list* (requirement spec strings) to distributions.

    Recursively pulls in plugins declared in each distribution's
    ``paster_plugins.txt`` metadata.  Re-raises DistributionNotFound with
    a setup hint when a plugin is not installed.
    (Python 2 module: note the ``except ..., e`` syntax and ``map`` return.)
    """
    found = []
    while plugin_list:
        plugin = plugin_list.pop()
        try:
            pkg_resources.require(plugin)
        except pkg_resources.DistributionNotFound, e:
            # Rewrite the exception message in place with a hint, then
            # re-raise the original exception object.
            msg = '%sNot Found%s: %s (did you run python setup.py develop?)'
            if str(e) != plugin:
                e.args = (msg % (str(e) + ': ', ' for', plugin)),
            else:
                e.args = (msg % ('', '', plugin)),
            raise
        found.append(plugin)
        dist = get_distro(plugin)
        if dist.has_metadata('paster_plugins.txt'):
            data = dist.get_metadata('paster_plugins.txt')
            # NOTE(review): loop variable shadows the module-level
            # add_plugin function.
            for add_plugin in parse_lines(data):
                if add_plugin not in found:
                    plugin_list.append(add_plugin)
    return map(get_distro, found)
def get_distro(spec):
    """Return the installed distribution matching *spec* (a requirement string)."""
    distribution = pkg_resources.get_distribution(spec)
    return distribution
def load_commands_from_plugins(plugins):
    """Collect ``paste.paster_command`` entry points from *plugins*.

    Later distributions in the list override earlier ones on name clashes.
    """
    commands = {}
    for dist in plugins:
        entry_map = pkg_resources.get_entry_map(
            dist, group='paste.paster_command')
        commands.update(entry_map)
    return commands
def parse_lines(data):
    """Split *data* into stripped lines, dropping blanks and ``#`` comments."""
    stripped = (line.strip() for line in data.splitlines())
    return [line for line in stripped
            if line and not line.startswith('#')]
def load_global_commands():
    """Return {name: entry_point} for every ``paste.global_paster_command``."""
    entry_points = pkg_resources.iter_entry_points('paste.global_paster_command')
    return dict((ep.name, ep) for ep in entry_points)
def egg_name(dist_name):
    """Return the filesystem-safe egg name for distribution *dist_name*."""
    safe = pkg_resources.safe_name(dist_name)
    return pkg_resources.to_filename(safe)
def egg_info_dir(base_dir, dist_name):
    """Locate the ``<egg_name>.egg-info`` directory for *dist_name*.

    Looks in *base_dir* itself and in each of its immediate subdirectories;
    raises IOError listing every candidate when none exists.
    """
    candidates = []
    for sub in ['.'] + os.listdir(base_dir):
        candidate = os.path.join(base_dir, sub,
                                 egg_name(dist_name) + '.egg-info')
        candidates.append(candidate)
        if os.path.exists(candidate):
            return candidate
    raise IOError("No egg-info directory found (looked in %s)"
                  % ', '.join(candidates))
| {
"content_hash": "e829a3dd6b3dcf0391a2d016acf186a2",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 76,
"avg_line_length": 30.71212121212121,
"alnum_prop": 0.5614208189442526,
"repo_name": "santisiri/popego",
"id": "34afc5a32f41a656af6a6af392ff33d9271df783",
"size": "4223",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "envs/ALPHA-POPEGO/lib/python2.5/site-packages/PasteScript-1.3.6-py2.5.egg/paste/script/pluginlib.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1246"
},
{
"name": "C",
"bytes": "504141"
},
{
"name": "C++",
"bytes": "26125"
},
{
"name": "CSS",
"bytes": "342653"
},
{
"name": "FORTRAN",
"bytes": "4872"
},
{
"name": "GAP",
"bytes": "13267"
},
{
"name": "Genshi",
"bytes": "407"
},
{
"name": "Groff",
"bytes": "17116"
},
{
"name": "HTML",
"bytes": "383181"
},
{
"name": "JavaScript",
"bytes": "1090769"
},
{
"name": "Makefile",
"bytes": "2441"
},
{
"name": "Mako",
"bytes": "376944"
},
{
"name": "Python",
"bytes": "20895618"
},
{
"name": "Ruby",
"bytes": "3380"
},
{
"name": "Shell",
"bytes": "23581"
},
{
"name": "Smarty",
"bytes": "522"
},
{
"name": "TeX",
"bytes": "35712"
}
],
"symlink_target": ""
} |
import requests
import jsonpickle
from requests_oauthlib import OAuth1
from urllib.parse import parse_qs, urlencode
import cherrypy
from collections import defaultdict
import json
import os
import re
from collections import defaultdict
# For readable serializations: pretty-print JSON with stable key order so
# the on-disk cache files are human-readable and diffable.
jsonpickle.set_encoder_options('json', sort_keys=True, indent=4)
class LocalCache(object):
    """ Generic class for encapsulating twitter credential caching.

    State lives in ``self.memcache`` with two top-level keys, "server" and
    "users"; it is persisted via jsonpickle to one server file plus one file
    per user (see the filename templates below).
    """
    # Filename templates: one server-state file, one file per user id.
    server_data_template = "{}.server"
    user_data_template = "{0}.user.{1}"
    def __init__(self, backup = "tmp/twitter.cache"):
        self.backup = backup #Unique identifier for the backup of this cache
        self.memcache = {
            "users" : defaultdict(lambda : {}),
            "server": defaultdict(lambda : {})
        }
        # Best-effort reload of previously serialized state from disk.
        self.deserialize()
    def users(self):
        """Mapping of user id -> per-user state dict."""
        return self.memcache['users']
    def set_user_state(self, user_id, state):
        """Replace the whole state dict for *user_id*."""
        self.memcache['users'][user_id] = state
    def update_user_state(self, user_id, state = {}):
        """Merge *state* into the user's existing state dict."""
        self.memcache['users'][user_id].update(state)
    def get_user_state(self, user_id):
        return self.memcache['users'][user_id]
    def clear_user_state(self, user_id):
        return self.memcache['users'][user_id].clear()
    def update_server_state(self, state_dict):
        """Merge *state_dict* into the server-wide state."""
        self.memcache['server'].update(state_dict)
    def get_server_state(self):
        return self.memcache['server']
    def clear_server_state(self):
        return self.memcache['server'].clear()
    def initialize_user_queue(self, user_id, queue):
        """Wrap *queue* in a ReadableQueue and attach it to the user state."""
        self.memcache['users'][user_id]['user_queue'] = ReadableQueue(queue)
    def user_queue(self, user_id):
        """Return the user's ReadableQueue, or None if never initialized."""
        if 'user_queue' in self.memcache['users'][user_id]:
            return self.memcache['users'][user_id]['user_queue']
    def server_fname(self):
        """Path of the serialized server-state file."""
        return self.server_data_template.format(self.backup)
    def user_fname(self, user):
        """Path of the serialized state file for *user*."""
        return self.user_data_template.format(self.backup, user)
    def deserialize(self):
        """Reload server and per-user state from disk, if backups exist.

        User files are loaded best-effort from the persisted ``user_list``;
        a corrupt server file re-raises the underlying exception.
        """
        cache_loaded = False
        if os.path.exists(self.server_fname()) and not os.path.isdir(self.backup):
            try:
                # Replace the defaultdicts with plain dicts before loading.
                self.memcache = { "server" : {},
                                  "users" : {} }
                with open(self.server_fname()) as backupfile:
                    print ("Attempting to reload cache")
                    self.memcache['server'] = jsonpickle.decode(backupfile.read())
                print ("Server cache loaded", json.dumps(self.memcache, indent=4))
                for user in self.memcache['server']['user_list']:
                    # Try to load as much user data as possible
                    if os.path.exists(self.user_fname(user)):
                        print ("found path for user", user)
                        with open(self.user_fname(user)) as userfile:
                            user_data = jsonpickle.decode(userfile.read())
                            self.memcache['users'][user] = user_data
                cache_loaded = True
            except Exception as e:
                print ("Cache file corrupted...")
                raise e
        if not cache_loaded:
            print ("Cache could not be loaded")
            pass
        else:
            print ("CACHE LOADED SUCCESSFULLY!")
    def serialize(self):
        """Write the server state plus every user's state to backup files.

        Persists ``user_list`` inside the server state so deserialize()
        knows which user files to read back.
        """
        json_to_serialize = self.memcache['server']
        user_list = list(self.users().keys())
        json_to_serialize.update({"user_list" : user_list})
        with open(self.server_fname(), 'w') as backup_server:
            # Serialize Server:
            json_encoded = jsonpickle.encode(json_to_serialize)
            backup_server.write(json_encoded)
        for user in user_list:
            user_data = self.get_user_state(user)
            json_encoded = jsonpickle.encode(user_data)
            with open(self.user_fname(user), 'w') as userfile:
                userfile.write(json_encoded)
class ReadableQueue(object):
    """A cursor over a list of readable items (e.g. Tweet objects).

    Items are stored as ``(index, item)`` pairs; ``pos`` is the cursor into
    the queue.  ``get_next``/``get_prev`` move the cursor and return the
    pairs just traversed.  Stored as a plain dict so the whole object
    serializes cleanly with jsonpickle (see LocalCache).
    """
    def __init__(self, queue=[], pos=0):
        self.hashmap = { "queue" : [(i, e) for i, e in enumerate(queue)],
                         "pos" : pos }
        return
    def queue(self):
        """The full list of (index, item) pairs."""
        return self.hashmap['queue']
    def is_empty(self):
        return len(self.queue()) == 0
    def is_finished(self):
        """True when the cursor has consumed every item."""
        return self.pos() == len(self.queue())
    def pos(self):
        return self.hashmap['pos']
    def set_pos(self, val):
        self.hashmap['pos'] = val
    def get_next(self, offset=1):
        """Advance the cursor by *offset* and return the consumed pairs.

        Returns None when the cursor is already at the end; the cursor is
        clamped to the queue length.
        """
        if self.pos() < len(self.queue()):
            chunk = self.queue()[self.pos(): self.pos() + offset]
            self.set_pos(self.pos() + offset)
            if self.pos() > len(self.queue()):
                self.set_pos(len(self.queue()))
            return chunk
    def read_out_next(self, offset=1):
        """Render the next *offset* items as a single spoken string."""
        return " ".join([readable.read_out(index)
                         for index, readable in self.get_next(offset)])
    def has_prev(self):
        return self.pos() > 0
    def get_prev(self, offset=1):
        """Move the cursor back by *offset* and return the uncovered pairs.

        Returns None when the cursor is already at the start.

        Bug fix: the slice previously ended at ``offset`` instead of
        ``pos + offset``, returning a wrong (often empty) window whenever
        the cursor was beyond the first items.
        """
        if self.pos() <= 0:
            return None
        start = self.pos() - offset
        if start < 0:
            # [1, current(2), 3] with get_prev(offset=3): only pos() items
            # exist behind the cursor, so clamp to the queue start.
            offset += start
            start = 0
        self.set_pos(start)
        return self.queue()[start: start + offset]
    def read_out_prev(self, offset=1):
        """Render the previous *offset* items as a single spoken string.

        Bug fix: entries are (index, item) pairs and ``read_out`` takes the
        index (mirroring read_out_next); the old code called
        ``readable.read_out()`` on the tuple itself.
        """
        return " ".join([readable.read_out(index)
                         for index, readable in self.get_prev(offset)])
#Local cache caches tokens for different users
# Module-level singleton; constructing it immediately attempts to reload any
# previously serialized state from disk (see LocalCache.deserialize).
local_cache = LocalCache()
def strip_html(text):
    """ Get rid of ugly twitter html: rewrite leading @mentions as a spoken
    "Replying to ..." prefix and drop any http/https link tokens.

    Bug fix: ``message`` was previously unbound (NameError) when the text
    consisted solely of @mentions or was empty.
    """
    def reply_to(text):
        replying_to = []
        split_text = text.split()
        message = []  # default: the tweet may be nothing but @mentions
        for index, token in enumerate(split_text):
            if token.startswith('@'): replying_to.append(token[1:])
            else:
                message = split_text[index:]
                break
        rply_msg = ""
        if len(replying_to) > 0:
            rply_msg = "Replying to "
            for token in replying_to[:-1]: rply_msg += token+","
            if len(replying_to)>1: rply_msg += 'and '
            rply_msg += replying_to[-1]+". "
        return rply_msg + " ".join(message)
    text = reply_to(text)
    text = text.replace('@', ' ')
    # Drop link tokens; the join/split also normalizes whitespace.
    return " ".join([token for token in text.split()
                     if ('http:' not in token) and ('https:' not in token)])
class Tweet(object):
    """Thin wrapper around a raw tweet JSON dict with speech-friendly rendering."""
    def __init__(self, json_obj):
        self.tweet = json_obj
    def get_id(self):
        """Numeric tweet id."""
        return self.tweet['id']
    def get_raw_text(self):
        """Unprocessed tweet text exactly as returned by the API."""
        return self.tweet['text']
    def _process_text(self):
        """Text cleaned for speech: links stripped, handles spoken as names."""
        spoken = strip_html(self.tweet['text'])
        spoken = spoken.replace('@', 'at ')
        for mention in self.tweet['entities']['user_mentions']:
            spoken = spoken.replace(mention['screen_name'], mention['name'])
        return spoken
    def get_screen_name(self):
        return self.tweet['user']['screen_name']
    def get_user_name(self):
        return self.tweet['user']['name']
    def read_out(self, index):
        """One-line spoken rendering, numbered from index + 1."""
        return "tweet number {num} by {user} : {text} ,".format(
            num=index + 1,
            user=self.get_user_name(),
            text=self._process_text())
    def detailed_description(self):
        """Multi-sentence spoken description including author and reply info."""
        parts = [
            "This tweet was posted by {user_name} whose twitter handle is {screen_name} the account description reads: {description}."
            .format(screen_name=self.tweet['user']['screen_name'],
                    user_name=self.tweet['user']['name'],
                    description=self.tweet['user']['description'])
        ]
        if self.tweet['retweeted']:
            parts.append("It's been retweeted {} times.".format(
                self.tweet['retweet_count']))
        if self.tweet['favorited']:
            parts.append("{} people have favorited it.".format(
                self.tweet['favorites_count']))
        if self.tweet["in_reply_to_screen_name"]:
            parts.append("it was posted in response to user {}.".format(
                self.tweet['in_reply_to_screen_name']))
        parts.append("the text of the tweet is, {}.".format(
            self._process_text()))
        return " ".join(parts)
    def user_mentions(self):
        return self.tweet['user_mentions']
def get_cached_access_pair(uid):
    """Return (access_token, access_secret) for *uid* from the local cache.

    Raises ValueError when the user has never authenticated.
    """
    if uid not in local_cache.users():
        raise ValueError
    state = local_cache.get_user_state(uid)
    return state['access_token'], state['access_secret']
def get_request_token(callback_url=None):
    """Request a temporary OAuth token pair from Twitter.

    Caches the request token/secret in the server state and returns
    ``(oauth_token_secret, oauth_token)`` as parsed (list) values.
    """
    url = "https://api.twitter.com/oauth/request_token"
    consumer_key, consumer_secret = local_cache.get_server_state()['twitter_keys']
    oauth = OAuth1(consumer_key, consumer_secret)
    reply = requests.post(url, auth=oauth,
                          params={"oauth_callback": callback_url})
    parsed = parse_qs(reply.text)
    local_cache.update_server_state({
        "request_token": parsed['oauth_token'][0],
        "request_secret": parsed['oauth_token_secret'][0]})
    return parsed['oauth_token_secret'], parsed['oauth_token']
def authenticate_user_page(callback_url="", metadata=None):
    """Return the HTML of Twitter's OAuth authentication page.

    Side effects: obtains and caches a fresh request token, and stores
    *metadata* in the server state for use after the OAuth callback.
    """
    url = "https://api.twitter.com/oauth/authenticate"
    _, oauth_token = get_request_token(callback_url)
    local_cache.update_server_state({'metadata': metadata})
    page = requests.get(url, params={"force_login": True,
                                     "oauth_token": oauth_token})
    return page.text
def post_tweet(user_id, message, additional_params={}):
    """
    Helper function to post a tweet
    """
    url = "https://api.twitter.com/1.1/statuses/update.json"
    payload = {"status": message}
    payload.update(additional_params)
    response = make_twitter_request(url, user_id, payload, request_type='POST')
    print (response.text)
    return "Successfully posted a tweet {}".format(message)
def get_access_token(oauth_token, oauth_verifier):
    """Exchange the OAuth verifier for a permanent access token.

    Caches the user's credentials (keyed by the returned access token),
    serializes the cache to disk, and returns URL-encoded fragments for
    the OAuth redirect.
    """
    url = "https://api.twitter.com/oauth/access_token"
    server_state = local_cache.get_server_state()
    consumer_key, consumer_secret = server_state['twitter_keys']
    auth = OAuth1(consumer_key, consumer_secret,
                  server_state['request_token'],
                  server_state['request_secret'])
    reply = requests.post(url, params={"oauth_verifier": oauth_verifier},
                          auth=auth)
    parsed = parse_qs(reply.text)
    # The access token doubles as our internal user id.
    uid = parsed['oauth_token'][0]
    print ("Access token", uid)
    local_cache.set_user_state(
        user_id=uid,
        state={"access_token": parsed['oauth_token'][0],
               "access_secret": parsed['oauth_token_secret'][0],
               'twitter_user_id': parsed['user_id'][0],
               'screen_name': parsed['screen_name'][0]})
    local_cache.serialize()
    fragments = {
        "state": local_cache.get_server_state()['metadata']['state'],
        "access_token": uid,
        "token_type": "Bearer"
    }
    return urlencode(fragments)
def get_twitter_auth(user_id):
    """Build an OAuth1 signer for *user_id* from cached credentials."""
    consumer_key, consumer_secret = local_cache.get_server_state()['twitter_keys']
    token, secret = get_cached_access_pair(user_id)
    return OAuth1(consumer_key, consumer_secret, token, secret)
def process_tweets(tweet_list):
    """Wrap each raw tweet dict in a Tweet object."""
    return list(map(Tweet, tweet_list))
def make_twitter_request(url, user_id, params={}, request_type='GET'):
    """ Generically make a request to twitter API using a particular user's
    authorization.

    Supported verbs are 'GET' and 'POST'; anything else raises ValueError
    (previously an unsupported verb silently returned None, which surfaced
    later as an opaque AttributeError on the missing response).
    """
    auth = get_twitter_auth(user_id)
    if request_type == "GET":
        return requests.get(url, auth=auth, params=params)
    elif request_type == "POST":
        return requests.post(url, auth=auth, params=params)
    raise ValueError("Unsupported request_type: {!r}".format(request_type))
def get_user_twitter_details(user_id, params={}):
    """Look up the cached user's Twitter profile via users/lookup.

    Bug fix: the previous version called ``params.update`` in place, which
    mutated the shared default dict (and any dict the caller passed in).
    """
    url = "https://api.twitter.com/1.1/users/lookup.json"
    user_cache = local_cache.get_user_state(user_id)
    query = dict(params)  # copy: never mutate the caller's (or default) dict
    query.update({"user_id": user_cache['twitter_user_id']})
    response = make_twitter_request(url, user_id, query)
    return response.json()
def geo_search(user_id, search_location):
    """
    Search for a location - free form
    """
    url = "https://api.twitter.com/1.1/geo/search.json"
    return make_twitter_request(url, user_id,
                                {"query": search_location}).json()
def closest_trend_search(user_id, params={}):
    """Query the trends/closest endpoint with the given *params*."""
    endpoint = "https://api.twitter.com/1.1/trends/closest.json"
    return make_twitter_request(endpoint, user_id, params).json()
def list_trends(user_id, woe_id):
    """Return trending topics for the place identified by *woe_id*."""
    endpoint = "https://api.twitter.com/1.1/trends/place.json"
    return make_twitter_request(endpoint, user_id, {"id": woe_id}).json()
def read_out_tweets(processed_tweets, speech_convertor=None):
    """
    Input - list of processed 'Tweets' as (user, text) pairs
    output - list of spoken responses, numbered from 1
    """
    responses = []
    for position, (user, text) in enumerate(processed_tweets, start=1):
        responses.append("tweet number {num} by {user}. {text}.".format(
            num=position, user=user, text=text))
    return responses
def request_tweet_list(url, user_id, params={}):
    """Fetch *url* for *user_id* and return the result as Tweet objects.

    Bug fix: *params* was accepted but silently dropped, so callers such as
    get_user_latest_tweets lost their query parameters; it is now forwarded.
    """
    return process_tweets(make_twitter_request(url, user_id, params).json())
def get_home_tweets(user_id, input_params={}):
    """Fetch the user's home timeline as processed Tweet objects."""
    endpoint = "https://api.twitter.com/1.1/statuses/home_timeline.json"
    print ("Trying to get home tweets")
    return request_tweet_list(endpoint, user_id)
def get_retweets_of_me(user_id, input_params={}):
    """ returns recently retweeted tweets """
    endpoint = "https://api.twitter.com/1.1/statuses/retweets_of_me.json"
    print ("trying to get retweets")
    return request_tweet_list(endpoint, user_id)
def get_my_favourite_tweets(user_id, input_params={}):
    """ Returns a user's favourite tweets """
    endpoint = "https://api.twitter.com/1.1/favorites/list.json"
    return request_tweet_list(endpoint, user_id)
def get_user_latest_tweets(user_id, params={}):
    """Fetch the user's own recent tweets (user_timeline) as Tweet objects."""
    endpoint = "https://api.twitter.com/1.1/statuses/user_timeline.json?"
    return request_tweet_list(endpoint, user_id, params)
def get_latest_twitter_mentions(user_id):
    """Fetch tweets mentioning the user (mentions_timeline) as Tweet objects."""
    endpoint = "https://api.twitter.com/1.1/statuses/mentions_timeline.json"
    return request_tweet_list(endpoint, user_id)
def search_for_tweets_about(user_id, params):
    """ Search twitter API """
    endpoint = "https://api.twitter.com/1.1/search/tweets.json"
    reply = make_twitter_request(endpoint, user_id, params)
    return process_tweets(reply.json()["statuses"])
| {
"content_hash": "4426eed72846f69e4089b8ffa0bcddc0",
"timestamp": "",
"source": "github",
"line_count": 421,
"max_line_length": 150,
"avg_line_length": 36.365795724465556,
"alnum_prop": 0.5872632266492489,
"repo_name": "anjishnu/ask-alexa-twitter",
"id": "e0986b7dc3912a34a19f7612f40be9b6072d9a7e",
"size": "15310",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib/twitter_utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "52142"
},
{
"name": "Shell",
"bytes": "542"
}
],
"symlink_target": ""
} |
import autocomplete_light
from django import forms
from django.core.validators import RegexValidator
from django.utils.translation import ugettext as _
from .models import Person, Country, Genre, Language, Film
class SearchForm(forms.Form):
    """Film search form: every field is optional and narrows the result set.

    Model-backed fields use autocomplete widgets; ``year_start``/``year_end``
    are digit-validated integer bounds.
    """
    title = forms.ModelChoiceField(
        Film.objects.all(),
        widget=autocomplete_light.MultipleChoiceWidget('FilmAutocomplete'),
        required=False,
        label=_('Title')
    )
    genre = forms.ModelChoiceField(
        Genre.objects.all(),
        widget=autocomplete_light.MultipleChoiceWidget('GenreAutocomplete'),
        required=False,
        label=_('Genre')
    )
    language = forms.ModelChoiceField(
        Language.objects.all(),
        widget=autocomplete_light.MultipleChoiceWidget('LanguageAutocomplete'),
        required=False,
        label=_('Language')
    )
    country = forms.ModelChoiceField(
        Country.objects.all(),
        widget=autocomplete_light.MultipleChoiceWidget('CountryAutocomplete'),
        required=False,
        label=_('Country')
    )
    director = forms.ModelChoiceField(
        Person.objects.all(),
        widget=autocomplete_light.MultipleChoiceWidget('PersonDirectorAutocomplete'),
        required=False,
        label=_('Director')
    )
    writer = forms.ModelChoiceField(
        Person.objects.all(),
        widget=autocomplete_light.MultipleChoiceWidget('PersonWriterAutocomplete'),
        required=False,
        label=_('Writer')
    )
    cast = forms.ModelChoiceField(
        Person.objects.all(),
        widget=autocomplete_light.MultipleChoiceWidget('PersonCastAutocomplete'),
        required=False,
        label=_('Cast')
    )
    year_start = forms.IntegerField(
        required=False,
        widget=forms.widgets.TextInput,
        # Raw string: "\d" in a plain literal is an invalid escape sequence
        # (warning on modern Python); the pattern itself is unchanged.
        validators=[RegexValidator(r"\d+")],
        label=_('Year start')
    )
    year_end = forms.IntegerField(
        required=False,
        widget=forms.widgets.TextInput,
        validators=[RegexValidator(r"\d+")],
        label=_('Year end')
    )
| {
"content_hash": "9c9126445fa4da3be6de7546b7c7ff74",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 85,
"avg_line_length": 28.690140845070424,
"alnum_prop": 0.648993618065783,
"repo_name": "dvalcarce/filmyou-web",
"id": "971f069fe11d4386ed79499e12949a607bd48fcf",
"size": "2062",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/apps/films/forms.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "6664"
},
{
"name": "JavaScript",
"bytes": "16164"
},
{
"name": "PHP",
"bytes": "969"
},
{
"name": "Python",
"bytes": "84226"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Schema migration: drop the ``description`` field from ``Partner``."""

    # Must be applied after the migration that added Partner.logo.
    dependencies = [
        ('website', '0007_partner_logo'),
    ]

    operations = [
        # Destructive on apply: any stored description text is discarded.
        migrations.RemoveField(
            model_name='partner',
            name='description',
        ),
    ]
| {
"content_hash": "95d559fb3896bf70ae78c5234bd33ced",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 41,
"avg_line_length": 18.823529411764707,
"alnum_prop": 0.5875,
"repo_name": "HackSoftware/hackconf.bg",
"id": "143f31a0e6d5cf78f0aeefe6a8b69b436f4b55c9",
"size": "392",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "home/website/migrations/0008_remove_partner_description.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "96769"
},
{
"name": "HTML",
"bytes": "50654"
},
{
"name": "JavaScript",
"bytes": "3477"
},
{
"name": "Python",
"bytes": "162387"
},
{
"name": "Shell",
"bytes": "4611"
}
],
"symlink_target": ""
} |
__author__ = 'Joe Linn'
import abc
import pylastica.param
class AbstractFacet(pylastica.param.Param):
    """Abstract base class for search facets.

    Concrete facet implementations accumulate their parameters in
    ``self._facet`` via :meth:`_set_facet_param`; :meth:`to_dict`
    serializes that dict for the search request body.
    """
    # Python 2 style metaclass assignment marking this class abstract.
    __metaclass__ = abc.ABCMeta
    def __init__(self, name):
        """
        @param name: the name of the facet
        @type name: str
        """
        super(AbstractFacet, self).__init__()
        self._facet = {}
        self.name = name
    @property
    def name(self):
        """
        Get the name of this facet
        @return: the facet name
        @rtype: str
        """
        return self._name
    @name.setter
    def name(self, name):
        """
        Set the name of this facet.
        @param name: non-empty facet name
        @type name: str
        """
        assert isinstance(name, str) and name != '', "name must be a string: %r" % name
        self._name = name
    def set_filter(self, facet_filter):
        """
        Set a filter for this facet
        @param facet_filter: a filter to apply on the facet
        @type facet_filter: pylastica.filter.AbstractFilter
        @return: this facet (fluent interface)
        @rtype: self
        """
        if not isinstance(facet_filter, pylastica.filter.AbstractFilter):
            raise TypeError("facet_filter must be an instance of an implementation of AbstractFilter: %r" % facet_filter)
        return self._set_facet_param('facet_filter', facet_filter.to_dict())
    def set_global(self, glob=True):
        """
        Sets the flag to either run the facet globally or bound to the current search query
        @param glob: True to run the facet globally
        @type glob: bool
        @return: this facet (fluent interface)
        @rtype: self
        """
        return self._set_facet_param('global', bool(glob))
    def set_nested(self, nested_path):
        """
        Set the path for nested documents
        @param nested_path: document path
        @type nested_path: str
        @return: this facet (fluent interface)
        @rtype: self
        """
        return self._set_facet_param('nested', nested_path)
    def set_scope(self, scope):
        """
        Set the scope
        @param scope: scope name
        @type scope: str
        @return: this facet (fluent interface)
        @rtype: self
        """
        return self._set_facet_param('scope', scope)
    def to_dict(self):
        """
        Serialize this facet's accumulated parameters.
        @return: the facet parameter dict
        @rtype: dict
        """
        return self._facet
    def _set_facet_param(self, key, value):
        """
        Sets a param for the facet. Each facet implementation must handle its own parameters.
        @param key: parameter name
        @type key: str
        @param value: parameter value
        @type value: mixed
        @return: this facet (fluent interface)
        @rtype: self
        """
        self._facet[key] = value
        return self
| {
"content_hash": "9d6412dc819442a69410ddcab6259abd",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 121,
"avg_line_length": 25.838383838383837,
"alnum_prop": 0.5390930414386239,
"repo_name": "jlinn/pylastica",
"id": "52f914289ecc62095448fe9e4d6b7c66e38a926e",
"size": "2558",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pylastica/facet/abstractfacet.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Perl",
"bytes": "6774"
},
{
"name": "Python",
"bytes": "547260"
},
{
"name": "Shell",
"bytes": "1771"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
class Migration(migrations.Migration):
    """Make the ``created`` timestamp of the four core models default to
    the current (timezone-aware) time while remaining optional."""

    dependencies = [
        ('fum', '0001_initial'),
    ]

    operations = [
        # The same change is applied to each model: default ``created`` to
        # django.utils.timezone.now, keep it nullable/blankable.
        migrations.AlterField(
            model_name='groups',
            name='created',
            field=models.DateTimeField(default=django.utils.timezone.now, null=True, blank=True),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='projects',
            name='created',
            field=models.DateTimeField(default=django.utils.timezone.now, null=True, blank=True),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='servers',
            name='created',
            field=models.DateTimeField(default=django.utils.timezone.now, null=True, blank=True),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='users',
            name='created',
            field=models.DateTimeField(default=django.utils.timezone.now, null=True, blank=True),
            preserve_default=True,
        ),
    ]
| {
"content_hash": "1aab44c27e625ecd53e4bc7b7c7e6c3c",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 97,
"avg_line_length": 31.18421052631579,
"alnum_prop": 0.5915611814345991,
"repo_name": "tigeli/futurice-ldap-user-manager",
"id": "aa41dca745fca9268d4ca1250aaf81b970bc1379",
"size": "1209",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "fum/migrations/0002_auto_20150122_1308.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "41598"
},
{
"name": "HTML",
"bytes": "43890"
},
{
"name": "JavaScript",
"bytes": "684848"
},
{
"name": "Python",
"bytes": "305516"
},
{
"name": "Shell",
"bytes": "3559"
}
],
"symlink_target": ""
} |
import logging
import requests
import mimeparse
class RawVideoPlugin:
    """An export plugin that only tries to post the raw source of a video.
    """

    def __init__(self, useragent: str, **options):
        """Store the user agent used for the HTTP HEAD probes.

        :param useragent: The useragent to use to perform HTTP HEAD requests.
        :param options: Ignored; accepted for loader compatibility.
        """
        self.log = logging.getLogger('lapis.rawvideo')
        self.useragent = useragent
        self.headers = {'User-Agent': self.useragent}

    def export_submission(self,
                          import_urls: list,
                          video: bool=False,
                          **import_info) -> dict:
        """Check if something reported as a video is a raw video, then
        post the direct link if it is.

        This function will define the following values in the export data:
        - link_display

        :param import_urls: Candidate links to videos.
        :param video: Whether the imported data is a video or not.
        :param import_info: Other importing information passed. Ignored.
        :return: None if no export, an export info dictionary otherwise.
        """
        if not video:
            return None
        self.log.debug('Attempting to upload raw video URL.')
        # Probe every candidate URL; keep only those that answer with a
        # video MIME type.
        links = [line for line in (self._direct_video_line(url)
                                   for url in import_urls)
                 if line is not None]
        if not links:
            self.log.info('No direct video links found!')
            return None
        return {'link_display': ''.join(links)}

    def _direct_video_line(self, url):
        """Return a markdown line for *url* if it is a raw video, else None."""
        response = requests.head(url, headers=self.headers)
        if not response.ok:
            self.log.debug('URL %s was not valid.', url)
            return None
        try:
            mime = mimeparse.parse_mime_type(response.headers.get('Content-Type'))
        except Exception:
            self.log.debug('Error parsing MIME for URL %s', url)
            return None
        if mime[0] != 'video':
            self.log.debug('URL %s is not a video!', url)
            return None
        return '[Direct video](%s) \n' % url
# Module-level export hook — presumably how the plugin loader discovers
# this module's plugin class; confirm against the loader code.
__plugin__ = RawVideoPlugin

# END OF LINE.
| {
"content_hash": "7cef7280ef7a73ad6c9c86f6c658950b",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 77,
"avg_line_length": 34.87301587301587,
"alnum_prop": 0.5616750113791534,
"repo_name": "Shugabuga/LapisMirror",
"id": "634cf3cd45ace7871fda3a99ca217b05a8148b6b",
"size": "3307",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "plugins/rawvideo.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "101159"
}
],
"symlink_target": ""
} |
# Example: download a SharePoint file that was shared via a guest
# (anonymous) sharing link.
import os
import tempfile

from office365.sharepoint.client_context import ClientContext
from tests import test_team_site_url, test_client_credentials

client = ClientContext(test_team_site_url).with_credentials(test_client_credentials)
# An ":x:" (spreadsheet) sharing URL; get_file_by_guest_url below resolves
# it to the underlying file object.
sharing_link_url = "https://mediadev8.sharepoint.com/:x:/s/team/EcEbi_M2xQJLng_bvQjPtgoB1rB6BFvMVFixnf4wOxfE5w?e=bzNjb6"
# Write the download into a fresh temporary directory.
download_path = os.path.join(tempfile.mkdtemp(), "Report.csv")
with open(download_path, "wb") as local_file:
    # NOTE(review): "file" shadows the builtin name; left unchanged in this
    # doc-only pass. execute_query() actually sends the queued request.
    file = client.web.get_file_by_guest_url(sharing_link_url).download(local_file).execute_query()
print("[Ok] file has been downloaded into: {0}".format(download_path))
| {
"content_hash": "e0ec05d3129b87b29205e0cf0bad6671",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 120,
"avg_line_length": 40.0625,
"alnum_prop": 0.7784711388455539,
"repo_name": "vgrem/Office365-REST-Python-Client",
"id": "b49b2b8887aadf09e08cc993a0fd377e2a6ac9ef",
"size": "641",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/sharepoint/files/get_shared_file.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1659292"
}
],
"symlink_target": ""
} |
import os
import csv

# CSV dialect used by the discography export.
delimiter = ','
quote_character = '"'

DATADIR = ""
DATAFILE = "beatles-diskography.csv"


def parse_file(datafile):
    """Parse the first ten data rows of *datafile*.

    The first row supplies the field names (csv.DictReader default); each
    subsequent row becomes a dict keyed by those names.

    Fixes over the original implementation: the ``datafile`` argument is
    actually used (it was ignored in favour of the DATAFILE global), the
    file handle is closed via a context manager (it leaked), the file is
    opened in text mode with ``newline=''`` as the csv module requires
    under Python 3 (it was opened ``'rb'``), and the parsed rows are
    returned (``data`` was built but never filled).

    :param datafile: path to the CSV file to read.
    :return: list of up to ten dicts, one per data row.
    """
    data = []
    with open(datafile, 'r', newline='') as csv_fp:
        csv_reader = csv.DictReader(csv_fp, delimiter=delimiter,
                                    quotechar=quote_character)
        for current_row, row in enumerate(csv_reader, start=1):
            # Keep parity with the original cap: header + 10 data rows.
            if current_row > 10:
                break
            data.append(row)
    return data
| {
"content_hash": "f6885825730aa2651334a88054642e9e",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 361,
"avg_line_length": 36.42857142857143,
"alnum_prop": 0.6284313725490196,
"repo_name": "mgumiero9/python",
"id": "1fda82ca4eedb2b9e17151bfd63d19a102f9b812",
"size": "1020",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "parsecsv.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2423"
}
],
"symlink_target": ""
} |
# mygame/world/batchcode_world.py
from evennia import create_object, search_object
from typeclasses import rooms, exits
from evennia.utils import evtable
from world import map_module
# We begin by creating our rooms so we can detail them later.
# Create the five rooms first so they can be described (and later linked).
centre = create_object(rooms.Room, key="crossroads")
north = create_object(rooms.Room, key="castle")
east = create_object(rooms.Room, key="cottage")
south = create_object(rooms.Room, key="camp")
west = create_object(rooms.Room, key="coast")


def _set_description(room, map_x, map_y, text):
    """Give *room* a two-column description: minimap left, prose right.

    The EvTable is built without a border; the prose column is then capped
    at width 70 while the map column keeps whatever space it needs. The
    reformat is applied through ``room.db.desc`` after assignment, exactly
    as the original per-room code did.
    """
    room.db.desc = evtable.EvTable(map_module.return_minimap(map_x, map_y),
                                   text, border=None)
    room.db.desc.reformat_column(1, width=70)


# The cross roads.
_set_description(centre, 4, 5,
                 "The merger of two roads. A single lamp post dimly "
                 "illuminates the lonely crossroads. To the north "
                 "looms a mighty castle. To the south the glow of "
                 "a campfire can be seen. To the east lie a wall of "
                 "mountains and to the west the dull roar of the open sea.")
# The northern castle.
_set_description(north, 4, 2,
                 "An impressive castle surrounds you. There might be "
                 "a princess in one of these towers.")
# The eastern cottage.
_set_description(east, 6, 5,
                 "A cosy cottage nestled among mountains stretching "
                 "east as far as the eye can see.")
# The southern camp.
_set_description(south, 4, 7,
                 "Surrounding a clearing are a number of tribal tents "
                 "and at their centre a roaring fire.")
# The western coast.
_set_description(west, 2, 5,
                 "The dark forest halts to a sandy beach. The sound of "
                 "crashing waves calms the soul.")
"content_hash": "48732915034d7abfcf7cc2dc8a19258c",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 76,
"avg_line_length": 37.986111111111114,
"alnum_prop": 0.6420475319926874,
"repo_name": "whitehorse-io/encarnia",
"id": "cb3796fadc6b0c5f24d29c8269e2b85bd6cf808d",
"size": "2760",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Encarnia/world/batchcode_world.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "106"
},
{
"name": "C",
"bytes": "63966"
},
{
"name": "CSS",
"bytes": "87525"
},
{
"name": "Emacs Lisp",
"bytes": "2734"
},
{
"name": "GAP",
"bytes": "18122"
},
{
"name": "HTML",
"bytes": "91741"
},
{
"name": "JavaScript",
"bytes": "151335"
},
{
"name": "Objective-C",
"bytes": "1292"
},
{
"name": "Python",
"bytes": "24616242"
},
{
"name": "Shell",
"bytes": "8808"
}
],
"symlink_target": ""
} |
'''
Author: Hans Erik Heggem
Email: hans.erik.heggem@gmail.com
Project: Master's Thesis - Autonomous Inspection Of Wind Blades
Repository: Master's Thesis - CV (Computer Vision)
'''
################### UNIT TEST ########################
import unittest
from Settings.TestData import TestData
from TestUnits.Test_main import Test_main
'''
@brief Test unit for PtGreyJordens
'''
class Test_PtGreyJordens(unittest.TestCase, Test_main, TestData):
def setUp(self):
'''
@brief Give all setups to the unit test.
'''
self.SetAllKey()
self.InitTestData()
#### IMPORTS #####
from Settings import Settings
from src.DroneVision.DroneVision_src.hardware.PtGrey import PtGreyJordens
self.Settings = Settings
self.PtGreyJordens = PtGreyJordens
##################
def tearDown(self):
'''
@brief Give all tear down steps.
Is runned even if the test failed.
'''
pass
def test_PtGreyCaptureFrame(self):
'''
@brief Test PtGreyJordens
'''
from src.DroneVision.DroneVision_src.hardware.imageTools import MatplotShow, RealTimePlot
from src.DroneVision.DroneVision_src.hardware.PyQtImage import PyQtImage
from Settings.Exceptions import PtGreyError
from src.bin.UserInput.UserInput import UserInput
import warnings
from getpass import getpass
settings = self.Settings.Settings()
ptgrey = self.PtGreyJordens.PtGreyJordens(settings.GetSettings('CAMERA', 'camera_triggerPin'), \
settings.GetSettings('CAMERA', 'ptg_triggerPin'), \
False, \
settings.GetSettings('CAMERA', 'ptg_recv_frame_timeout'))
print '\n\n'
#print 'FORMAT7: {0}\n\n'.format(ptgrey.SetFormat7Configuration(mode=ptgrey.GetFc2().MODE_0, pixel_format=ptgrey.GetFc2().PIXEL_FORMAT_RGB8))
#print 'VIDEO AND FRAME RATE: {0}\n\n'.format(ptgrey.SetVideoModeAndFrameRate(video_mode=ptgrey.GetFc2().VIDEOMODE_FORMAT7)) # Unknown error when setting video mode and frame rate
print 'FORMAT7: {0}\n\n'.format(ptgrey.SetFormat7Configuration(mode=ptgrey.GetFc2().MODE_0, pixel_format=ptgrey.GetFc2().PIXEL_FORMAT_RAW8))
print 'CONFIGURATIONS: {0}\n\n'.format(ptgrey.SetConfiguration(num_buffers=3))
ptgrey.StartCapture()
print "######## SET MANUAL ##########\n\n"
''' FOR RGB8 FRAME SETTINGS '''
# print 'FORMAT7 (warning): {0}\n\n'.format(ptgrey.SetFormat7Configuration()) # Expect warning
# print 'VIDEO AND FRAME RATE (warning): {0}\n\n'.format(ptgrey.SetVideoModeAndFrameRate()) #Expect warning
# print 'CONFIGURATIONS (warning): {0}\n\n'.format(ptgrey.SetConfiguration()) # Expect warning
# print 'FRAME RATE: {0}\n\n'.format(ptgrey.SetFrameRate(32.0))
# print 'SET GAIN: {0}\n\n'.format(ptgrey.SetGain(7.3, auto=False))
# print 'SET SHUTTER: {0}\n\n'.format(ptgrey.SetShutter(300.52, auto=False))
# print 'SET BRIGHTNESS: {0}\n\n'.format(ptgrey.SetBrightness(0.5))
# print 'SET AUTO EXPOSURE: {0}\n\n'.format(ptgrey.SetAutoExposure(1.34, auto=False))
# #print 'SET SHARPNESS: {0}\n\n'.format(ptgrey.SetSharpness(7.3, auto=False)) # Unkown api error
# print 'SET GAMMA: {0}\n\n'.format(ptgrey.SetGamma(1.5, auto=False))
# print 'SET WHITE BALANCE: {0}\n\n'.format(ptgrey.SetWhiteBalance(1536, 0, auto=False))
''' FOR RAW8 FRAME SETTINGS '''
print 'FORMAT7 (warning): {0}\n\n'.format(ptgrey.SetFormat7Configuration()) # Expect warning
print 'VIDEO AND FRAME RATE (warning): {0}\n\n'.format(ptgrey.SetVideoModeAndFrameRate()) #Expect warning
print 'CONFIGURATIONS (warning): {0}\n\n'.format(ptgrey.SetConfiguration()) # Expect warning
print 'FRAME RATE: {0}\n\n'.format(ptgrey.SetFrameRate(32.0))
print 'SET GAIN: {0}\n\n'.format(ptgrey.SetGain(28.09, auto=False))
print 'SET SHUTTER: {0}\n\n'.format(ptgrey.SetShutter(109.61, auto=False))
print 'SET BRIGHTNESS: {0}\n\n'.format(ptgrey.SetBrightness(3.0))
print 'SET AUTO EXPOSURE: {0}\n\n'.format(ptgrey.SetAutoExposure(0.923, auto=False))
#print 'SET SHARPNESS: {0}\n\n'.format(ptgrey.SetSharpness(28.1, auto=False)) # Unkown api error
print 'SET GAMMA: {0}\n\n'.format(ptgrey.SetGamma(1.5, auto=False))
print 'SET WHITE BALANCE: {0}\n\n'.format(ptgrey.SetWhiteBalance(1536, 0, auto=False))
if self.ptgrey_grab_infinite:
#realTimePlot = RealTimePlot(interactive_mode=True)
realTimePlot = PyQtImage(True)
settings.ChangeSetting('USER_INPUT', 'automatic_mode', True)
userInput = UserInput(settings.GetSettings('USER_INPUT'))
while True:
try:
frame = ptgrey.CaptureFrame()
except PtGreyError, err:
warnings.simplefilter('always')
warnings.warn(str(err), Warning)
warnings.simplefilter('default')
ptgrey.RestartCapture()
continue
print 'Captured frame with shape: {0} and type: {1}'.format(frame.shape, frame.dtype)
matplotlist = [('Frame', frame)]
print "######## SET AUTO ##########\n\n"
print 'SET GAIN: {0}\n\n'.format(ptgrey.SetGain(auto=True))
print 'SET SHUTTER: {0}\n\n'.format(ptgrey.SetShutter(auto=True))
print 'SET BRIGHTNESS: {0}\n\n'.format(ptgrey.SetBrightness(3.0))
print 'SET AUTO EXPOSURE: {0}\n\n'.format(ptgrey.SetAutoExposure(auto=True))
#print 'SET SHARPNESS: {0}\n\n'.format(ptgrey.SetSharpness(auto=True)) # Unkown api error
print 'SET GAMMA: {0}\n\n'.format(ptgrey.SetGamma(auto=True))
print 'SET WHITE BALANCE: {0}\n\n'.format(ptgrey.SetWhiteBalance(auto=True))
if not(self.CheckAllTests()):
if self.ptgrey_grab_infinite:
#realTimePlot(matplotlist)
realTimePlot.UpdatePQImages(matplotlist)
if userInput.CheckTerminated():
break
else:
MatplotShow(matplotlist, save_fig=self.save_figs, save_fig_only=self.save_figs_only)
if settings.GetSettings('CAMERA', 'manual_triggering'):
q = getpass('Enter "q" to quit, or nothing to continue..')
if q == 'q':
break
else:
break
ptgrey.DisconnectCamera()
| {
"content_hash": "45d80bc52810004410fdeb1361ccfcfa",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 181,
"avg_line_length": 43.96212121212121,
"alnum_prop": 0.6998104428743753,
"repo_name": "hansehe/Wind-Blade-Inspection",
"id": "df512af487e1ee74dd636cf9a693f10ef0d0f137",
"size": "5803",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "TestUnits/Test_src/Test_DroneVision/Test_DroneVision_src/Test_hardware/Test_PtGrey/Test_PtGreyJordens.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2183232"
}
],
"symlink_target": ""
} |
__author__ = 'SL_RU'
### Логика музыкального плеера
import os
import aplayer
import random
import json
import aplayer
import musicplaylist
import audiobook
import time
def log(s):
    """Print a tagged trace message for the books-player module."""
    tagged = "BOOKS_PLAYER:" + s
    print(tagged)
class BooksPlayer(object):
    """Audiobook playback logic layered on top of an audio player backend.

    Scans ``path`` for book directories, delegates actual audio output to
    ``self.aplayer`` and persists progress to ``booksplayer.json``.
    """
    # NOTE(review): these are class-level (shared) attributes used as
    # instance defaults; ``books`` is a mutable class attribute, though
    # refresh_path() rebinds it per instance before use.
    aplayer = None
    books = list()
    cur_book = None
    path = ""
    cur_audio = ""
    def __init__(self, audioPlayer, path):
        # Adopt the backend player, remember the books directory, start up.
        self.set_aplayer(audioPlayer)
        self.path = path
        #self.create_playlist()
        self.turn_on()
    def refresh_path(self):
        """Find and enumerate books (subdirectories) in the path"""
        self.books = list()
        # os.walk's first tuple describes self.path; index [1] is its
        # immediate subdirectory names — one per book.
        wa = list(os.walk(self.path))[0]
        for i in wa[1]:
            self.books.append(i)
    def play_audio_by_name(self, name, offset=0):
        # Play the file ``name`` (relative to self.path) from ``offset``
        # seconds. Returns the backend's play result, or False when there
        # is no player or the file does not exist.
        if(self.aplayer is not None) and os.path.isfile(self.path + name):
            b = self.aplayer.play_file(self.path + name)
            # Short pause so the backend is ready before seeking.
            time.sleep(0.1)
            self.aplayer.set_pos(offset)
            self.cur_audio = name
            log("playing " + name)
            return b
        else:
            return False
    def set_aplayer(self, apl):
        # Swap the audio backend, shutting down the previous one first.
        if(self.aplayer is not None):
            self.aplayer.turn_off()
            self.update_book_state()
        self.aplayer = apl
    def get_aplayer(self):
        # NOTE(review): returns the module-level ``aplayer`` module, not
        # ``self.aplayer`` — looks like a bug; confirm with callers.
        return aplayer
    def play_book(self, name):
        """Start or continue playing a book. If another book is playing,
        it is saved and stopped first."""
        self.update_book_state()
        if(self.cur_book is not None):
            self.cur_book.save()
        self.cur_book = audiobook.Audiobook(self, name)
        log("Cur book is " + name)
    def play(self):
        # Resume playback on the backend, if one is attached.
        if(self.aplayer is not None):
            self.aplayer.play()
    def pause(self):
        # Persist current position, then pause the backend.
        self.update_book_state()
        if(self.aplayer is not None):
            self.aplayer.pause()
    def play_forw(self):
        # Advance to the current book's next file/time and play it.
        self.update_book_state()
        a = self.cur_book.get_cur_file_and_time()
        log(a)
        self.play_audio_by_name(a)
    def play_back(self):
        # NOTE(review): only records state; no backwards seek implemented.
        self.update_book_state()
    def load(self):
        # Load persisted player state, if the JSON file exists.
        # NOTE(review): ``dt`` is read but never applied — restoration
        # appears unimplemented.
        if(os.path.isfile(self.path + "booksplayer.json")):
            with open(self.path + "booksplayer.json", "r") as fl:
                dt = json.load(fl)
                fl.close()
    def save(self):
        # Persist player state; currently an empty dict plus a per-book
        # save via cur_book.save().
        dt = {
        }
        if(self.cur_book is not None):
            self.cur_book.save()
            self.update_book_state()
        with open(self.path + "booksplayer.json", "w") as fl:
            json.dump(dt, fl)
            fl.close()
    def update_book_state(self):
        # Record the current audio file and playback position on the book.
        try:
            if(self.cur_book is not None):
                time.sleep(0.05)
                self.cur_book.set_cur_state(audio=self.cur_book.player_to_local_path(self.cur_audio), time=self.aplayer.get_pos())
        except:
            # NOTE(review): bare except deliberately swallows backend
            # errors (best-effort state save), but also hides real bugs —
            # consider narrowing.
            pass
    def turn_on(self):
        # Scan for books and hook end-of-track events to auto-advance.
        self.refresh_path()
        self.aplayer.add_endevent(self.on_audio_end)
    def turn_off(self):
        # Shut down the backend and persist state.
        if(self.aplayer is not None):
            self.aplayer.turn_off()
        self.save()
    def get_type(self):
        # Identifier used to distinguish player implementations.
        return "book_player"
    def on_audio_end(self):
        # End-of-track callback: advance to the next file.
        self.play_forw()
    def play_pos(self, p):
        # Jump to absolute book position ``p``: resolve (file, offset)
        # through the book and start playback there.
        b = self.cur_book.get_audio_and_time_by_pos(p)
        self.play_audio_by_name(b[0], offset=b[1])
| {
"content_hash": "c9d2bcf23fc5764e26e261aa418d8e3f",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 130,
"avg_line_length": 26.504,
"alnum_prop": 0.555086024750981,
"repo_name": "SL-RU/RaspiBluePlayer",
"id": "9a7c83193f9130d53336fca93a9353c40bfe3062",
"size": "3337",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "booksplayer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "28504"
},
{
"name": "HTML",
"bytes": "7028"
},
{
"name": "Python",
"bytes": "314400"
}
],
"symlink_target": ""
} |
'''
@author: sheng
@license:
'''
# Static metadata for one acupuncture point.
SPELL=u'shànglián'          # pinyin spelling with tone marks
CN=u'上廉'                   # Chinese name
NAME=u'shanglian42'         # ASCII module identifier
CHANNEL='largeintestine'    # meridian (channel) short name
CHANNEL_FULLNAME='LargeIntestineChannelofHand-Yangming'
SEQ='LI9'                   # standard point code on the channel
if __name__ == '__main__':
    pass
| {
"content_hash": "c523d942cbd5696c9ac25671960804f0",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 55,
"avg_line_length": 13.3125,
"alnum_prop": 0.6807511737089202,
"repo_name": "sinotradition/meridian",
"id": "cd956b8ef88e061148ca5173bcf914b0c3b094e6",
"size": "253",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "meridian/acupoints/shanglian42.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "239622"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = """
module: na_ontap_ntp
short_description: NetApp ONTAP NTP server
extends_documentation_fragment:
- netapp.na_ontap
version_added: '2.6'
author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
description:
- Create or delete or modify NTP server in ONTAP
options:
state:
description:
- Whether the specified NTP server should exist or not.
choices: ['present', 'absent']
default: 'present'
server_name:
description:
- The name of the NTP server to manage.
required: True
version:
description:
- give version for NTP server
choices: ['auto', '3', '4']
default: 'auto'
"""
EXAMPLES = """
- name: Create NTP server
na_ontap_ntp:
state: present
version: auto
server_name: "{{ server_name }}"
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
- name: Delete NTP server
na_ontap_ntp:
state: absent
server_name: "{{ server_name }}"
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
"""
RETURN = """
"""
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible.module_utils.netapp as netapp_utils
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
class NetAppOntapNTPServer(object):
    """Create, delete or modify an NTP server entry on an ONTAP system."""

    def __init__(self):
        """Parse module arguments and open the ZAPI connection."""
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, choices=[
                'present', 'absent'], default='present'),
            server_name=dict(required=True, type='str'),
            version=dict(required=False, type='str', default='auto',
                         choices=['auto', '3', '4']),
        ))
        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )
        parameters = self.module.params
        # set up state variables
        self.state = parameters['state']
        self.server_name = parameters['server_name']
        self.version = parameters['version']
        if not HAS_NETAPP_LIB:
            # Fail early: every ZAPI call below needs the NetApp-Lib bindings.
            self.module.fail_json(
                msg="the python NetApp-Lib module is required")
        else:
            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)

    def get_ntp_server(self):
        """
        Look up the NTP server entry named ``self.server_name``.

        :return: dict with 'server-name' and 'version' when exactly one
            matching record exists, otherwise None.
        :rtype: dict or None
        """
        ntp_iter = netapp_utils.zapi.NaElement('ntp-server-get-iter')
        ntp_info = netapp_utils.zapi.NaElement('ntp-server-info')
        ntp_info.add_new_child('server-name', self.server_name)
        query = netapp_utils.zapi.NaElement('query')
        query.add_child_elem(ntp_info)
        ntp_iter.add_child_elem(query)
        result = self.server.invoke_successfully(ntp_iter, True)
        return_value = None
        if result.get_child_by_name('num-records') and \
                int(result.get_child_content('num-records')) == 1:
            # Hoist the repeated attributes-list navigation the original
            # implementation performed once per field.
            server_info = result.get_child_by_name('attributes-list').\
                get_child_by_name('ntp-server-info')
            return_value = {
                'server-name': server_info.get_child_content('server-name'),
                'version': server_info.get_child_content('version')
            }
        return return_value

    def create_ntp_server(self):
        """Create the NTP server entry with the requested version."""
        ntp_server_create = netapp_utils.zapi.NaElement.create_node_with_children(
            'ntp-server-create', **{'server-name': self.server_name,
                                    'version': self.version
                                    })
        try:
            self.server.invoke_successfully(ntp_server_create,
                                            enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error creating ntp server %s: %s'
                                  % (self.server_name, to_native(error)),
                                  exception=traceback.format_exc())

    def delete_ntp_server(self):
        """Delete the NTP server entry."""
        ntp_server_delete = netapp_utils.zapi.NaElement.create_node_with_children(
            'ntp-server-delete', **{'server-name': self.server_name})
        try:
            self.server.invoke_successfully(ntp_server_delete,
                                            enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error deleting ntp server %s: %s'
                                  % (self.server_name, to_native(error)),
                                  exception=traceback.format_exc())

    def modify_version(self):
        """Change the NTP protocol version of the existing server entry."""
        # Local variable renamed from the original 'ntp_modify_versoin' typo.
        ntp_modify_version = netapp_utils.zapi.NaElement.create_node_with_children(
            'ntp-server-modify',
            **{'server-name': self.server_name, 'version': self.version})
        try:
            self.server.invoke_successfully(ntp_modify_version,
                                            enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error modifying version for ntp server %s: %s'
                                  % (self.server_name, to_native(error)),
                                  exception=traceback.format_exc())

    def apply(self):
        """Apply the requested state to the ntp-server: create, delete or
        modify as needed, then exit reporting ``changed``."""
        changed = False
        ntp_modify = False
        results = netapp_utils.get_cserver(self.server)
        cserver = netapp_utils.setup_na_ontap_zapi(
            module=self.module, vserver=results)
        netapp_utils.ems_log_event("na_ontap_ntp", cserver)
        ntp_server_details = self.get_ntp_server()
        if ntp_server_details is not None:
            if self.state == 'absent':  # delete
                changed = True
            elif self.state == 'present' and self.version:
                # modify version when it differs from the recorded one
                if self.version != ntp_server_details['version']:
                    ntp_modify = True
                    changed = True
        else:
            if self.state == 'present':  # create
                changed = True
        if changed:
            if self.module.check_mode:
                # Check mode: report the pending change without executing it.
                pass
            else:
                if self.state == 'present':
                    if ntp_server_details is None:
                        self.create_ntp_server()
                    elif ntp_modify:
                        self.modify_version()
                elif self.state == 'absent':
                    self.delete_ntp_server()
        self.module.exit_json(changed=changed)
def main():
    """Module entry point: build the NTP object and apply the state."""
    NetAppOntapNTPServer().apply()
if __name__ == '__main__':
main()
| {
"content_hash": "b5f1e643b1e4b824ba64ba85242762cc",
"timestamp": "",
"source": "github",
"line_count": 221,
"max_line_length": 85,
"avg_line_length": 34.41628959276018,
"alnum_prop": 0.5512753089666053,
"repo_name": "SergeyCherepanov/ansible",
"id": "6c578805876a9cd3e9eb5943ae9689c931f8019d",
"size": "7748",
"binary": false,
"copies": "37",
"ref": "refs/heads/master",
"path": "ansible/ansible/modules/storage/netapp/na_ontap_ntp.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Shell",
"bytes": "824"
}
],
"symlink_target": ""
} |
try:
# This API exists only in Python 2.6 and higher. :(
import multiprocessing
except ImportError:
multiprocessing = None
import ctypes
import errno
import logging
import os
import platform
import StringIO
import signal
import subprocess
import sys
import time
from webkitpy.common.system.deprecated_logging import tee
from webkitpy.common.system.filesystem import FileSystem
from webkitpy.python24 import versioning
_log = logging.getLogger("webkitpy.common.system")
class ScriptError(Exception):
    """Raised when a spawned script fails; carries the command, exit code,
    captured output and working directory for rich error reporting."""

    def _string_from_args(self, args, limit=100):
        # Custom list rendering so huge argument vectors don't bloat the
        # message; anything longer than *limit* is truncated with "...".
        rendered = unicode(args)
        if len(rendered) <= limit:
            return rendered
        # We could make this much fancier, but for now this is OK.
        return rendered[:limit - 3] + "..."

    def __init__(self, message=None, script_args=None, exit_code=None,
                 output=None, cwd=None):
        if not message:
            message = 'Failed to run "%s"' % self._string_from_args(script_args)
        if exit_code:
            message += " exit_code: %d" % exit_code
        if cwd:
            message += " cwd: %s" % cwd
        Exception.__init__(self, message)
        self.script_args = script_args  # 'args' is already used by Exception
        self.exit_code = exit_code
        self.output = output
        self.cwd = cwd

    def message_with_output(self, output_limit=500):
        """Return the message, appending (possibly truncated) child output."""
        if not self.output:
            return unicode(self)
        if output_limit and len(self.output) > output_limit:
            tail = self.output[-output_limit:]
            return u"%s\n\nLast %s characters of output:\n%s" % \
                (self, output_limit, tail)
        return u"%s\n\n%s" % (self, self.output)

    def command_name(self):
        """Return the basename of the command that failed."""
        command_path = self.script_args
        if type(command_path) is list:
            command_path = command_path[0]
        return os.path.basename(command_path)
def run_command(*args, **kwargs):
    # FIXME: This should not be a global static.
    # New code should use Executive.run_command directly instead
    executive = Executive()
    return executive.run_command(*args, **kwargs)
class Executive(object):
    """Centralizes how webkitpy launches, monitors and kills child
    processes, handling platform quirks: Windows/Cygwin pid semantics,
    per-platform argument encodings, and the close_fds Python bug.
    NOTE(review): Python 2 code (unicode(), `except OSError, e`).
    """
    def _should_close_fds(self):
        # We need to pass close_fds=True to work around Python bug #2320
        # (otherwise we can hang when we kill DumpRenderTree when we are running
        # multiple threads). See http://bugs.python.org/issue2320 .
        # Note that close_fds isn't supported on Windows, but this bug only
        # shows up on Mac and Linux.
        return sys.platform not in ('win32', 'cygwin')
    def _run_command_with_teed_output(self, args, teed_output):
        """Run args, streaming every stdout/stderr line to teed_output.
        Returns the child's exit code."""
        args = map(unicode, args) # Popen will throw an exception if args are non-strings (like int())
        args = map(self._encode_argument_if_needed, args)
        child_process = subprocess.Popen(args,
                                         stdout=subprocess.PIPE,
                                         stderr=subprocess.STDOUT,
                                         close_fds=self._should_close_fds())
        # Use our own custom wait loop because Popen ignores a tee'd
        # stderr/stdout.
        # FIXME: This could be improved not to flatten output to stdout.
        while True:
            output_line = child_process.stdout.readline()
            if output_line == "" and child_process.poll() != None:
                # poll() is not threadsafe and can throw OSError due to:
                # http://bugs.python.org/issue1731717
                return child_process.poll()
            # We assume that the child process wrote to us in utf-8,
            # so no re-encoding is necessary before writing here.
            teed_output.write(output_line)
    # FIXME: Remove this deprecated method and move callers to run_command.
    # FIXME: This method is a hack to allow running command which both
    # capture their output and print out to stdin. Useful for things
    # like "build-webkit" where we want to display to the user that we're building
    # but still have the output to stuff into a log file.
    def run_and_throw_if_fail(self, args, quiet=False, decode_output=True):
        """Run args, teeing output to stdout (unless quiet), and raise
        ScriptError on non-zero exit.  Returns the captured output."""
        # Cache the child's output locally so it can be used for error reports.
        child_out_file = StringIO.StringIO()
        tee_stdout = sys.stdout
        if quiet:
            dev_null = open(os.devnull, "w") # FIXME: Does this need an encoding?
            tee_stdout = dev_null
        child_stdout = tee(child_out_file, tee_stdout)
        exit_code = self._run_command_with_teed_output(args, child_stdout)
        if quiet:
            dev_null.close()
        child_output = child_out_file.getvalue()
        child_out_file.close()
        if decode_output:
            child_output = child_output.decode(self._child_process_encoding())
        if exit_code:
            raise ScriptError(script_args=args,
                              exit_code=exit_code,
                              output=child_output)
        return child_output
    def cpu_count(self):
        """Return the number of logical CPUs, falling back to per-platform
        probes (sysctl / env var / sysconf) when multiprocessing is absent."""
        if multiprocessing:
            return multiprocessing.cpu_count()
        # Darn. We don't have the multiprocessing package.
        system_name = platform.system()
        if system_name == "Darwin":
            return int(self.run_command(["sysctl", "-n", "hw.ncpu"]))
        elif system_name == "Windows":
            return int(os.environ.get('NUMBER_OF_PROCESSORS', 1))
        elif system_name == "Linux":
            num_cores = os.sysconf("SC_NPROCESSORS_ONLN")
            if isinstance(num_cores, int) and num_cores > 0:
                return num_cores
        # This quantity is a lie but probably a reasonable guess for modern
        # machines.
        return 2
    @staticmethod
    def interpreter_for_script(script_path, fs=FileSystem()):
        """Guess an interpreter from the script's shebang line: returns
        sys.executable for python, 'perl'/'ruby' for those, else None.
        NOTE: the FileSystem() default is evaluated once at class-definition
        time and shared across calls."""
        lines = fs.read_text_file(script_path).splitlines()
        if not len(lines):
            return None
        first_line = lines[0]
        if not first_line.startswith('#!'):
            return None
        if first_line.find('python') > -1:
            return sys.executable
        if first_line.find('perl') > -1:
            return 'perl'
        if first_line.find('ruby') > -1:
            return 'ruby'
        return None
    def kill_process(self, pid):
        """Attempts to kill the given pid.
        Will fail silently if pid does not exist or insufficient permisssions."""
        if sys.platform == "win32":
            # We only use taskkill.exe on windows (not cygwin) because subprocess.pid
            # is a CYGWIN pid and taskkill.exe expects a windows pid.
            # Thankfully os.kill on CYGWIN handles either pid type.
            command = ["taskkill.exe", "/f", "/pid", pid]
            # taskkill will exit 128 if the process is not found. We should log.
            self.run_command(command, error_handler=self.ignore_error)
            return
        # According to http://docs.python.org/library/os.html
        # os.kill isn't available on Windows. python 2.5.5 os.kill appears
        # to work in cygwin, however it occasionally raises EAGAIN.
        retries_left = 10 if sys.platform == "cygwin" else 1
        while retries_left > 0:
            try:
                retries_left -= 1
                os.kill(pid, signal.SIGKILL)
            except OSError, e:
                if e.errno == errno.EAGAIN:
                    if retries_left <= 0:
                        _log.warn("Failed to kill pid %s. Too many EAGAIN errors." % pid)
                    continue
                if e.errno == errno.ESRCH: # The process does not exist.
                    _log.warn("Called kill_process with a non-existant pid %s" % pid)
                    return
                raise
    def _win32_check_running_pid(self, pid):
        """Walk the Win32 Toolhelp process snapshot and return True when a
        process with th32ProcessID == pid is found."""
        class PROCESSENTRY32(ctypes.Structure):
            _fields_ = [("dwSize", ctypes.c_ulong),
                        ("cntUsage", ctypes.c_ulong),
                        ("th32ProcessID", ctypes.c_ulong),
                        ("th32DefaultHeapID", ctypes.c_ulong),
                        ("th32ModuleID", ctypes.c_ulong),
                        ("cntThreads", ctypes.c_ulong),
                        ("th32ParentProcessID", ctypes.c_ulong),
                        ("pcPriClassBase", ctypes.c_ulong),
                        ("dwFlags", ctypes.c_ulong),
                        ("szExeFile", ctypes.c_char * 260)]
        CreateToolhelp32Snapshot = ctypes.windll.kernel32.CreateToolhelp32Snapshot
        Process32First = ctypes.windll.kernel32.Process32First
        Process32Next = ctypes.windll.kernel32.Process32Next
        CloseHandle = ctypes.windll.kernel32.CloseHandle
        TH32CS_SNAPPROCESS = 0x00000002 # win32 magic number
        hProcessSnap = CreateToolhelp32Snapshot(TH32CS_SNAPPROCESS, 0)
        pe32 = PROCESSENTRY32()
        pe32.dwSize = ctypes.sizeof(PROCESSENTRY32)
        result = False
        if not Process32First(hProcessSnap, ctypes.byref(pe32)):
            _log.debug("Failed getting first process.")
            CloseHandle(hProcessSnap)
            return result
        while True:
            if pe32.th32ProcessID == pid:
                result = True
                break
            if not Process32Next(hProcessSnap, ctypes.byref(pe32)):
                break
        CloseHandle(hProcessSnap)
        return result
    def check_running_pid(self, pid):
        """Return True if pid is alive, otherwise return False."""
        if sys.platform in ('darwin', 'linux2', 'cygwin'):
            try:
                # Signal 0 performs the permission/existence check without
                # actually delivering a signal.
                os.kill(pid, 0)
                return True
            except OSError:
                return False
        elif sys.platform == 'win32':
            return self._win32_check_running_pid(pid)
        # Unsupported platform.
        assert(False)
    def _windows_image_name(self, process_name):
        """Append .exe to a bare process name for taskkill."""
        name, extension = os.path.splitext(process_name)
        if not extension:
            # taskkill expects processes to end in .exe
            # If necessary we could add a flag to disable appending .exe.
            process_name = "%s.exe" % name
        return process_name
    def kill_all(self, process_name):
        """Attempts to kill processes matching process_name.
        Will fail silently if no process are found."""
        if sys.platform in ("win32", "cygwin"):
            image_name = self._windows_image_name(process_name)
            command = ["taskkill.exe", "/f", "/im", image_name]
            # taskkill will exit 128 if the process is not found. We should log.
            self.run_command(command, error_handler=self.ignore_error)
            return
        # FIXME: This is inconsistent that kill_all uses TERM and kill_process
        # uses KILL. Windows is always using /f (which seems like -KILL).
        # We should pick one mode, or add support for switching between them.
        # Note: Mac OS X 10.6 requires -SIGNALNAME before -u USER
        command = ["killall", "-TERM", "-u", os.getenv("USER"), process_name]
        # killall returns 1 if no process can be found and 2 on command error.
        # FIXME: We should pass a custom error_handler to allow only exit_code 1.
        # We should log in exit_code == 1
        self.run_command(command, error_handler=self.ignore_error)
    # Error handlers do not need to be static methods once all callers are
    # updated to use an Executive object.
    @staticmethod
    def default_error_handler(error):
        # Default policy: re-raise the ScriptError.
        raise error
    @staticmethod
    def ignore_error(error):
        # Best-effort policy: swallow the ScriptError.
        pass
    def _compute_stdin(self, input):
        """Returns (stdin, string_to_communicate)"""
        # FIXME: We should be returning /dev/null for stdin
        # or closing stdin after process creation to prevent
        # child processes from getting input from the user.
        if not input:
            return (None, None)
        if hasattr(input, "read"): # Check if the input is a file.
            return (input, None) # Assume the file is in the right encoding.
        # Popen in Python 2.5 and before does not automatically encode unicode objects.
        # http://bugs.python.org/issue5290
        # See https://bugs.webkit.org/show_bug.cgi?id=37528
        # for an example of a regresion caused by passing a unicode string directly.
        # FIXME: We may need to encode differently on different platforms.
        if isinstance(input, unicode):
            input = input.encode(self._child_process_encoding())
        return (subprocess.PIPE, input)
    def _command_for_printing(self, args):
        """Returns a print-ready string representing command args.
        The string should be copy/paste ready for execution in a shell."""
        escaped_args = []
        for arg in args:
            if isinstance(arg, unicode):
                # Escape any non-ascii characters for easy copy/paste
                arg = arg.encode("unicode_escape")
            # FIXME: Do we need to fix quotes here?
            escaped_args.append(arg)
        return " ".join(escaped_args)
    # FIXME: run_and_throw_if_fail should be merged into this method.
    def run_command(self,
                    args,
                    cwd=None,
                    input=None,
                    error_handler=None,
                    return_exit_code=False,
                    return_stderr=True,
                    decode_output=True):
        """Popen wrapper for convenience and to work around python bugs."""
        assert(isinstance(args, list) or isinstance(args, tuple))
        start_time = time.time()
        args = map(unicode, args) # Popen will throw an exception if args are non-strings (like int())
        args = map(self._encode_argument_if_needed, args)
        stdin, string_to_communicate = self._compute_stdin(input)
        # return_stderr merges stderr into the captured stdout stream.
        stderr = subprocess.STDOUT if return_stderr else None
        process = subprocess.Popen(args,
                                   stdin=stdin,
                                   stdout=subprocess.PIPE,
                                   stderr=stderr,
                                   cwd=cwd,
                                   close_fds=self._should_close_fds())
        output = process.communicate(string_to_communicate)[0]
        # run_command automatically decodes to unicode() unless explicitly told not to.
        if decode_output:
            output = output.decode(self._child_process_encoding())
        # wait() is not threadsafe and can throw OSError due to:
        # http://bugs.python.org/issue1731717
        exit_code = process.wait()
        _log.debug('"%s" took %.2fs' % (self._command_for_printing(args), time.time() - start_time))
        if return_exit_code:
            return exit_code
        if exit_code:
            script_error = ScriptError(script_args=args,
                                       exit_code=exit_code,
                                       output=output,
                                       cwd=cwd)
            (error_handler or self.default_error_handler)(script_error)
        return output
    def _child_process_encoding(self):
        """Encoding used for child process arguments and output."""
        # Win32 Python 2.x uses CreateProcessA rather than CreateProcessW
        # to launch subprocesses, so we have to encode arguments using the
        # current code page.
        if sys.platform == 'win32' and versioning.compare_version(sys, '3.0')[0] < 0:
            return 'mbcs'
        # All other platforms use UTF-8.
        # FIXME: Using UTF-8 on Cygwin will confuse Windows-native commands
        # which will expect arguments to be encoded using the current code
        # page.
        return 'utf-8'
    def _should_encode_child_process_arguments(self):
        """True when unicode args must be pre-encoded before Popen."""
        # Cygwin's Python's os.execv doesn't support unicode command
        # arguments, and neither does Cygwin's execv itself.
        if sys.platform == 'cygwin':
            return True
        # Win32 Python 2.x uses CreateProcessA rather than CreateProcessW
        # to launch subprocesses, so we have to encode arguments using the
        # current code page.
        if sys.platform == 'win32' and versioning.compare_version(sys, '3.0')[0] < 0:
            return True
        return False
    def _encode_argument_if_needed(self, argument):
        # Encode a single argument with the platform encoding when required.
        if not self._should_encode_child_process_arguments():
            return argument
        return argument.encode(self._child_process_encoding())
| {
"content_hash": "1a50b6b8cf12354e852a0a1d6ea99619",
"timestamp": "",
"source": "github",
"line_count": 395,
"max_line_length": 103,
"avg_line_length": 41.75949367088607,
"alnum_prop": 0.5871476204910578,
"repo_name": "mogoweb/webkit_for_android5.1",
"id": "7d198dd43ea2532f8ac0024cac4416c46fbdfd71",
"size": "18079",
"binary": false,
"copies": "15",
"ref": "refs/heads/master",
"path": "webkit/Tools/Scripts/webkitpy/common/system/executive.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "AppleScript",
"bytes": "6772"
},
{
"name": "Assembly",
"bytes": "26025"
},
{
"name": "Awk",
"bytes": "2800"
},
{
"name": "Batchfile",
"bytes": "57337"
},
{
"name": "C",
"bytes": "7713030"
},
{
"name": "C++",
"bytes": "153178707"
},
{
"name": "CMake",
"bytes": "192330"
},
{
"name": "CSS",
"bytes": "483041"
},
{
"name": "Common Lisp",
"bytes": "9920"
},
{
"name": "DIGITAL Command Language",
"bytes": "5243"
},
{
"name": "DTrace",
"bytes": "1931"
},
{
"name": "Go",
"bytes": "3744"
},
{
"name": "HTML",
"bytes": "14998422"
},
{
"name": "Java",
"bytes": "1522083"
},
{
"name": "JavaScript",
"bytes": "18008829"
},
{
"name": "Lex",
"bytes": "42554"
},
{
"name": "Lua",
"bytes": "13768"
},
{
"name": "M4",
"bytes": "49839"
},
{
"name": "Makefile",
"bytes": "476166"
},
{
"name": "Module Management System",
"bytes": "9756"
},
{
"name": "Objective-C",
"bytes": "2798053"
},
{
"name": "Objective-C++",
"bytes": "7846322"
},
{
"name": "PHP",
"bytes": "66595"
},
{
"name": "Perl",
"bytes": "1130475"
},
{
"name": "Perl 6",
"bytes": "445215"
},
{
"name": "Python",
"bytes": "5503045"
},
{
"name": "QML",
"bytes": "3331"
},
{
"name": "QMake",
"bytes": "294800"
},
{
"name": "R",
"bytes": "290"
},
{
"name": "Roff",
"bytes": "273562"
},
{
"name": "Ruby",
"bytes": "81928"
},
{
"name": "Scheme",
"bytes": "10604"
},
{
"name": "Shell",
"bytes": "488223"
},
{
"name": "Yacc",
"bytes": "153801"
},
{
"name": "xBase",
"bytes": "328"
}
],
"symlink_target": ""
} |
'''Autogenerated by get_gl_extensions script, do not edit!'''
from OpenGL import platform as _p
from OpenGL.GL import glget
EXTENSION_NAME = 'GL_APPLE_float_pixels'
# Register the extension's GL constants (name/value pairs parsed from the
# string below) into this module's namespace.  Autogenerated — do not edit
# the constant table by hand.
_p.unpack_constants( """GL_HALF_APPLE 0x140B
GL_RGBA_FLOAT32_APPLE 0x8814
GL_RGB_FLOAT32_APPLE 0x8815
GL_ALPHA_FLOAT32_APPLE 0x8816
GL_INTENSITY_FLOAT32_APPLE 0x8817
GL_LUMINANCE_FLOAT32_APPLE 0x8818
GL_LUMINANCE_ALPHA_FLOAT32_APPLE 0x8819
GL_RGBA_FLOAT16_APPLE 0x881A
GL_RGB_FLOAT16_APPLE 0x881B
GL_ALPHA_FLOAT16_APPLE 0x881C
GL_INTENSITY_FLOAT16_APPLE 0x881D
GL_LUMINANCE_FLOAT16_APPLE 0x881E
GL_LUMINANCE_ALPHA_FLOAT16_APPLE 0x881F
GL_COLOR_FLOAT_APPLE 0x8A0F""", globals())
def glInitFloatPixelsAPPLE():
    '''Return boolean indicating whether this extension is available'''
    # Queries the current GL context for GL_APPLE_float_pixels support.
    # Autogenerated module ("do not edit") — code left untouched.
    from OpenGL import extensions
    return extensions.hasGLExtension( EXTENSION_NAME )
| {
"content_hash": "ef5218052d8004ae70507d8e0bd71bf9",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 71,
"avg_line_length": 34.833333333333336,
"alnum_prop": 0.7954545454545454,
"repo_name": "frederica07/Dragon_Programming_Process",
"id": "ba52be7159b0c5fccd286b774738461967222270",
"size": "836",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "PyOpenGL-3.0.2/OpenGL/raw/GL/APPLE/float_pixels.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "1548"
},
{
"name": "Python",
"bytes": "2558317"
}
],
"symlink_target": ""
} |
from . import domainresource
class DetectedIssue(domainresource.DomainResource):
    """ Clinical issue with action.
    Indicates an actual or potential clinical issue with or between one or more
    active or proposed clinical actions for a patient; e.g. Drug-drug
    interaction, Ineffective treatment frequency, Procedure-condition conflict,
    etc.
    """
    # FHIR resource type name used by the framework for (de)serialization.
    resource_type = "DetectedIssue"
    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.
        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        self.author = None
        """ The provider or device that identified the issue.
        Type `FHIRReference` (represented as `dict` in JSON). """
        self.code = None
        """ Issue Category, e.g. drug-drug, duplicate therapy, etc..
        Type `CodeableConcept` (represented as `dict` in JSON). """
        self.detail = None
        """ Description and context.
        Type `str`. """
        self.evidence = None
        """ Supporting evidence.
        List of `DetectedIssueEvidence` items (represented as `dict` in JSON). """
        self.identifiedDateTime = None
        """ When identified.
        Type `FHIRDate` (represented as `str` in JSON). """
        self.identifiedPeriod = None
        """ When identified.
        Type `Period` (represented as `dict` in JSON). """
        self.identifier = None
        """ Unique id for the detected issue.
        List of `Identifier` items (represented as `dict` in JSON). """
        self.implicated = None
        """ Problem resource.
        List of `FHIRReference` items (represented as `dict` in JSON). """
        self.mitigation = None
        """ Step taken to address.
        List of `DetectedIssueMitigation` items (represented as `dict` in JSON). """
        self.patient = None
        """ Associated patient.
        Type `FHIRReference` (represented as `dict` in JSON). """
        self.reference = None
        """ Authority for issue.
        Type `str`. """
        self.severity = None
        """ high | moderate | low.
        Type `str`. """
        self.status = None
        """ registered | preliminary | final | amended +.
        Type `str`. """
        # The base class performs the actual jsondict-driven population.
        super(DetectedIssue, self).__init__(jsondict=jsondict, strict=strict)
    def elementProperties(self):
        # Each tuple: (attribute name, JSON name, type, is_list, one-of-many
        # group or None, required flag).  identifiedDateTime/identifiedPeriod
        # share the "identified" one-of group; the final True on "status"
        # appears to mark it as required — confirm against the fhirclient
        # base class.
        js = super(DetectedIssue, self).elementProperties()
        js.extend([
            ("author", "author", fhirreference.FHIRReference, False, None, False),
            ("code", "code", codeableconcept.CodeableConcept, False, None, False),
            ("detail", "detail", str, False, None, False),
            ("evidence", "evidence", DetectedIssueEvidence, True, None, False),
            ("identifiedDateTime", "identifiedDateTime", fhirdate.FHIRDate, False, "identified", False),
            ("identifiedPeriod", "identifiedPeriod", period.Period, False, "identified", False),
            ("identifier", "identifier", identifier.Identifier, True, None, False),
            ("implicated", "implicated", fhirreference.FHIRReference, True, None, False),
            ("mitigation", "mitigation", DetectedIssueMitigation, True, None, False),
            ("patient", "patient", fhirreference.FHIRReference, False, None, False),
            ("reference", "reference", str, False, None, False),
            ("severity", "severity", str, False, None, False),
            ("status", "status", str, False, None, True),
        ])
        return js
from . import backboneelement
class DetectedIssueEvidence(backboneelement.BackboneElement):
    """ Supporting evidence.
    Supporting evidence or manifestations that provide the basis for
    identifying the detected issue such as a GuidanceResponse or MeasureReport.
    """
    # FHIR element type name used by the framework for (de)serialization.
    resource_type = "DetectedIssueEvidence"
    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.
        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        self.code = None
        """ Manifestation.
        List of `CodeableConcept` items (represented as `dict` in JSON). """
        self.detail = None
        """ Supporting information.
        List of `FHIRReference` items (represented as `dict` in JSON). """
        super(DetectedIssueEvidence, self).__init__(jsondict=jsondict, strict=strict)
    def elementProperties(self):
        # Tuples: (attribute name, JSON name, type, is_list, one-of group,
        # required flag) — same layout as DetectedIssue.elementProperties.
        js = super(DetectedIssueEvidence, self).elementProperties()
        js.extend([
            ("code", "code", codeableconcept.CodeableConcept, True, None, False),
            ("detail", "detail", fhirreference.FHIRReference, True, None, False),
        ])
        return js
class DetectedIssueMitigation(backboneelement.BackboneElement):
    """ Step taken to address.
    Indicates an action that has been taken or is committed to reduce or
    eliminate the likelihood of the risk identified by the detected issue from
    manifesting. Can also reflect an observation of known mitigating factors
    that may reduce/eliminate the need for any action.
    """
    # FHIR element type name used by the framework for (de)serialization.
    resource_type = "DetectedIssueMitigation"
    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.
        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        self.action = None
        """ What mitigation?.
        Type `CodeableConcept` (represented as `dict` in JSON). """
        self.author = None
        """ Who is committing?.
        Type `FHIRReference` (represented as `dict` in JSON). """
        self.date = None
        """ Date committed.
        Type `FHIRDate` (represented as `str` in JSON). """
        super(DetectedIssueMitigation, self).__init__(jsondict=jsondict, strict=strict)
    def elementProperties(self):
        # Tuples: (attribute name, JSON name, type, is_list, one-of group,
        # required flag); "action" carries True in the final slot.
        js = super(DetectedIssueMitigation, self).elementProperties()
        js.extend([
            ("action", "action", codeableconcept.CodeableConcept, False, None, True),
            ("author", "author", fhirreference.FHIRReference, False, None, False),
            ("date", "date", fhirdate.FHIRDate, False, None, False),
        ])
        return js
import sys
# Sibling model modules are imported at module bottom.  If a relative import
# fails — typically because a circular import is in progress — fall back to
# the partially-initialized module that is already registered in sys.modules.
try:
    from . import codeableconcept
except ImportError:
    codeableconcept = sys.modules[__package__ + '.codeableconcept']
try:
    from . import fhirdate
except ImportError:
    fhirdate = sys.modules[__package__ + '.fhirdate']
try:
    from . import fhirreference
except ImportError:
    fhirreference = sys.modules[__package__ + '.fhirreference']
try:
    from . import identifier
except ImportError:
    identifier = sys.modules[__package__ + '.identifier']
try:
    from . import period
except ImportError:
    period = sys.modules[__package__ + '.period']
| {
"content_hash": "d952af5c83e447edda07e2a538e5f950",
"timestamp": "",
"source": "github",
"line_count": 197,
"max_line_length": 104,
"avg_line_length": 38.08629441624365,
"alnum_prop": 0.6170864987338398,
"repo_name": "all-of-us/raw-data-repository",
"id": "e2201a7a7bbb693c6f88b38f61126bd5ee3147de",
"size": "7690",
"binary": false,
"copies": "1",
"ref": "refs/heads/devel",
"path": "rdr_service/lib_fhir/fhirclient_4_0_0/models/detectedissue.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1866"
},
{
"name": "Mako",
"bytes": "1715"
},
{
"name": "Python",
"bytes": "17040924"
},
{
"name": "R",
"bytes": "2212"
},
{
"name": "Shell",
"bytes": "92213"
}
],
"symlink_target": ""
} |
"""
Virtual namespace for other pacakges to extend the growler server
"""
from typing import Dict
from types import ModuleType
import sys
from importlib import (
import_module,
)
class GrowlerExtensionImporter:
    """Module-like proxy that lazily resolves ``growler.ext.<name>``.

    Attribute access looks the name up in the ``growler_ext`` virtual
    namespace, aliases the result under ``growler.ext.<name>`` in
    ``sys.modules``, and caches it for subsequent lookups.
    """
    __path__ = 'growler.ext'
    __name__ = 'GrowlerExtensionImporter'
    __mods__: Dict[str, ModuleType] = {}

    def __getattr__(self, module_name):
        """Resolve *module_name* from the growler_ext virtual namespace,
        caching and aliasing the imported module."""
        cached = self.__mods__.get(module_name)
        if cached is not None:
            return cached
        # First lookup: import the 'real' module from the namespace package.
        module = import_module('growler_ext.' + module_name)
        # Register an alias so 'growler.ext.<name>' resolves in sys.modules.
        sys.modules['growler.ext.' + module_name] = module
        # Remember it so later attribute access skips the import machinery.
        self.__mods__[module_name] = module
        return module
# Replace this module object in sys.modules with the importer instance so
# that every attribute access on `growler.ext` goes through __getattr__.
sys.modules[__name__] = GrowlerExtensionImporter()
| {
"content_hash": "1e2fd375a3b5dacc94c9c20b31076499",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 77,
"avg_line_length": 24.853658536585368,
"alnum_prop": 0.6025515210991168,
"repo_name": "pyGrowler/Growler",
"id": "937833ccf4799dceeea3331498e7f25de40eb64b",
"size": "1049",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "growler/ext/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "249705"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from ..nx import NetworkXMetrics
def test_NetworkXMetrics_inputs():
    """Every declared input trait must carry the expected metadata."""
    input_map = dict(
        compute_clique_related_measures=dict(usedefault=True),
        ignore_exception=dict(nohash=True, usedefault=True),
        in_file=dict(mandatory=True),
        out_edge_metrics_matlab=dict(genfile=True),
        out_global_metrics_matlab=dict(genfile=True),
        out_k_core=dict(usedefault=True),
        out_k_crust=dict(usedefault=True),
        out_k_shell=dict(usedefault=True),
        out_node_metrics_matlab=dict(genfile=True),
        out_pickled_extra_measures=dict(usedefault=True),
        treat_as_weighted_graph=dict(usedefault=True),
    )
    inputs = NetworkXMetrics.input_spec()
    for key, metadata in input_map.items():
        for metakey, value in metadata.items():
            assert getattr(inputs.traits()[key], metakey) == value
def test_NetworkXMetrics_outputs():
    """Every declared output trait must exist (no extra metadata expected)."""
    output_map = dict(
        edge_measure_networks=dict(),
        edge_measures_matlab=dict(),
        global_measures_matlab=dict(),
        gpickled_network_files=dict(),
        k_core=dict(),
        k_crust=dict(),
        k_networks=dict(),
        k_shell=dict(),
        matlab_dict_measures=dict(),
        matlab_matrix_files=dict(),
        node_measure_networks=dict(),
        node_measures_matlab=dict(),
        pickled_extra_measures=dict(),
    )
    outputs = NetworkXMetrics.output_spec()
    for key, metadata in output_map.items():
        for metakey, value in metadata.items():
            assert getattr(outputs.traits()[key], metakey) == value
| {
"content_hash": "196acdd9aa3c83bbf0a5f7613c9b5986",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 74,
"avg_line_length": 28.571428571428573,
"alnum_prop": 0.651875,
"repo_name": "mick-d/nipype",
"id": "46c077af1bfcc1b48bd7782dc8366b9b63e35229",
"size": "1654",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "nipype/interfaces/cmtk/tests/test_auto_NetworkXMetrics.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "9823"
},
{
"name": "KiCad",
"bytes": "3797"
},
{
"name": "Makefile",
"bytes": "1854"
},
{
"name": "Matlab",
"bytes": "1999"
},
{
"name": "Python",
"bytes": "4607773"
},
{
"name": "Shell",
"bytes": "380"
},
{
"name": "Tcl",
"bytes": "43408"
}
],
"symlink_target": ""
} |
"""A wrapper around the PX-Web API.
As implementations and versions vary, this is best used as a base class,
for more specific scrapers to extend.
If used directly, an API endpoint must be set:
scraper = PXWeb(base_url="http://api.example.com/")
# ...or:
scraper = PXWeb()
scraper.base_url = "http://api.example.com/"
"""
import requests
from statscraper import (BaseScraper, Collection, Result,
Dataset, Dimension, InvalidData)
from statscraper.compat import JSONDecodeError
class PXWeb(BaseScraper):
    """Scraper for PX-Web statistical APIs.

    Navigates the API's tree of collections/datasets, exposes dataset
    dimensions, and POSTs queries to fetch result rows.
    """

    base_url = None  # API endpoint

    @BaseScraper.on("init")
    def _get_args(self, *args, **kwargs):
        """Store `base_url`, if given on init.
        This is convenient when the PXWeb scraper is used directly by an end user.
        """
        if "base_url" in kwargs and kwargs["base_url"]:
            self.base_url = kwargs["base_url"]

    def _api_path(self, item):
        """Get the API path for the current cursor position.

        :raises NotImplementedError: if no `base_url` has been configured.
        """
        if self.base_url is None:
            raise NotImplementedError("base_url not set")
        path = "/".join([x.blob["id"] for x in item.path])
        return "/".join([self.base_url, path])

    def _fetch_itemslist(self, item):
        """Yield the sub-collections and datasets available under *item*.

        The API marks listable levels with type "l"; everything else is a
        queryable table (dataset).
        """
        data = requests.get(self._api_path(item)).json()
        for d in data:
            if d["type"] == "l":
                yield Collection(d["id"], label=d["text"], blob=d)
            else:
                yield Dataset(d["id"], label=d["text"], blob=d)

    def _fetch_dimensions(self, dataset):
        """Yield the dimensions (PX-Web "variables") of *dataset*."""
        data = requests.get(self._api_path(dataset)).json()
        try:
            for d in data["variables"]:
                yield Dimension(d["code"],
                                label=d["text"],
                                allowed_values=d["values"])
        except KeyError:
            # Preserved behavior: a response without "variables" yields a
            # single None dimension rather than raising.
            yield None

    def _fetch_data(self, dataset, query):
        """POST *query* to the API and yield one Result per value cell.

        :param query: mapping of dimension code -> (filtertype, value(s)),
            or None for an empty query.
        :raises InvalidData: when the API returns an HTML error page or a
            non-JSON response (commonly a too-large result).
        """
        if query is None:
            query = {}
        body = {
            'query': [{
                'code': key,
                'selection': {
                    'filter': filtertype,
                    # value can be a list or a value
                    'values': value if isinstance(value, list) else [value]
                }
            } for key, (filtertype, value) in query.items()],
            'response': {
                'format': "json"
            }
        }
        try:
            raw = requests.post(self._api_path(dataset), json=body)
            # Bug fix: the Content-Type header usually carries a charset
            # suffix (e.g. "text/html; charset=utf-8"), so an equality test
            # missed most HTML error pages; also tolerate a missing header.
            if raw.headers.get("content-type", "").startswith("text/html"):
                # This is an error message
                raise InvalidData(f"""Error message from PX Web:

{raw.content}

Check your query for spelling errors, or try reducing the size.
""")
            data = raw.json()
        except JSONDecodeError:
            raise InvalidData("""No valid response from PX Web.
Check your query for spelling errors, or try reducing the size.
This error is frequently due to a too large result being requested.""")
        # All available dimensions are not always returned.
        # What is returned depends on the query
        raw_return_dimension = data["columns"]
        # Filter out dimensions only (type "c" columns hold the values)
        raw_return_dimension = [x for x in raw_return_dimension if x["type"] != "c"]
        for row in data[u"data"]:
            for value in row[u"values"]:
                dimensions = {}
                # 'key' contains one value for each dimension,
                # always preserving order.
                for d, v in zip(raw_return_dimension, row[u"key"]):
                    dimensions[d["code"]] = v
                yield Result(value, dimensions=dimensions)
| {
"content_hash": "ff1f0e0b81b95a2b0d9f11b55af637bf",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 84,
"avg_line_length": 34.56603773584906,
"alnum_prop": 0.5477620087336245,
"repo_name": "jplusplus/statscraper",
"id": "fe893aebdc935d3ac6c63daa97abf70a7fc7803d",
"size": "3664",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "statscraper/scrapers/PXWebScraper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "108598"
}
],
"symlink_target": ""
} |
from elasticsearch_dsl.search import Q, Search
def test_count_all(data_client):
    """The full git index contains exactly 53 documents."""
    search = Search(using=data_client).index("git")
    assert search.count() == 53
def test_count_prefetch(data_client, mocker):
    """count() reuses an executed response's total until it becomes a bound."""
    mocker.spy(data_client, "count")
    search = Search(using=data_client).index("git")
    search.execute()
    # After execute(), count() is answered from the cached response.
    assert search.count() == 53
    assert data_client.count.call_count == 0
    # A "gte" relation means the cached total is only a lower bound, so
    # count() must go back to the backend.
    search._response.hits.total.relation = "gte"
    assert search.count() == 53
    assert data_client.count.call_count == 1
def test_count_filter(data_client):
    """Only two documents lack parent_shas: the initial commit and the repo doc."""
    no_parents = Search(using=data_client).index("git").filter(
        ~Q("exists", field="parent_shas")
    )
    assert no_parents.count() == 2
| {
"content_hash": "10a0676c013f042fcfb7090bc22679e4",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 88,
"avg_line_length": 28.36,
"alnum_prop": 0.6685472496473907,
"repo_name": "elastic/elasticsearch-dsl-py",
"id": "4b2ed9584c3adfa20c24bf7f6c394267bf6ca425",
"size": "1497",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_integration/test_count.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "423092"
}
],
"symlink_target": ""
} |
import discord
from discord.ext import commands
import random
class Keks:
    """
    The Keks Cog
    """

    def __init__(self, bot):
        self.bot = bot
        # Replies chosen uniformly at random when the bot receives a cookie.
        self.cookie_answers = [
            "Oh vielen Dank!",
            "Om nom nom :yum:",
            "Ich liebe Kekse! :laughing:",
            ":blush:"
        ]

    @commands.command(name="keks", pass_context=True)
    async def _give_cookie(self, ctx):
        """
        Give the bot a cookie
        """
        reply = random.choice(self.cookie_answers)
        await self.bot.say(reply)
def setup(bot):
    """Extension entry point: register the Keks cog on *bot*."""
    bot.add_cog(Keks(bot))
| {
"content_hash": "d3cdcd067c3906e09d48d557c405a601",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 62,
"avg_line_length": 21.285714285714285,
"alnum_prop": 0.5285234899328859,
"repo_name": "failgod-marcus/failCogs",
"id": "b5bf2c4db54eb7a30acc319ae7caa1b3fd422378",
"size": "596",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "keks/keks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "35725"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from django.utils.translation import ugettext as _
from django.conf import settings
from django.core import validators
from django.core.exceptions import ValidationError
from django.db import connection
from django.db.models import Q
from zerver.decorator import authenticated_api_view, authenticated_json_post_view, \
has_request_variables, REQ, JsonableError, \
to_non_negative_int, to_non_negative_float
from django.utils.html import escape as escape_html
from django.views.decorators.csrf import csrf_exempt
from zerver.lib import bugdown
from zerver.lib.actions import recipient_for_emails, do_update_message_flags, \
compute_mit_user_fullname, compute_irc_user_fullname, compute_jabber_user_fullname, \
create_mirror_user_if_needed, check_send_message, do_update_message, \
extract_recipients
from zerver.lib.cache import generic_bulk_cached_fetch
from zerver.lib.query import last_n
from zerver.lib.response import json_success, json_error
from zerver.lib.utils import statsd
from zerver.lib.validator import \
check_list, check_int, check_dict, check_string, check_bool
from zerver.models import Message, UserProfile, Stream, Subscription, \
Recipient, UserMessage, bulk_get_recipients, get_recipient, \
get_user_profile_by_email, get_stream, valid_stream_name, \
parse_usermessage_flags, to_dict_cache_key_id, extract_message_dict, \
stringify_message_dict, \
resolve_email_to_domain, get_realm, get_active_streams, \
bulk_get_streams
import sqlalchemy
from sqlalchemy import func
from sqlalchemy.sql import select, join, column, literal_column, literal, and_, \
or_, not_, union_all, alias
import re
import ujson
from zerver.lib.rest import rest_dispatch as _rest_dispatch
from six.moves import map
import six
# CSRF-exempt shim forwarding REST dispatch with this module's globals() so
# the generic dispatcher can resolve view functions defined in this module.
rest_dispatch = csrf_exempt((lambda request, *args, **kwargs: _rest_dispatch(request, globals(), *args, **kwargs)))
# This is a Pool that doesn't close connections. Therefore it can be used with
# existing Django database connections.
class NonClosingPool(sqlalchemy.pool.NullPool):
    def status(self):
        # Identifies this pool implementation in SQLAlchemy status output.
        return "NonClosingPool"
    def _do_return_conn(self, conn):
        # Deliberately a no-op: never close or reset the underlying Django
        # connection — Django owns its lifecycle.
        pass
    def recreate(self):
        # Build another NonClosingPool carrying over this pool's settings.
        return self.__class__(creator=self._creator, # type: ignore # __class__
                              recycle=self._recycle,
                              use_threadlocal=self._use_threadlocal,
                              reset_on_return=self._reset_on_return,
                              echo=self.echo,
                              logging_name=self._orig_logging_name,
                              _dispatch=self.dispatch)
# Lazily-created singleton engine shared by all requests in this process.
sqlalchemy_engine = None
def get_sqlalchemy_connection():
    """Return a SQLAlchemy connection that reuses Django's DB connection.

    The engine is created on first use and cached in the module-level
    ``sqlalchemy_engine`` global; autocommit is disabled so transaction
    control stays with Django.
    """
    global sqlalchemy_engine
    if sqlalchemy_engine is None:
        def get_dj_conn():
            # Reuse (re-establishing if needed) Django's own connection.
            connection.ensure_connection()
            return connection.connection
        sqlalchemy_engine = sqlalchemy.create_engine('postgresql://',
                                                     creator=get_dj_conn,
                                                     poolclass=NonClosingPool,
                                                     pool_reset_on_return=False)
    sa_connection = sqlalchemy_engine.connect()
    sa_connection.execution_options(autocommit=False)
    return sa_connection
class BadNarrowOperator(JsonableError):
    """Raised when a narrow term has an operator or operand we don't understand."""
    def __init__(self, desc, status_code=400):
        # Keep the offending description for the error payload.
        self.desc = desc
        self.status_code = status_code
    def to_json_error_msg(self):
        return _('Invalid narrow operator: {}').format(self.desc)
# When you add a new operator to this, also update zerver/lib/narrow.py
class NarrowBuilder(object):
    """Translate narrow terms into SQLAlchemy WHERE conditions.

    Each supported operator is implemented by a ``by_<operator>`` method
    taking the query built so far, the operand, and a ``maybe_negate``
    callable (``not_`` for negated terms, identity otherwise) and
    returning the augmented query.
    """
    def __init__(self, user_profile, msg_id_column):
        self.user_profile = user_profile
        # Column holding the message id in the query being built; it
        # differs depending on whether the outer query selects from
        # zerver_message or zerver_usermessage.
        self.msg_id_column = msg_id_column
    def add_term(self, query, term):
        """Apply one narrow term dict (operator/operand/negated) to query."""
        # We have to be careful here because we're letting users call a method
        # by name! The prefix 'by_' prevents it from colliding with builtin
        # Python __magic__ stuff.
        operator = term['operator']
        operand = term['operand']
        negated = term.get('negated', False)
        method_name = 'by_' + operator.replace('-', '_')
        method = getattr(self, method_name, None)
        if method is None:
            raise BadNarrowOperator('unknown operator ' + operator)
        if negated:
            maybe_negate = not_
        else:
            maybe_negate = lambda cond: cond
        return method(query, operand, maybe_negate)
    def by_has(self, query, operand, maybe_negate):
        """Narrow to messages that have an attachment, image, or link."""
        if operand not in ['attachment', 'image', 'link']:
            raise BadNarrowOperator("unknown 'has' operand " + operand)
        col_name = 'has_' + operand
        cond = column(col_name)
        return query.where(maybe_negate(cond))
    def by_in(self, query, operand, maybe_negate):
        """Narrow to 'home' (non-muted) or 'all' messages."""
        if operand == 'home':
            conditions = exclude_muting_conditions(self.user_profile, [])
            return query.where(and_(*conditions))
        elif operand == 'all':
            return query
        raise BadNarrowOperator("unknown 'in' operand " + operand)
    def by_is(self, query, operand, maybe_negate):
        """Narrow by message property: private, starred, mentioned/alerted."""
        if operand == 'private':
            # Join to zerver_recipient so we can filter on recipient type.
            query = query.select_from(join(query.froms[0], "zerver_recipient",
                                           column("recipient_id") ==
                                           literal_column("zerver_recipient.id")))
            cond = or_(column("type") == Recipient.PERSONAL,
                       column("type") == Recipient.HUDDLE)
            return query.where(maybe_negate(cond))
        elif operand == 'starred':
            cond = column("flags").op("&")(UserMessage.flags.starred.mask) != 0
            return query.where(maybe_negate(cond))
        elif operand == 'mentioned' or operand == 'alerted':
            cond = column("flags").op("&")(UserMessage.flags.mentioned.mask) != 0
            return query.where(maybe_negate(cond))
        raise BadNarrowOperator("unknown 'is' operand " + operand)
    _alphanum = frozenset(
        'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789')
    def _pg_re_escape(self, pattern):
        r"""Escape user input to place in a regex.

        Python's re.escape escapes unicode characters in a way which
        postgres fails on; non-ASCII characters are instead emitted as
        postgres-compatible \uXXXX escapes.
        """
        s = list(pattern)
        for i, c in enumerate(s):
            if c not in self._alphanum:
                if c == '\000':
                    # NUL can't appear literally; use the octal escape.
                    # Bug fix: this previously assigned to s[1] instead of
                    # s[i], clobbering the wrong slot (or raising
                    # IndexError for a short pattern).
                    s[i] = '\\000'
                elif ord(c) >= 128:
                    # convert the character to hex postgres regex will take
                    # \uXXXX
                    s[i] = '\\u{:0>4x}'.format(ord(c))
                else:
                    s[i] = '\\' + c
        return ''.join(s)
    def by_stream(self, query, operand, maybe_negate):
        """Narrow to a stream (with MIT zephyr-style related-stream fanout)."""
        stream = get_stream(operand, self.user_profile.realm)
        if stream is None:
            raise BadNarrowOperator('unknown stream ' + operand)
        if self.user_profile.realm.domain == "mit.edu":
            # MIT users expect narrowing to "social" to also show messages to /^(un)*social(.d)*$/
            # (unsocial, ununsocial, social.d, etc)
            m = re.search(r'^(?:un)*(.+?)(?:\.d)*$', stream.name, re.IGNORECASE)
            if m:
                base_stream_name = m.group(1)
            else:
                base_stream_name = stream.name
            matching_streams = get_active_streams(self.user_profile.realm).filter(
                name__iregex=r'^(un)*%s(\.d)*$' % (self._pg_re_escape(base_stream_name),))
            matching_stream_ids = [matching_stream.id for matching_stream in matching_streams]
            recipients_map = bulk_get_recipients(Recipient.STREAM, matching_stream_ids)
            cond = column("recipient_id").in_([recipient.id for recipient in recipients_map.values()])
            return query.where(maybe_negate(cond))
        recipient = get_recipient(Recipient.STREAM, type_id=stream.id)
        cond = column("recipient_id") == recipient.id
        return query.where(maybe_negate(cond))
    def by_topic(self, query, operand, maybe_negate):
        """Narrow to a topic (case-insensitive; MIT gets .d-suffix fanout)."""
        if self.user_profile.realm.domain == "mit.edu":
            # MIT users expect narrowing to topic "foo" to also show messages to /^foo(.d)*$/
            # (foo, foo.d, foo.d.d, etc)
            m = re.search(r'^(.*?)(?:\.d)*$', operand, re.IGNORECASE)
            if m:
                base_topic = m.group(1)
            else:
                base_topic = operand
            # Additionally, MIT users expect the empty instance and
            # instance "personal" to be the same.
            if base_topic in ('', 'personal', '(instance "")'):
                regex = r'^(|personal|\(instance ""\))(\.d)*$'
            else:
                regex = r'^%s(\.d)*$' % (self._pg_re_escape(base_topic),)
            cond = column("subject").op("~*")(regex)
            return query.where(maybe_negate(cond))
        cond = func.upper(column("subject")) == func.upper(literal(operand))
        return query.where(maybe_negate(cond))
    def by_sender(self, query, operand, maybe_negate):
        """Narrow to messages sent by the given email address."""
        try:
            sender = get_user_profile_by_email(operand)
        except UserProfile.DoesNotExist:
            raise BadNarrowOperator('unknown user ' + operand)
        cond = column("sender_id") == literal(sender.id)
        return query.where(maybe_negate(cond))
    def by_near(self, query, operand, maybe_negate):
        # 'near' is handled entirely client-side (it only positions the
        # view); it does not restrict the query.
        return query
    def by_id(self, query, operand, maybe_negate):
        """Narrow to a single message id."""
        cond = self.msg_id_column == literal(operand)
        return query.where(maybe_negate(cond))
    def by_pm_with(self, query, operand, maybe_negate):
        """Narrow to private messages with a user or comma-separated huddle."""
        if ',' in operand:
            # Huddle
            try:
                emails = [e.strip() for e in operand.split(',')]
                recipient = recipient_for_emails(emails, False,
                                                 self.user_profile, self.user_profile)
            except ValidationError:
                raise BadNarrowOperator('unknown recipient ' + operand)
            cond = column("recipient_id") == recipient.id
            return query.where(maybe_negate(cond))
        else:
            # Personal message
            self_recipient = get_recipient(Recipient.PERSONAL, type_id=self.user_profile.id)
            if operand == self.user_profile.email:
                # Personals with self
                cond = and_(column("sender_id") == self.user_profile.id,
                            column("recipient_id") == self_recipient.id)
                return query.where(maybe_negate(cond))
            # Personals with other user; include both directions.
            try:
                narrow_profile = get_user_profile_by_email(operand)
            except UserProfile.DoesNotExist:
                raise BadNarrowOperator('unknown user ' + operand)
            narrow_recipient = get_recipient(Recipient.PERSONAL, narrow_profile.id)
            cond = or_(and_(column("sender_id") == narrow_profile.id,
                            column("recipient_id") == self_recipient.id),
                       and_(column("sender_id") == self.user_profile.id,
                            column("recipient_id") == narrow_recipient.id))
            return query.where(maybe_negate(cond))
    def by_search(self, query, operand, maybe_negate):
        """Full-text search narrow; also adds match-location columns."""
        tsquery = func.plainto_tsquery(literal("zulip.english_us_search"), literal(operand))
        ts_locs_array = func.ts_match_locs_array
        query = query.column(ts_locs_array(literal("zulip.english_us_search"),
                                           column("rendered_content"),
                                           tsquery).label("content_matches"))
        # We HTML-escape the subject in Postgres to avoid doing a server round-trip
        query = query.column(ts_locs_array(literal("zulip.english_us_search"),
                                           func.escape_html(column("subject")),
                                           tsquery).label("subject_matches"))
        # Do quoted string matching. We really want phrase
        # search here so we can ignore punctuation and do
        # stemming, but there isn't a standard phrase search
        # mechanism in Postgres
        for term in re.findall('"[^"]+"|\S+', operand):
            if term[0] == '"' and term[-1] == '"':
                term = term[1:-1]
                term = '%' + connection.ops.prep_for_like_query(term) + '%'
                cond = or_(column("content").ilike(term),
                           column("subject").ilike(term))
                query = query.where(maybe_negate(cond))
        cond = column("search_tsvector").op("@@")(tsquery)
        return query.where(maybe_negate(cond))
def highlight_string(string, locs):
    """Wrap each (offset, length) byte-range of ``string`` in a highlight span.

    ``locs`` holds byte offsets into the UTF-8 encoding of the text (as
    produced by the Postgres ts_match_locs_array function), so all slicing
    is done on bytes and the result is decoded back to text at the end.

    Bug fix: the original accumulated into a text string (``result = ''``)
    while appending byte slices, which raises TypeError on Python 3; we now
    collect byte pieces and join them (also avoiding quadratic ``+=``).
    """
    if not isinstance(string, bytes):
        # Text input: offsets are byte offsets, so work on the encoding.
        string = string.encode('utf-8')
    highlight_start = b'<span class="highlight">'
    highlight_stop = b'</span>'
    pieces = []
    pos = 0
    for (offset, length) in locs:
        pieces.append(string[pos:offset])
        pieces.append(highlight_start)
        pieces.append(string[offset:offset + length])
        pieces.append(highlight_stop)
        pos = offset + length
    pieces.append(string[pos:])
    return b''.join(pieces).decode('utf-8')
def get_search_fields(rendered_content, subject, content_matches, subject_matches):
    """Build the match_content/match_subject fields for one search result row."""
    return {
        'match_content': highlight_string(rendered_content, content_matches),
        'match_subject': highlight_string(escape_html(subject), subject_matches),
    }
def narrow_parameter(json):
    """Parse the 'narrow' request parameter into a list of term dicts, or None."""
    # FIXME: A hack to support old mobile clients
    if json == '{}':
        return None
    data = ujson.loads(json)
    if not isinstance(data, list):
        raise ValueError("argument is not a list")

    def convert_term(elem):
        # Legacy clients send a bare [operator, operand] string pair.
        if isinstance(elem, list):
            is_string = lambda x: isinstance(x, str) or isinstance(x, six.text_type)
            if len(elem) != 2 or not all(is_string(x) for x in elem):
                raise ValueError("element is not a string pair")
            return dict(operator=elem[0], operand=elem[1])
        if isinstance(elem, dict):
            error = check_dict([
                ('operator', check_string),
                ('operand', check_string),
            ])('elem', elem)
            if error:
                raise JsonableError(error)
            # whitelist the fields we care about for now
            return {'operator': elem['operator'],
                    'operand': elem['operand'],
                    'negated': elem.get('negated', False)}
        raise ValueError("element is not a dictionary")

    return [convert_term(elem) for elem in data]
def is_public_stream(stream, realm):
    """Return True only for an existing, public stream named ``stream``.

    Raises JsonableError for a syntactically invalid stream name.
    """
    if not valid_stream_name(stream):
        raise JsonableError(_("Invalid stream name"))
    stream_obj = get_stream(stream, realm)
    return stream_obj is not None and stream_obj.is_public()
def ok_to_include_history(narrow, realm):
    """Decide whether the narrow may include Message rows with no UserMessage.

    There are occasions where we need to find Message rows that have no
    corresponding UserMessage row, because the user is reading a public
    stream that might include messages that were sent while the user was
    not subscribed, but which they are allowed to see.  We have to be very
    careful about constructing queries in those situations, so this
    function returns True only if we are 100% sure that we're going to add
    a clause to the query that narrows to a particular public stream on
    the user's realm.  If we screw this up, then we can get into a nasty
    situation of polluting our narrow results with messages from other
    realms.
    """
    if narrow is None:
        return False
    include_history = False
    for term in narrow:
        if term['operator'] == "stream" and not term.get('negated', False):
            if is_public_stream(term['operand'], realm):
                include_history = True
    # Disable historical messages if the user is narrowing on anything
    # that's a property on the UserMessage table.  There cannot be
    # historical messages in these cases anyway.
    for term in narrow:
        if term['operator'] == "is":
            return False
    return include_history
def get_stream_name_from_narrow(narrow):
    """Return the lowercased operand of the first 'stream' term, or None."""
    stream_operands = (term['operand'].lower()
                       for term in narrow
                       if term['operator'] == 'stream')
    return next(stream_operands, None)
def exclude_muting_conditions(user_profile, narrow):
    """Build SQLAlchemy conditions excluding muted streams/topics.

    Returns a (possibly empty) list of conditions to AND into a message
    query.  When the narrow does not already pin a stream, a condition
    excluding all not-in-home-view stream recipients is added; muted
    topics are excluded by (recipient, upper(subject)) pairs.
    """
    conditions = []
    stream_name = get_stream_name_from_narrow(narrow)
    if stream_name is None:
        # Not narrowed to a stream: exclude every stream the user has
        # removed from their home view.
        rows = Subscription.objects.filter(
            user_profile=user_profile,
            active=True,
            in_home_view=False,
            recipient__type=Recipient.STREAM
        ).values('recipient_id')
        muted_recipient_ids = [row['recipient_id'] for row in rows]
        condition = not_(column("recipient_id").in_(muted_recipient_ids))
        conditions.append(condition)
    muted_topics = ujson.loads(user_profile.muted_topics)
    if muted_topics:
        if stream_name is not None:
            # If narrowed to a stream, only that stream's muted topics matter.
            muted_topics = [m for m in muted_topics if m[0].lower() == stream_name]
            if not muted_topics:
                return conditions
        muted_streams = bulk_get_streams(user_profile.realm,
                                         [muted[0] for muted in muted_topics])
        muted_recipients = bulk_get_recipients(Recipient.STREAM,
                                               [stream.id for stream in six.itervalues(muted_streams)])
        recipient_map = dict((s.name.lower(), muted_recipients[s.id].id)
                             for s in six.itervalues(muted_streams))
        # Drop muted-topic entries whose stream no longer exists.
        muted_topics = [m for m in muted_topics if m[0].lower() in recipient_map]
        if muted_topics:
            def mute_cond(muted):
                # One (stream recipient, case-insensitive topic) exclusion.
                stream_cond = column("recipient_id") == recipient_map[muted[0].lower()]
                topic_cond = func.upper(column("subject")) == func.upper(muted[1])
                return and_(stream_cond, topic_cond)
            condition = not_(or_(*list(map(mute_cond, muted_topics))))
            return conditions + [condition]
    return conditions
@has_request_variables
def get_old_messages_backend(request, user_profile,
                             anchor = REQ(converter=int),
                             num_before = REQ(converter=to_non_negative_int),
                             num_after = REQ(converter=to_non_negative_int),
                             narrow = REQ('narrow', converter=narrow_parameter, default=None),
                             use_first_unread_anchor = REQ(default=False, converter=ujson.loads),
                             apply_markdown=REQ(default=True,
                                                converter=ujson.loads)):
    """Fetch a window of messages around an anchor, optionally narrowed.

    Builds a SQLAlchemy query over zerver_message and/or zerver_usermessage
    (depending on whether history outside the user's subscriptions may be
    included), applies the narrow terms, fetches up to num_before/num_after
    rows around the anchor, and returns rendered message dicts with flags
    and (for searches) match-highlighting fields.
    """
    include_history = ok_to_include_history(narrow, user_profile.realm)
    if include_history and not use_first_unread_anchor:
        # Querying historical messages: select directly from zerver_message.
        query = select([column("id").label("message_id")], None, "zerver_message")
        inner_msg_id_col = literal_column("zerver_message.id")
    elif narrow is None:
        # No narrow: the user's own UserMessage rows suffice.
        query = select([column("message_id"), column("flags")],
                       column("user_profile_id") == literal(user_profile.id),
                       "zerver_usermessage")
        inner_msg_id_col = column("message_id")
    else:
        # TODO: Don't do this join if we're not doing a search
        query = select([column("message_id"), column("flags")],
                       column("user_profile_id") == literal(user_profile.id),
                       join("zerver_usermessage", "zerver_message",
                            literal_column("zerver_usermessage.message_id") ==
                            literal_column("zerver_message.id")))
        inner_msg_id_col = column("message_id")
    num_extra_messages = 1
    is_search = False
    if narrow is not None:
        # Add some metadata to our logging data for narrows
        verbose_operators = []
        for term in narrow:
            if term['operator'] == "is":
                verbose_operators.append("is:" + term['operand'])
            else:
                verbose_operators.append(term['operator'])
        request._log_data['extra'] = "[%s]" % (",".join(verbose_operators),)
        # Build the query for the narrow
        num_extra_messages = 0
        builder = NarrowBuilder(user_profile, inner_msg_id_col)
        for term in narrow:
            if term['operator'] == 'search' and not is_search:
                # Searches need extra columns for highlighting matches.
                query = query.column("subject").column("rendered_content")
                is_search = True
            query = builder.add_term(query, term)
    # We add 1 to the number of messages requested if no narrow was
    # specified to ensure that the resulting list always contains the
    # anchor message. If a narrow was specified, the anchor message
    # might not match the narrow anyway.
    if num_after != 0:
        num_after += num_extra_messages
    else:
        num_before += num_extra_messages
    sa_conn = get_sqlalchemy_connection()
    if use_first_unread_anchor:
        condition = column("flags").op("&")(UserMessage.flags.read.mask) == 0
        # We exclude messages on muted topics when finding the first unread
        # message in this narrow
        muting_conditions = exclude_muting_conditions(user_profile, narrow)
        if muting_conditions:
            condition = and_(condition, *muting_conditions)
        first_unread_query = query.where(condition)
        first_unread_query = first_unread_query.order_by(inner_msg_id_col.asc()).limit(1)
        first_unread_result = list(sa_conn.execute(first_unread_query).fetchall())
        if len(first_unread_result) > 0:
            anchor = first_unread_result[0][0]
        else:
            # Nothing unread: anchor far past any real message id.
            anchor = 10000000000000000
    before_query = None
    after_query = None
    if num_before != 0:
        before_anchor = anchor
        if num_after != 0:
            # Don't include the anchor in both the before query and the after query
            before_anchor = anchor - 1
        before_query = query.where(inner_msg_id_col <= before_anchor) \
                            .order_by(inner_msg_id_col.desc()).limit(num_before)
    if num_after != 0:
        after_query = query.where(inner_msg_id_col >= anchor) \
                           .order_by(inner_msg_id_col.asc()).limit(num_after)
    if num_before == 0 and num_after == 0:
        # This can happen when a narrow is specified.
        after_query = query.where(inner_msg_id_col == anchor)
    if before_query is not None:
        if after_query is not None:
            query = union_all(before_query.self_group(), after_query.self_group())
        else:
            query = before_query
    else:
        query = after_query
    main_query = alias(query)
    query = select(main_query.c, None, main_query).order_by(column("message_id").asc())
    # This is a hack to tag the query we use for testing
    query = query.prefix_with("/* get_old_messages */")
    query_result = list(sa_conn.execute(query).fetchall())
    # The following is a little messy, but ensures that the code paths
    # are similar regardless of the value of include_history. The
    # 'user_messages' dictionary maps each message to the user's
    # UserMessage object for that message, which we will attach to the
    # rendered message dict before returning it. We attempt to
    # bulk-fetch rendered message dicts from remote cache using the
    # 'messages' list.
    search_fields = dict() # type: Dict[int, Dict[str, str]]
    message_ids = [] # type: List[int]
    user_message_flags = {} # type: Dict[int, List[str]]
    if include_history:
        message_ids = [row[0] for row in query_result]
        # TODO: This could be done with an outer join instead of two queries
        user_message_flags = dict((user_message.message_id, user_message.flags_list()) for user_message in
                                  UserMessage.objects.filter(user_profile=user_profile,
                                                             message__id__in=message_ids))
        for row in query_result:
            message_id = row[0]
            if user_message_flags.get(message_id) is None:
                # Historical message the user never received directly.
                user_message_flags[message_id] = ["read", "historical"]
            if is_search:
                (_, subject, rendered_content, content_matches, subject_matches) = row
                search_fields[message_id] = get_search_fields(rendered_content, subject,
                                                              content_matches, subject_matches)
    else:
        for row in query_result:
            message_id = row[0]
            flags = row[1]
            user_message_flags[message_id] = parse_usermessage_flags(flags)
            message_ids.append(message_id)
            if is_search:
                (_, _, subject, rendered_content, content_matches, subject_matches) = row
                search_fields[message_id] = get_search_fields(rendered_content, subject,
                                                              content_matches, subject_matches)
    cache_transformer = lambda row: Message.build_dict_from_raw_db_row(row, apply_markdown)
    id_fetcher = lambda row: row['id']
    message_dicts = generic_bulk_cached_fetch(lambda message_id: to_dict_cache_key_id(message_id, apply_markdown),
                                              Message.get_raw_db_rows,
                                              message_ids,
                                              id_fetcher=id_fetcher,
                                              cache_transformer=cache_transformer,
                                              extractor=extract_message_dict,
                                              setter=stringify_message_dict)
    message_list = []
    for message_id in message_ids:
        msg_dict = message_dicts[message_id]
        msg_dict.update({"flags": user_message_flags[message_id]})
        msg_dict.update(search_fields.get(message_id, {}))
        message_list.append(msg_dict)
    statsd.incr('loaded_old_messages', len(message_list))
    ret = {'messages': message_list,
           "result": "success",
           "msg": ""}
    return json_success(ret)
@has_request_variables
def update_message_flags(request, user_profile,
                         messages=REQ('messages', validator=check_list(check_int)),
                         operation=REQ('op'), flag=REQ('flag'),
                         all=REQ('all', validator=check_bool, default=False),
                         stream_name=REQ('stream_name', default=None),
                         topic_name=REQ('topic_name', default=None)):
    """Add or remove a flag (e.g. 'read', 'starred') on a set of messages.

    stream_name/topic_name, when given, scope an 'all' update to that
    stream or topic; both are validated before any flags are changed.
    """
    request._log_data["extra"] = "[%s %s]" % (operation, flag)
    stream = None
    if stream_name is not None:
        stream = get_stream(stream_name, user_profile.realm)
        if not stream:
            raise JsonableError(_('No such stream \'%s\'') % (stream_name,))
        if topic_name:
            # Only accept topics that actually exist in this stream for
            # this user's received messages.
            topic_exists = UserMessage.objects.filter(user_profile=user_profile,
                                                      message__recipient__type_id=stream.id,
                                                      message__recipient__type=Recipient.STREAM,
                                                      message__subject__iexact=topic_name).exists()
            if not topic_exists:
                raise JsonableError(_('No such topic \'%s\'') % (topic_name,))
    do_update_message_flags(user_profile, operation, flag, messages, all, stream, topic_name)
    return json_success({'result': 'success',
                         'messages': messages,
                         'msg': ''})
def create_mirrored_message_users(request, user_profile, recipients):
    """Validate and create stub accounts for users referenced by a mirrored message.

    Returns (True, sender_profile) on success, or (False, None) when the
    request lacks a sender, comes from an unrecognized mirroring client, or
    references any user outside the mirror's realm.
    """
    if "sender" not in request.POST:
        return (False, None)

    sender_email = request.POST["sender"].strip().lower()
    referenced_users = set([sender_email])
    if request.POST['type'] == 'private':
        for email in recipients:
            referenced_users.add(email.lower())

    # Pick the realm-membership check and fullname synthesizer for this
    # mirroring client.
    client_name = request.client.name
    if client_name == "zephyr_mirror":
        user_check = same_realm_zephyr_user
        fullname_function = compute_mit_user_fullname
    elif client_name == "irc_mirror":
        user_check = same_realm_irc_user
        fullname_function = compute_irc_user_fullname
    elif client_name in ("jabber_mirror", "JabberMirror"):
        user_check = same_realm_jabber_user
        fullname_function = compute_jabber_user_fullname
    else:
        # Unrecognized mirroring client
        return (False, None)

    # Check that all referenced users are in our realm:
    if not all(user_check(user_profile, email) for email in referenced_users):
        return (False, None)

    # Create users for the referenced users, if needed.
    for email in referenced_users:
        create_mirror_user_if_needed(user_profile.realm, email, fullname_function)

    return (True, get_user_profile_by_email(sender_email))
def same_realm_zephyr_user(user_profile, email):
    """Are the sender and recipient both @mit.edu addresses?

    We have to handle this specially, inferring the domain from the e-mail
    address, because the recipient may not exist in Zulip yet and we may
    need to make a stub MIT user on the fly.
    """
    try:
        validators.validate_email(email)
    except ValidationError:
        return False
    domain = resolve_email_to_domain(email)
    return domain == "mit.edu" and user_profile.realm.domain == "mit.edu"
def same_realm_irc_user(user_profile, email):
    """Check whether ``email`` is an IRC user in user_profile's realm.

    If the realm domain were example.com, the IRC user would need to be
    username@irc.example.com.
    """
    try:
        validators.validate_email(email)
    except ValidationError:
        return False
    domain = resolve_email_to_domain(email)
    # NOTE(review): replace() strips "irc." anywhere in the domain, not just
    # as a prefix (e.g. "foo.irc.com" -> "foo.com") — confirm intended.
    return domain.replace("irc.", "") == user_profile.realm.domain
def same_realm_jabber_user(user_profile, email):
    """Check whether ``email`` belongs to user_profile's realm for Jabber mirroring."""
    try:
        validators.validate_email(email)
    except ValidationError:
        return False
    domain = resolve_email_to_domain(email)
    realm_domain = user_profile.realm.domain
    # The ist.mit.edu realm uses mit.edu email addresses so that their accounts
    # can receive mail.
    if realm_domain == 'ist.mit.edu' and domain == 'mit.edu':
        return True
    return realm_domain == domain
@authenticated_api_view
def api_send_message(request, user_profile):
    # Thin API wrapper around send_message_backend.
    return send_message_backend(request, user_profile)
# We do not @require_login for send_message_backend, since it is used
# both from the API and the web service. Code calling
# send_message_backend should either check the API key or check that
# the user is logged in.
@has_request_variables
def send_message_backend(request, user_profile,
                         message_type_name = REQ('type'),
                         message_to = REQ('to', converter=extract_recipients, default=[]),
                         forged = REQ(default=False),
                         subject_name = REQ('subject', lambda x: x.strip(), None),
                         message_content = REQ('content'),
                         domain = REQ('domain', default=None),
                         local_id = REQ(default=None),
                         queue_id = REQ(default=None)):
    """Send a stream or private message on behalf of the requesting user.

    Also supports forged/mirrored messages (zephyr/IRC/Jabber mirrors and
    the email gateway), subject to the superuser and same-realm security
    checks below.  Returns the new message's id on success.
    """
    client = request.client
    is_super_user = request.user.is_api_super_user
    if forged and not is_super_user:
        return json_error(_("User not authorized for this query"))
    realm = None
    if domain and domain != user_profile.realm.domain:
        if not is_super_user:
            # The email gateway bot needs to be able to send messages in
            # any realm.
            return json_error(_("User not authorized for this query"))
        realm = get_realm(domain)
        if not realm:
            return json_error(_("Unknown domain %s") % (domain,))
    if client.name in ["zephyr_mirror", "irc_mirror", "jabber_mirror", "JabberMirror"]:
        # Here's how security works for mirroring:
        #
        # For private messages, the message must be (1) both sent and
        # received exclusively by users in your realm, and (2)
        # received by the forwarding user.
        #
        # For stream messages, the message must be (1) being forwarded
        # by an API superuser for your realm and (2) being sent to a
        # mirrored stream (any stream for the Zephyr and Jabber
        # mirrors, but only streams with names starting with a "#" for
        # IRC mirrors)
        #
        # The security checks are split between the below code
        # (especially create_mirrored_message_users which checks the
        # same-realm constraint) and recipient_for_emails (which
        # checks that PMs are received by the forwarding user)
        if "sender" not in request.POST:
            return json_error(_("Missing sender"))
        if message_type_name != "private" and not is_super_user:
            return json_error(_("User not authorized for this query"))
        (valid_input, mirror_sender) = \
            create_mirrored_message_users(request, user_profile, message_to)
        if not valid_input:
            return json_error(_("Invalid mirrored message"))
        if client.name == "zephyr_mirror" and user_profile.realm.domain != "mit.edu":
            return json_error(_("Invalid mirrored realm"))
        if (client.name == "irc_mirror" and message_type_name != "private" and
                not message_to[0].startswith("#")):
            return json_error(_("IRC stream names must start with #"))
        sender = mirror_sender
    else:
        sender = user_profile
    ret = check_send_message(sender, client, message_type_name, message_to,
                             subject_name, message_content, forged=forged,
                             forged_timestamp = request.POST.get('time'),
                             forwarder_user_profile=user_profile, realm=realm,
                             local_id=local_id, sender_queue_id=queue_id)
    return json_success({"id": ret})
@authenticated_json_post_view
def json_update_message(request, user_profile):
    # Thin JSON wrapper around update_message_backend.
    return update_message_backend(request, user_profile)
@has_request_variables
def update_message_backend(request, user_profile,
                           message_id=REQ(converter=to_non_negative_int),
                           subject=REQ(default=None),
                           propagate_mode=REQ(default="change_one"),
                           content=REQ(default=None)):
    """Edit a message's subject and/or content; at least one must be given."""
    if (subject, content) == (None, None):
        return json_error(_("Nothing to change"))
    do_update_message(user_profile, message_id, subject, propagate_mode, content)
    return json_success()
@authenticated_json_post_view
@has_request_variables
def json_fetch_raw_message(request, user_profile,
                           message_id=REQ(converter=to_non_negative_int)):
    """Return the raw (unrendered) content of one of the user's own messages."""
    try:
        message = Message.objects.get(id=message_id)
    except Message.DoesNotExist:
        return json_error(_("No such message"))
    if message.sender == user_profile:
        return json_success({"raw_content": message.content})
    return json_error(_("Message was not sent by you"))
@has_request_variables
def render_message_backend(request, user_profile, content=REQ()):
    """Render message content to HTML (markdown preview endpoint)."""
    rendered_content = bugdown.convert(content, user_profile.realm.domain)
    return json_success({"rendered": rendered_content})
@authenticated_json_post_view
def json_messages_in_narrow(request, user_profile):
    # Thin JSON wrapper around messages_in_narrow_backend.
    return messages_in_narrow_backend(request, user_profile)
@has_request_variables
def messages_in_narrow_backend(request, user_profile,
                               msg_ids = REQ(validator=check_list(check_int)),
                               narrow = REQ(converter=narrow_parameter)):
    """Return search-highlighting fields for the given message ids under a narrow."""
    # Note that this function will only work on messages the user
    # actually received
    # TODO: We assume that the narrow is a search. For now this works because
    # the browser only ever calls this function for searches, since it can't
    # apply that narrow operator itself.
    query = select([column("message_id"), column("subject"), column("rendered_content")],
                   and_(column("user_profile_id") == literal(user_profile.id),
                        column("message_id").in_(msg_ids)),
                   join("zerver_usermessage", "zerver_message",
                        literal_column("zerver_usermessage.message_id") ==
                        literal_column("zerver_message.id")))
    builder = NarrowBuilder(user_profile, column("message_id"))
    for term in narrow:
        query = builder.add_term(query, term)
    sa_conn = get_sqlalchemy_connection()
    query_result = list(sa_conn.execute(query).fetchall())
    search_fields = dict()
    for row in query_result:
        # Rows carry the two extra match-locations columns added by the
        # search narrow term.
        (message_id, subject, rendered_content, content_matches, subject_matches) = row
        search_fields[message_id] = get_search_fields(rendered_content, subject,
                                                      content_matches, subject_matches)
    return json_success({"messages": search_fields})
| {
"content_hash": "90d6fac57a5afab1c059b462b6f8e814",
"timestamp": "",
"source": "github",
"line_count": 852,
"max_line_length": 115,
"avg_line_length": 43.99295774647887,
"alnum_prop": 0.5944186542873913,
"repo_name": "peiwei/zulip",
"id": "0f1301ffb12f60a89ecb69275f1e8158166f2cde",
"size": "37482",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "zerver/views/messages.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "164"
},
{
"name": "CSS",
"bytes": "183830"
},
{
"name": "CoffeeScript",
"bytes": "18435"
},
{
"name": "Groovy",
"bytes": "5516"
},
{
"name": "HTML",
"bytes": "397966"
},
{
"name": "JavaScript",
"bytes": "1588795"
},
{
"name": "Nginx",
"bytes": "1228"
},
{
"name": "PHP",
"bytes": "18930"
},
{
"name": "Pascal",
"bytes": "1113"
},
{
"name": "Perl",
"bytes": "383634"
},
{
"name": "Puppet",
"bytes": "96085"
},
{
"name": "Python",
"bytes": "2010761"
},
{
"name": "Ruby",
"bytes": "255867"
},
{
"name": "Shell",
"bytes": "33341"
}
],
"symlink_target": ""
} |
__author__ = 'john'
import csv
from operator import itemgetter
import uuid
from pattern.metrics import similarity, levenshtein, LEVENSHTEIN, DICE
from fuzzywuzzy import fuzz
import nltk
import string
# Paths to the input CSV files; fill these in before running.
london = '<insert TARGET file>'    # TARGET list: public companies
entities = '<insert PRIME file>'   # PRIME list: entities to match against
def getPublicCompanies():
    """Read the TARGET csv (``london``) and return the names in column 1.

    Assumes the first row is a header, which is skipped.  Uses the
    ``next()`` builtin rather than the Python-2-only ``reader.next()``.
    """
    pc = []
    with open(london, 'rb') as fin:
        fR = csv.reader(fin)
        next(fR)  # skip the header row
        for row in fR:
            row = [x.strip() for x in row]
            pc.append(row[1])
    return pc
def getEntityCompanies():
    """Read the PRIME csv (``entities``) and return unique column-34 values.

    First-seen order is preserved.  The original tested membership with
    ``name not in ec`` on a list (O(n) per row, quadratic overall); a
    companion set makes each membership test O(1).  Also uses the
    ``next()`` builtin rather than the Python-2-only ``reader.next()``.
    """
    ec = []
    seen = set()
    with open(entities, 'rb') as fin:
        fR = csv.reader(fin)
        next(fR)  # skip the header row
        for row in fR:
            row = [x.strip() for x in row]
            name = row[34]
            if name not in seen:
                seen.add(name)
                ec.append(name)
    return ec
class Matching(object):
    """
    matching - should be able to take a word for word or row for row
    Test if word is same and/or how similar
    for word in [words,]:
        a = Matching(word, prime)
        score = a.score
    """
    def __init__(self, prime, target):
        self.prime = prime
        self.target = target
        # Rebinds the instance attribute 'score' from the bound method to
        # its computed float value, so callers read ``a.score`` directly.
        self.score = self.score()

    def score(self):
        # Total = fuzzy-metric blend + exact-match bonus + prefix bonus.
        return float(self.match2words()) + float(self.fullMatch()) + float(self.firstPart())

    def match2words(self):
        """Blend several string-similarity metrics into a single number."""
        levenshtein_sim = similarity(self.prime, self.target, metric=LEVENSHTEIN)
        dice_sim = similarity(self.prime, self.target, metric=DICE)
        full_ratio = fuzz.ratio(self.prime, self.target)
        partial = fuzz.partial_ratio(self.prime, self.target)
        head_ratio = fuzz.ratio(self.prime[:5], self.target[:5])
        return levenshtein_sim + dice_sim + full_ratio + partial + head_ratio

    def firstPart(self):
        """Bonus when the leading halves (or first five chars) agree."""
        prime_half = self.prime[:int(len(self.prime) * .5)]
        target_half = self.target[:int(len(self.target) * .5)]
        if prime_half == target_half:
            return 100
        if self.prime[:5] == self.target[:5]:
            return 50
        return 0

    def fullMatch(self):
        """100 for an exact match, else 0."""
        return 100 if self.prime == self.target else 0
class MatchingList(object):
"""
takes two lists of single entities such as company lists, etc
ultimately returns lists --> {primeWord1: {id: idNum, matches: [(id, targetWord1, score), (id, targetWord2, score)]},
primeWord2: {id: idNum, matches: [(id, tW1, score), (id, tW2, score)]}}
companyMatches = MatchingList(cxCompanies, londonCompanies)
matches = companyMatches.match()
b = {'Google':
{'matches': [(UUID('2ea9e172-ec32-4442-ad5c-51a826745b77'), 'Google, Inc.', 176.31428571428572)],
'id': UUID('8e913361-bf05-4f64-be69-21514bc64305')},
'Microsoft, Inc.':
{'matches':
[(UUID('1dd02f45-9e48-47c7-a05a-f09c52ca8e38'), 'Microsoft', 183.4923076923077)],
'id': UUID('22db86d8-d376-4173-8a23-b07e2fbcc0ab')}}
"""
def __init__(self, primeList, targetList):
self.primeList = primeList
self.targetList = targetList
self.primeListIndex = ''
self.targetListIndex = ''
self.pLIDIndex = 0
self.tLIDIndex = 0
self.scoreThreshold = 150.00
self.resultList = {}
def idGenerator(self, row=None, SecOrPri='Prime'): #element=None,
if row:
if SecOrPri == 'Prime':
return row[self.pLIDIndex]
else:
return row[self.tLIDIndex]
else:
return uuid.uuid4()
def matchProcessing(self):
# sorts the result lists and returns top 5 results
pass
def normalize(self, word):
table = string.maketrans("","")
word = word.translate(table, string.punctuation)
return word.lower().strip()
def match(self):
"""
:return:
returns the list
"""
for primeElement in self.primeList:
self.resultList[primeElement] = {'id': self.idGenerator()}
print 'Finding a match for ', primeElement
for targetElement in self.targetList:
#print 'comparing', self.normalize(primeElement), self.normalize(targetElement)
m = Matching(self.normalize(primeElement), self.normalize(targetElement))
score = m.score
row = [self.idGenerator(), targetElement, score]
#if score > self.scoreThreshold:
#print '\t', primeElement, '---> ', targetElement
try:
self.resultList[primeElement]['matches'].append(row)
except KeyError, e:
self.resultList[primeElement]['matches'] = [row, ]
newSortedList = sorted(self.resultList[primeElement]['matches'], key=itemgetter(2), reverse=True)
self.resultList[primeElement]['matches'] = newSortedList
for x in self.resultList[primeElement]['matches'][:5]:
if x[-1] > 200.00:
print '\t', primeElement, '---> ', x[1:]
print'___' * 100
def getMatch(self):
self.match()
rL = self.resultList
closest = {}
for word, matchesID in rL.iteritems():
matches = matchesID['matches']
wordID = matchesID['id']
#print word, wordID, matches
for match in matches:
if match[2] > self.scoreThreshold:
row = word, '---> ', match[1:]
print row
closest[word] = {'id': wordID}
if 'matches' in closest.keys():
closest[word]['matches'].append(match)
else:
closest[word]['matches'] = [match, ]
return closest
class MatchProfile(object):
    """Compare two records field-by-field and score their similarity.

    Still a stub: element-level pairing (Name:Name, Company:Company)
    has not been implemented yet.
    """

    def __init__(self, row1, row2):
        self.rowPrime, self.rowTarget = row1, row2
        self.elementPrime = ''
        self.elementTarget = ''
# Script entry point: match in-house entity names against public companies.
publicCompanies = getPublicCompanies()
cxEntities = getEntityCompanies()
m = MatchingList(cxEntities, publicCompanies)
# Raise the cut-off above the 150.0 default so only strong matches are kept.
m.scoreThreshold = 200.00
# Small in-memory sample handy for a quick manual test:
#m = MatchingList(['Google', 'Microsoft, Inc.'], ['Apple', 'Microsoft', 'what', 'Google, Inc.'])
a = m.getMatch()
print a
"""
Build a recommendation engine for proper matches
Have the program learn from what is a match and what isn't
""" | {
"content_hash": "32d4051e928bf298774c3556b0163ce3",
"timestamp": "",
"source": "github",
"line_count": 216,
"max_line_length": 121,
"avg_line_length": 31.037037037037038,
"alnum_prop": 0.5657816229116945,
"repo_name": "johnconnelly75/matchr",
"id": "9238c01af6ea3a5e121e13f928a7c3a2bfdc4714",
"size": "6704",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "matchr.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6724"
}
],
"symlink_target": ""
} |
import itertools
from sahara.common.policies import base
from sahara.common.policies import cluster
from sahara.common.policies import cluster_template
from sahara.common.policies import cluster_templates
from sahara.common.policies import clusters
from sahara.common.policies import data_source
from sahara.common.policies import data_sources
from sahara.common.policies import image
from sahara.common.policies import images
from sahara.common.policies import job
from sahara.common.policies import job_binaries
from sahara.common.policies import job_binary
from sahara.common.policies import job_binary_internals
from sahara.common.policies import job_executions
from sahara.common.policies import job_template
from sahara.common.policies import job_type
from sahara.common.policies import job_types
from sahara.common.policies import jobs
from sahara.common.policies import node_group_template
from sahara.common.policies import node_group_templates
from sahara.common.policies import plugin
from sahara.common.policies import plugins
def list_rules():
    """Return an iterator over every sahara policy rule.

    Chains the rules from each per-resource policy module, preserving the
    historical ordering (plural modules first, then singular ones).
    """
    policy_modules = (
        base,
        clusters,
        cluster_templates,
        data_sources,
        images,
        job_binaries,
        job_binary_internals,
        job_executions,
        job_types,
        jobs,
        node_group_templates,
        plugins,
        cluster,
        cluster_template,
        data_source,
        image,
        job_binary,
        job_type,
        job,
        node_group_template,
        plugin,
        job_template,
    )
    return itertools.chain.from_iterable(
        module.list_rules() for module in policy_modules)
| {
"content_hash": "1afe0d27afe4eb54b6678f9ea2e09b5f",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 55,
"avg_line_length": 35.86274509803921,
"alnum_prop": 0.7288135593220338,
"repo_name": "openstack/sahara",
"id": "ad56e03b68be1acc85b2b1b8afa12bb50ae95fb4",
"size": "2375",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sahara/common/policies/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "952"
},
{
"name": "Python",
"bytes": "2197746"
},
{
"name": "Shell",
"bytes": "37893"
}
],
"symlink_target": ""
} |
from skdaccess.framework.data_class import DataFetcherStream, TableWrapper
from skdaccess.framework.param_class import *
from skdaccess.utilities.mahali_util import convert_date
from pkg_resources import resource_filename
# Standard library imports
from glob import glob
import shutil
import os
import json
from collections import OrderedDict
# 3rd part imports
from six.moves.urllib.request import urlopen
from tqdm import tqdm
import pandas as pd
import numpy as np
class DataFetcher(DataFetcherStream):
    '''
    Data Fetcher for Mahali temperature data
    '''

    def __init__(self, ap_paramList=None, start_date=None, end_date=None):
        '''
        Initialize Mahali temperature data fetcher

        @param ap_paramList[stations]: AutoList of stations (defaults to all stations)
        @param start_date: Starting date for selecting data (defaults to beginning of available data)
        @param end_date: Ending date for selecting data (defaults to end of available data)
        '''
        # Fix: the default used to be a shared mutable list (ap_paramList=[]).
        if ap_paramList is None:
            ap_paramList = []

        # 2015 day-of-year 271..315 brackets the available campaign data.
        if start_date is None:
            self.start_date = pd.to_datetime('2015271', format='%Y%j')
        else:
            self.start_date = convert_date(start_date)

        if end_date is None:
            self.end_date = pd.to_datetime('2015315', format='%Y%j')
        else:
            self.end_date = convert_date(end_date)

        if len(ap_paramList) == 0:
            # Default to every Mahali station listed in the support file.
            station_list = [
                'mh02',
                'mh03',
                'mh04',
                'mh05',
                'mh06',
                'mh07',
                'mh08',
                'mh09',
                'mh13',
            ]
            ap_paramList = [AutoList(station_list)]

        super(DataFetcher, self).__init__(ap_paramList)

    def retrieveOnlineData(self, data_specification):
        '''
        Load data in from a remote source

        @param data_specification: Pandas dataframe containing the columns 'station', 'date', and 'filename'

        @return Ordered dictionary for each station (key) which contains a pandas data frame of the temperature
        '''
        # Location of data depot
        url = 'http://apollo.haystack.mit.edu/mahali-data/'

        locations = (url
                     + 'metadata/'
                     + data_specification['station']
                     + '/logs/sensor/'
                     + data_specification['date'].apply(lambda x: x.strftime('%Y%j'))
                     + '/'
                     + data_specification['filename']).tolist()

        # Data accumulates as {station: [[time, measurement], ...]}
        all_temperature_data = OrderedDict()

        # Parse jsonl files
        for station, location in zip(data_specification['station'], locations):
            with urlopen(location) as this_json_file:
                # Encased in a try/except because some files end with junk lines
                # (e.g. the last line of metadata/mh02/logs/sensor/2015277/sensor@2015-10-04T225240Z_1443999160.jsonl)
                try:
                    for line in this_json_file:
                        line_data = json.loads(line)
                        this_time = pd.to_datetime(line_data['time'])
                        this_temp = float(line_data["event_data"]["data"])

                        # If data for that station already exists, append ...
                        try:
                            all_temperature_data[station].append([this_time, this_temp])
                        # ... otherwise start a new list for it.
                        except KeyError:
                            all_temperature_data[station] = [[this_time, this_temp]]
                except ValueError:
                    pass

        # Convert each station's accumulated rows to a time-indexed frame.
        for station in all_temperature_data.keys():
            all_temperature_data[station] = pd.DataFrame(all_temperature_data[station],
                                                         columns=['Time', 'Temperature']).set_index('Time')

        return all_temperature_data

    def output(self):
        '''
        Generate data wrapper for Mahali temperatures

        @return Mahali temperature data wrapper
        '''
        # Extract the date from a filename (month/day/year only, no time of day).
        def toDateTime(in_filename):
            return pd.to_datetime(pd.to_datetime(in_filename[7:25]).strftime('%Y-%m-%d'))

        # Read in file list:
        mahali_temperature_info = resource_filename('skdaccess', os.path.join('support', 'mahali_temperature_info.txt'))
        filenames = pd.read_csv(mahali_temperature_info, header=None,
                                names=('station', 'filename'),
                                skipinitialspace=True)

        # Create a column of dates
        filenames['date'] = filenames['filename'].apply(toDateTime)

        # Pad the window: a file dated on one day can contain data that
        # spills into the neighbouring days.
        adjusted_start_date = self.start_date - pd.to_timedelta('1d')
        adjusted_end_date = self.end_date + pd.to_timedelta('1d')

        station_list = self.ap_paramList[0]()

        # Select files for the requested stations between one day before the
        # start date and the requested end date.
        # NOTE(review): the upper bound here uses self.end_date although the
        # comment above and the final trim use adjusted_end_date — confirm
        # which bound is intended before changing it.
        index_to_retrieve = np.logical_and.reduce([filenames.loc[:, 'station'].apply(lambda x: x in station_list),
                                                   filenames.loc[:, 'date'] >= adjusted_start_date,
                                                   filenames.loc[:, 'date'] <= self.end_date])

        all_temperature_data = self.retrieveOnlineData(filenames[index_to_retrieve])

        # Due to data spillover, cut each data frame to the padded window.
        for station in all_temperature_data.keys():
            all_temperature_data[station] = all_temperature_data[station].loc[adjusted_start_date:adjusted_end_date]

        # Return table wrapper of data
        return TableWrapper(all_temperature_data, default_columns=['Temperature'])
| {
"content_hash": "b5660a6275749735faea094612afbd22",
"timestamp": "",
"source": "github",
"line_count": 155,
"max_line_length": 137,
"avg_line_length": 38.99354838709677,
"alnum_prop": 0.5660158835208471,
"repo_name": "skdaccess/skdaccess",
"id": "c8642271b120588fd260dc3e323524892deef2fd",
"size": "7361",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "skdaccess/geo/mahali/temperature/data_fetcher.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "335879"
},
{
"name": "Python",
"bytes": "119834"
}
],
"symlink_target": ""
} |
from rest_framework import serializers
from constants.payments import PROVIDER_LOOKUP
from market.models import Price, price_locale
from mkt.webpay.models import ProductIcon
class PriceSerializer(serializers.ModelSerializer):
    """Serializes a price tier plus its per-region price points."""
    prices = serializers.SerializerMethodField('get_prices')
    localized = serializers.SerializerMethodField('get_localized_prices')
    pricePoint = serializers.CharField(source='name')
    name = serializers.CharField(source='tier_name')

    class Meta:
        model = Price

    def get_prices(self, obj):
        """Return the tier's prices, optionally narrowed to one provider."""
        requested = self.context['request'].GET.get('provider', None)
        provider = PROVIDER_LOOKUP[requested] if requested else requested
        return obj.prices(provider=provider)

    def get_localized_prices(self, obj):
        """Return the price entry matching the request's region.

        The matching entry is copied with a formatted 'locale' string and
        the region name substituted in; an empty dict means no match.
        """
        region = self.context['request'].REGION
        for entry in self.get_prices(obj):
            if entry['region'] != region.id:
                continue
            localized = entry.copy()
            localized.update({
                'locale': price_locale(entry['price'], entry['currency']),
                'region': region.name,
            })
            return localized
        return {}
class ProductIconSerializer(serializers.ModelSerializer):
    """Serializes product icons, exposing a URL only for saved objects."""
    url = serializers.SerializerMethodField('get_url')

    def get_url(self, obj):
        # Unsaved icons have no primary key and therefore no URL yet.
        return obj.url() if obj.pk else ''

    class Meta:
        model = ProductIcon
        exclude = ('format',)
| {
"content_hash": "f13ad854fcb0b58076177e6f5f8457f5",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 78,
"avg_line_length": 30.291666666666668,
"alnum_prop": 0.6279229711141678,
"repo_name": "jinankjain/zamboni",
"id": "a7c0b255d678d3af2d1749216528dcdc15aa692c",
"size": "1454",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "mkt/webpay/serializers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
"""
ComplaintsApi.py
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import sys
import os
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class ComplaintsApi(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    Ref: https://github.com/swagger-api/swagger-codegen
    """

    def __init__(self, api_client=None):
        # Use the explicitly supplied client; otherwise fall back to (and
        # lazily create) the shared client held by the global Configuration.
        config = Configuration()
        if not api_client:
            if not config.api_client:
                config.api_client = ApiClient()
            api_client = config.api_client
        self.api_client = api_client

    def complaints_complaints(self, phone_number, **kwargs):
        """
        Complaints: Free service (with registration), providing community and government complaint lookup by phone number for up to 2,000 queries per month. Details include number complaint rates from (FTC, FCC, IRS, Indiana Attorney General) and key entity tag extractions from complaints.
        This is the main function to get data out of the call control reporting system<br />\r\n Try with api_key 'demo' and phone numbers 18008472911, 13157244022, 17275567300, 18008276655, and 12061231234 (last one not spam)

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.complaints_complaints(phone_number, callback=callback_function)

        :param callback function: The callback function
               for asynchronous request. (optional)
        :param str phone_number: phone number to search (required)
        :return: Complaints
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # The optional 'callback' is the only keyword argument accepted.
        for key in kwargs:
            if key != 'callback':
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method complaints_complaints" % key
                )

        # verify the required parameter 'phone_number' is set
        if phone_number is None:
            raise ValueError("Missing the required parameter `phone_number` when calling `complaints_complaints`")

        resource_path = '/api/2015-11-01/Complaints/{phoneNumber}'.replace('{format}', 'json')
        path_params = {'phoneNumber': phone_number}

        header_params = {}

        # HTTP header `Accept` (omitted entirely when nothing is selected)
        accept = self.api_client.select_header_accept(
            ['application/json', 'text/json', 'application/xml', 'text/xml'])
        if accept:
            header_params['Accept'] = accept

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type([])

        # No authentication required for this endpoint.
        return self.api_client.call_api(resource_path, 'GET',
                                        path_params,
                                        {},
                                        header_params,
                                        body=None,
                                        post_params=[],
                                        files={},
                                        response_type='Complaints',
                                        auth_settings=[],
                                        callback=kwargs.get('callback'))
| {
"content_hash": "81c63c6ff89ecd3c917e8caac9a381bc",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 292,
"avg_line_length": 39.82644628099174,
"alnum_prop": 0.5951442207926956,
"repo_name": "CallControl/CallControlClient",
"id": "e388b5b8b96af502786922baa4a507712893a580",
"size": "4836",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python-client/swagger_client/apis/complaints_api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "611"
},
{
"name": "C#",
"bytes": "183578"
},
{
"name": "Clojure",
"bytes": "18310"
},
{
"name": "Java",
"bytes": "137210"
},
{
"name": "JavaScript",
"bytes": "68944"
},
{
"name": "Objective-C",
"bytes": "97609"
},
{
"name": "PHP",
"bytes": "147510"
},
{
"name": "Perl",
"bytes": "74529"
},
{
"name": "Perl 6",
"bytes": "20373"
},
{
"name": "Python",
"bytes": "111225"
},
{
"name": "Ruby",
"bytes": "104889"
},
{
"name": "Scala",
"bytes": "30489"
},
{
"name": "Shell",
"bytes": "20139"
},
{
"name": "TypeScript",
"bytes": "22764"
}
],
"symlink_target": ""
} |
import requests
from django.utils.translation import gettext_lazy as _
from allauth.socialaccount.providers.oauth2.views import (
OAuth2Adapter,
OAuth2CallbackView,
OAuth2LoginView,
)
from ..base import ProviderException
from .provider import DoubanProvider
class DoubanOAuth2Adapter(OAuth2Adapter):
    """OAuth2 adapter for Douban (douban.com)."""
    provider_id = DoubanProvider.id
    access_token_url = "https://www.douban.com/service/auth2/token"
    authorize_url = "https://www.douban.com/service/auth2/auth"
    profile_url = "https://api.douban.com/v2/user/~me"

    def complete_login(self, request, app, token, **kwargs):
        # Fetch the profile with the freshly issued bearer token.
        auth_header = {"Authorization": "Bearer %s" % token.token}
        profile_response = requests.get(self.profile_url, headers=auth_header)
        extra_data = profile_response.json()
        # Douban may return an error payload without an 'id', e.g.:
        #   {'code': 128,
        #    'request': 'GET /v2/user/~me',
        #    'msg': 'user_is_locked:53358092'}
        if "id" not in extra_data:
            raise ProviderException(
                extra_data.get("msg", _("Invalid profile data")))
        return self.get_provider().sociallogin_from_response(request, extra_data)
# Module-level view callables wired into allauth's URL configuration.
oauth2_login = OAuth2LoginView.adapter_view(DoubanOAuth2Adapter)
oauth2_callback = OAuth2CallbackView.adapter_view(DoubanOAuth2Adapter)
| {
"content_hash": "4efc0adeabfb98146b4729333bdfaf79",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 81,
"avg_line_length": 32.023809523809526,
"alnum_prop": 0.6535315985130111,
"repo_name": "rsalmaso/django-allauth",
"id": "d96dda7cec8a5d37c3a62559cb20ebfa996b080c",
"size": "1345",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "allauth/socialaccount/providers/douban/views.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Emacs Lisp",
"bytes": "104"
},
{
"name": "HTML",
"bytes": "20404"
},
{
"name": "JavaScript",
"bytes": "3360"
},
{
"name": "Makefile",
"bytes": "396"
},
{
"name": "Python",
"bytes": "923713"
}
],
"symlink_target": ""
} |
import unittest
from cupy import testing
@testing.gpu
class TestCount(unittest.TestCase):
    """Checks that count_nonzero returns a plain int matching numpy."""

    # Allow the test runner to split this class across processes.
    _multiprocess_can_split_ = True

    @testing.for_all_dtypes()
    @testing.numpy_cupy_allclose(type_check=False)
    def test_count_nonzero(self, xp, dtype):
        # Zero out a random subset so both zero and nonzero entries exist.
        mask = testing.shaped_random((2, 3), xp, xp.bool_)
        data = testing.shaped_random((2, 3), xp, dtype) * mask
        count = xp.count_nonzero(data)
        self.assertIsInstance(count, int)
        return count

    @testing.for_all_dtypes()
    @testing.numpy_cupy_allclose(type_check=False)
    def test_count_nonzero_zero_dim(self, xp, dtype):
        # Zero-dimensional arrays must be handled too.
        scalar = xp.array(1.0, dtype=dtype)
        count = xp.count_nonzero(scalar)
        self.assertIsInstance(count, int)
        return count
| {
"content_hash": "11ecac6a065237aa406e743b401027af",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 56,
"avg_line_length": 27.653846153846153,
"alnum_prop": 0.631432545201669,
"repo_name": "sinhrks/chainer",
"id": "74574a57f5646a39fc244bb600c77eeb02ebbd25",
"size": "719",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "tests/cupy_tests/sorting_tests/test_count.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "C",
"bytes": "19540"
},
{
"name": "Cuda",
"bytes": "6118"
},
{
"name": "PowerShell",
"bytes": "7195"
},
{
"name": "Python",
"bytes": "1351933"
}
],
"symlink_target": ""
} |
from django.conf import settings
def braintree_client_side_encryption_key(request):
    """Template context processor exposing the Braintree client-side
    encryption key setting (None when the setting is absent)."""
    key = getattr(settings, 'BRAINTREE_CLIENT_SIDE_ENCRYPTION_KEY', None)
    return {'BRAINTREE_CLIENT_SIDE_ENCRYPTION_KEY': key}
| {
"content_hash": "2bf0101e92a517412932bdf08061b078",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 72,
"avg_line_length": 28.25,
"alnum_prop": 0.7035398230088495,
"repo_name": "iconfinder/djbraintree",
"id": "97f387f24d08cd7665041c2be8975ea79740ea9c",
"size": "226",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "djbraintree/context_processors.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "3097"
}
],
"symlink_target": ""
} |
# Test fixture: a setup.py that can only run when its PEP 518 build
# requirements (declared with an extra and environment markers) were
# installed into the build environment first.
from setuptools import setup

# ensure dependencies are installed
import simple
import simplewheel

# Fail loudly if the build environment picked the wrong simplewheel
# (2.0 is the version this fixture expects — presumably pinned by the
# accompanying pyproject.toml; confirm there).
assert simplewheel.__version__ == "2.0"

setup(
    name="pep518_with_extra_and_markers",
    version="1.0",
    py_modules=["pep518_with_extra_and_markers"],
)
| {
"content_hash": "8cb9ab3a16decc223aeef231670ec52b",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 49,
"avg_line_length": 20.076923076923077,
"alnum_prop": 0.7164750957854407,
"repo_name": "pradyunsg/pip",
"id": "bfac5b46783d51451a3861b726fd93b4a77f4702",
"size": "283",
"binary": false,
"copies": "4",
"ref": "refs/heads/main",
"path": "tests/data/src/pep518_with_extra_and_markers-1.0/setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "3137"
},
{
"name": "PowerShell",
"bytes": "2137"
},
{
"name": "Python",
"bytes": "7137519"
}
],
"symlink_target": ""
} |
# Public entry point for landlab's field subpackage: re-export the data
# field containers, their mix-in, and the field-related exceptions.
from landlab.field.scalar_data_fields import ScalarDataFields, FieldError
from landlab.field.grouped import ModelDataFields, GroupError, GroupSizeError
from landlab.field.field_mixin import ModelDataFieldsMixIn
from .graph_field import GraphFields

# Names exported by `from landlab.field import *`.
__all__ = ['ScalarDataFields', 'ModelDataFields', 'ModelDataFieldsMixIn',
           'FieldError', 'GroupError', 'GroupSizeError', 'GraphFields', ]
| {
"content_hash": "075e5a238e782a3bb1e7bef3179644f9",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 77,
"avg_line_length": 56.714285714285715,
"alnum_prop": 0.7909319899244333,
"repo_name": "csherwood-usgs/landlab",
"id": "37479db8188eb85f424298710ec0b2a3e7c70bee",
"size": "397",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "landlab/field/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1359"
},
{
"name": "PowerShell",
"bytes": "6112"
},
{
"name": "Python",
"bytes": "2844194"
},
{
"name": "Shell",
"bytes": "2773"
}
],
"symlink_target": ""
} |
# Package facade: re-export everything from the utility submodules so
# callers can import the helpers directly from this package.
from .Metric import *
from .Linalg import *
from .Scaler import *
from .Trainer import *
from .Visualizer import *
| {
"content_hash": "ebe1b094f26e6dc28a38b580a6b8bb91",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 25,
"avg_line_length": 23,
"alnum_prop": 0.7391304347826086,
"repo_name": "MaxInGaussian/GomPlex",
"id": "380f86f5072d894a0ac84e78173b3a5a24182259",
"size": "381",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "build/lib/GomPlex/util/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "134746"
}
],
"symlink_target": ""
} |
"""Fichier contenant l'action ecrire."""
from corps.fonctions import valider_cle
from primaires.scripting.action import Action
from primaires.scripting.instruction import ErreurExecution
class ClasseAction(Action):
    # Scripting action "ecrire": writes a value into a structure slot.
    # NOTE(review): the French docstrings below appear to feed the in-game
    # scripting help shown to builders — left untranslated on purpose;
    # confirm before converting them to English.
    """écrit une information dans une structure."""

    @classmethod
    def init_types(cls):
        # Register the action with its accepted argument types:
        # a Structure, a string key, and a value of any type.
        cls.ajouter_types(cls.ecrire, "Structure", "str", "object")

    @staticmethod
    def ecrire(structure, cle, valeur):
        """Écrit l'information dans la structure indiquée.

        Paramètres à préciser :

          * structure : la structure à modifier
          * cle : la clé de la case à modifier
          * valeur : la valeur de la case à écrire (tous types acceptés)

        Exemple d'utilisation :

          ecrire structure "nom" "Quelque chose"
          ecrire structure "numero" 5
          ecrire structure "elements" liste(1, 2, 3, 8)
          ecrire structure "coupable" joueur("Kredh")

        **ATTENTION** : la clé de case doit être une clé (sans
        majuscules ni accents, ne comprenant que des lettres et
        des chiffres, ainsi que le signe souligné _, si il n'est
        pas en début de mot). Les noms suivants sont par ailleurs interdits :
        "e_existe", "get_nom_pour", "id", "structure"

        """
        # Reject malformed keys (uppercase, accents, illegal characters).
        valider_cle(cle)
        # Keys starting with '_' are reserved for internal attributes.
        if cle.startswith("_"):
            raise ErreurExecution("la clé précisée {} commence par " \
                    "un signe souligné".format(repr(cle)))

        # These names would shadow the Structure object's own API.
        interdits = ("e_existe", "get_nom_pour", "id", "structure")
        if cle in interdits:
            raise ErreurExecution("Ce nom de clé est interdit. Clés " \
                    "interdites : {}.".format(repr(interdits)))

        setattr(structure, cle, valeur)
| {
"content_hash": "534dc879d8c7a39e183e5e5419d5e9f3",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 77,
"avg_line_length": 34.03921568627451,
"alnum_prop": 0.6209677419354839,
"repo_name": "vlegoff/tsunami",
"id": "636decb1437709765496666bf5a14cbcdfb90728",
"size": "3326",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/primaires/scripting/actions/ecrire.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "7930908"
},
{
"name": "Ruby",
"bytes": "373"
}
],
"symlink_target": ""
} |
from __future__ import division, unicode_literals, print_function
from django.db.models.signals import m2m_changed
def commodity_inventory_changed(sender, instance, *args, **kwargs):
    """m2m_changed handler: re-sync CommodityProduct rows with the
    inventory's date range and weekday selection.

    Products whose date fell out of the recomputed schedule are deleted;
    dates without an existing product get new ones bulk-created.
    """
    # Imported lazily, presumably to avoid import cycles at module load.
    from libs.datetimes import dates_during
    from hawaii.apps.commodity.models import CommodityProduct, CommodityInventory

    # Re-fetch the inventory so related fields are loaded fresh.
    inventory = CommodityInventory.objects.select_related().get(pk=instance.pk)
    weekdays = inventory.days.values_list("number", flat=True)
    # All dates the inventory should cover, given its weekday selection.
    dates = dates_during(from_date=inventory.begin, to_date=inventory.end, weekdays=weekdays)
    copy_dates = dates[:]

    products = list(inventory.products.all())
    products_will_delete = []
    for product in products:
        # NOTE(review): the membership test reads product.datetime.date while
        # the removal below reads product.date — one of these attribute paths
        # is probably wrong; confirm against the CommodityProduct model.
        if not product.datetime.date in copy_dates:
            products_will_delete.append(product.id)
        else:
            # Date already has a product; don't create another for it.
            dates.remove(product.date)

    # delete products
    CommodityProduct.objects.filter(id__in=products_will_delete).delete()
    # create products
    CommodityProduct.bulk_create_products(inventory, dates)
def register_commodity_inventory_changed():
    """Connect the m2m_changed handler for CommodityInventory.days.

    The dispatch_uid guards against duplicate registration.
    """
    from hawaii.apps.commodity.models import CommodityInventory
    m2m_changed.connect(commodity_inventory_changed, sender=CommodityInventory.days.through, dispatch_uid='commodity_inventory_changed')
def register_commodity_signals():
    """Register all commodity-app signal handlers (called at app setup)."""
    register_commodity_inventory_changed()
    print("commodity signal register")
"content_hash": "3dc58eeb2fb7d7646e901839d391eda9",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 136,
"avg_line_length": 38.388888888888886,
"alnum_prop": 0.743849493487699,
"repo_name": "chenchiyuan/hawaii",
"id": "24922da768b8f40ab36d40bb91f7cbb6210eedf4",
"size": "1434",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hawaii/apps/commodity/signals.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "1556"
},
{
"name": "CSS",
"bytes": "100775"
},
{
"name": "HTML",
"bytes": "3479506"
},
{
"name": "JavaScript",
"bytes": "2146262"
},
{
"name": "Python",
"bytes": "106187"
},
{
"name": "Shell",
"bytes": "534"
}
],
"symlink_target": ""
} |
import json
import urlparse
import requests
import app
def get_video_api_url(video_id):
    """Return the YouTube Data API v3 `videos` endpoint URL for one video."""
    template = 'https://www.googleapis.com/youtube/v3/videos?part=snippet&id=%s&key=%s'
    return template % (video_id, app.config.GOOGLE_SERVER_API_KEY)
def get_channel_api_url(channel_id):
    """Return the YouTube Data API v3 `channels` endpoint URL for a channel."""
    template = 'https://www.googleapis.com/youtube/v3/channels?part=contentDetails&id=%s&key=%s'
    return template % (channel_id, app.config.GOOGLE_SERVER_API_KEY)
def get_playlist_api_url(playlist_id, next_page_token):
    """Return the `playlistItems` API URL, resuming from next_page_token if given."""
    base = 'https://www.googleapis.com/youtube/v3/playlistItems?part=snippet'
    if next_page_token:
        return base + '&pageToken=%s&maxResults=50&playlistId=%s&key=%s' % (
            next_page_token, playlist_id, app.config.GOOGLE_SERVER_API_KEY)
    return base + '&maxResults=50&playlistId=%s&key=%s' % (
        playlist_id, app.config.GOOGLE_SERVER_API_KEY)
def get_video_url(video_id):
    """Return the canonical watch URL for a YouTube video id."""
    return 'https://www.youtube.com/watch?v=%s' % video_id
def get_channel_id(url):
    """Extract the channel id from a /channel/ URL, or None if absent."""
    marker = '/channel/'
    if marker not in url:
        return None
    return url.split(marker)[1].split('/')[0]
def crawl_playlist(playlist_id):
    """Fetch every video in a YouTube playlist, following pagination.

    Returns a list of dicts with keys 'url', 'title', 'thumbnail',
    'video_id'. (Python 2 module: uses print statements.)
    """
    print 'Crawling playlist:', playlist_id
    videos = []
    next_page_token = None
    while True:
        u = get_playlist_api_url(playlist_id, next_page_token)
        r = requests.get(u)
        j = json.loads(r.text)
        for item in j['items']:
            video_id = item['snippet']['resourceId']['videoId']
            url = get_video_url(video_id)
            title = item['snippet']['title']
            thumbnail = item['snippet']['thumbnails']['default']['url']
            print 'Adding video : ', url
            videos.append({
                'url': url,
                'title': title,
                'thumbnail': thumbnail,
                'video_id': video_id
            })
        # The API omits nextPageToken on the last page.
        next_page_token = j.get('nextPageToken')
        if not next_page_token:
            break
    return videos
def get_videos(url):
    """
    Return the list of videos that are present in `url`.

    Handles channel URLs, playlist URLs and single-video URLs.
    Each entry has the skeleton:
        {'url': VIDEO_URL, 'title': TITLE,
         'thumbnail': THUMBNAIL URL, 'video_id': VIDEO_ID}
    """
    query = dict(urlparse.parse_qsl(urlparse.urlsplit(url).query))
    channel_id = get_channel_id(url)
    video_id = query.get('v')
    playlist_id = query.get('list')

    if channel_id:
        # A channel's uploads are exposed by the API as a playlist.
        data = json.loads(requests.get(get_channel_api_url(channel_id)).text)
        uploads_id = data['items'][0]['contentDetails']['relatedPlaylists']['uploads']
        return crawl_playlist(uploads_id)

    if playlist_id:
        return crawl_playlist(playlist_id)

    if video_id:
        data = json.loads(requests.get(get_video_api_url(video_id)).text)
        snippet = data['items'][0]['snippet']
        return [{
            'url': url,
            'title': snippet['title'],
            'thumbnail': snippet['thumbnails']['default']['url'],
            'video_id': video_id
        }]

    # Unrecognised URL shape: nothing to return.
    return []
| {
"content_hash": "8a1631df4d01a82b2107bcaf8c88e436",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 200,
"avg_line_length": 27.42016806722689,
"alnum_prop": 0.5896414342629482,
"repo_name": "arpitbbhayani/penny",
"id": "8b2b721377cbf483a572e25db53c1d9d9c754c3d",
"size": "3263",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/tpsites/youtube_service.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "23"
},
{
"name": "CSS",
"bytes": "2022061"
},
{
"name": "HTML",
"bytes": "31272"
},
{
"name": "JavaScript",
"bytes": "2265006"
},
{
"name": "Lex",
"bytes": "1094"
},
{
"name": "Makefile",
"bytes": "1071"
},
{
"name": "Python",
"bytes": "56089"
},
{
"name": "Shell",
"bytes": "507"
},
{
"name": "Yacc",
"bytes": "5995"
}
],
"symlink_target": ""
} |
import io
import re
from setuptools import setup
# Read the package version out of xero/__init__.py so it is declared in
# exactly one place.
with io.open('./xero/__init__.py', encoding='utf8') as version_file:
    version_match = re.search(
        r"^__version__ = ['\"]([^'\"]*)['\"]", version_file.read(), re.M
    )
    if version_match:
        version = version_match.group(1)
    else:
        raise RuntimeError("Unable to find version string.")

# The README doubles as the long description shown on PyPI.
with io.open('README.md', encoding='utf8') as readme:
    long_description = readme.read()

setup(
    name='pyxero',
    version=version,
    description='Python API for accessing the REST API of the Xero accounting tool.',
    long_description=long_description,
    author='Russell Keith-Magee',
    author_email='russell@keith-magee.com',
    url='http://github.com/freakboy3742/pyxero',
    packages=['xero', ],
    install_requires=[
        'six>=1.8.0',
        'requests>=1.1.0',
        'requests-oauthlib>=0.3.0',
        'python-dateutil>=2.1',
        'pycrypto>=2.6.1'
    ],
    tests_require=[
        'mock',
    ],
    license='New BSD',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Web Environment',
        'Intended Audience :: Financial and Insurance Industry',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Topic :: Office/Business :: Financial :: Accounting',
    ],
    test_suite="tests",
)
| {
"content_hash": "25707498dd71432974f30fd6fb99d3cb",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 85,
"avg_line_length": 29.98148148148148,
"alnum_prop": 0.5898702903026559,
"repo_name": "thisismyrobot/pyxero",
"id": "d540050d9c440e1676d6e8e917b2d528cae1aaeb",
"size": "1640",
"binary": false,
"copies": "1",
"ref": "refs/heads/standalone",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "80850"
}
],
"symlink_target": ""
} |
from distutils.core import setup

# Minimal packaging metadata for the "electrical" library.
setup(
    name='electrical',
    version='0.1',
    description='electrical',
    url='http://example.com',
    author='bhaskaran',
    author_email='baskar4n@gmail.com',
    license='MIT License',
    packages=['electrical'],
)
| {
"content_hash": "f9a1ceb6e99f619300d65b00ccae86c1",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 40,
"avg_line_length": 27.7,
"alnum_prop": 0.6173285198555957,
"repo_name": "bhaskar4n/Electrical-Library-",
"id": "7321f02adcff6f39669c9ae98a1ab8e9001d2281",
"size": "277",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1300"
}
],
"symlink_target": ""
} |
from django import forms
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as AuthUserAdmin
from django.contrib.auth.forms import UserChangeForm, UserCreationForm
from .models import User
class MyUserChangeForm(UserChangeForm):
    """Admin change form: the stock auth form bound to the custom User model."""
    class Meta(UserChangeForm.Meta):
        model = User
class MyUserCreationForm(UserCreationForm):
    """Admin creation form for the custom User model.

    Rejects usernames that already exist and reports them with the
    ``duplicate_username`` error message.
    """

    # Build our own error_messages mapping by copying the parent's.
    # The original did ``error_message = UserCreationForm.error_messages.update({...})``,
    # which (a) bound None (dict.update returns None) and (b) mutated the
    # shared UserCreationForm.error_messages dict in place.
    error_messages = dict(
        UserCreationForm.error_messages,
        duplicate_username='This username has already been taken.',
    )

    class Meta(UserCreationForm.Meta):
        model = User

    def clean_username(self):
        """Return the username if unused, else raise a ValidationError."""
        username = self.cleaned_data["username"]
        try:
            User.objects.get(username=username)
        except User.DoesNotExist:
            # No existing user with this name - the value is acceptable.
            return username
        raise forms.ValidationError(self.error_messages['duplicate_username'])
@admin.register(User)
class MyUserAdmin(AuthUserAdmin):
    """Admin for the custom User model: stock UserAdmin plus a 'name' field."""
    form = MyUserChangeForm
    add_form = MyUserCreationForm
    # Prepend a 'User Profile' section exposing the extra ``name`` field.
    fieldsets = (('User Profile', {'fields': ('name',)}),) + AuthUserAdmin.fieldsets
    list_display = ('username', 'name', 'is_superuser')
    search_fields = ['name']
| {
"content_hash": "f4599ac29efcfe39c7fd13465e47e191",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 84,
"avg_line_length": 30.72972972972973,
"alnum_prop": 0.7053649956024626,
"repo_name": "genomics-geek/cookiecutter-django-reactjs",
"id": "a7000f2e0dfc62101c87472ff8186a5d3a72dabc",
"size": "1137",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "{{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/users/admin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5146"
},
{
"name": "CSS",
"bytes": "2142"
},
{
"name": "HTML",
"bytes": "25464"
},
{
"name": "JavaScript",
"bytes": "13051"
},
{
"name": "Makefile",
"bytes": "8417"
},
{
"name": "Python",
"bytes": "82888"
},
{
"name": "Shell",
"bytes": "10921"
}
],
"symlink_target": ""
} |
class AxonList:
    """A duplicate-free collection of axon objects.

    Axons are kept in a set, so they must be hashable and duplicates are
    silently dropped. Each axon is expected to expose ``getDiameter()``
    (and ``toArray()`` for CSV export). ``__nbrAxon`` counts insertions, so
    it can exceed ``len(__listAxon)`` if the same axon is inserted twice.
    """

    def __init__(self, listAxon=None):
        """
        :param listAxon: sized iterable of axons to start from; empty if None
        """
        if listAxon is None:
            self.__listAxon = set()
            self.__nbrAxon = 0
        else:
            self.__listAxon = set(listAxon)
            self.__nbrAxon = len(listAxon)

    def insert(self, axon):
        """Add *axon* to the collection and bump the insertion counter.

        :param axon: the axon to insert
        """
        self.__listAxon.add(axon)
        self.__nbrAxon += 1

    def __add__(self, other):
        """Union of two axon lists; duplicates are not copied.

        :param other: the AxonList to add
        :return: a new AxonList, union of self and other
        """
        return AxonList(self.__listAxon.union(other.getAxonList()))

    def __sub__(self, other):
        """Difference: every axon in self that is not in other.

        Bug fix: the original passed the *bound method* ``other.getAxonList``
        (missing call parentheses) to ``set.difference``, raising TypeError;
        the getter is now actually called.

        :param other: the AxonList to subtract
        :return: a new AxonList with the axons unique to self
        """
        return AxonList(self.__listAxon.difference(other.getAxonList()))

    def __eq__(self, other):
        """Two AxonLists are equal when both the sets and counters match.

        Bug fix: the original returned a 3-tuple
        ``(self.__listAxon, self.__nbrAxon == other.__listAxon, other.__nbrAxon)``
        which is always truthy; it now performs the intended comparison.
        """
        return (self.__listAxon == other.getAxonList() and
                self.__nbrAxon == other.getNbrAxon())

    def getAxonList(self):
        """:return: the underlying set of axons"""
        return self.__listAxon

    def getNbrAxon(self):
        """:return: the number of insertions recorded for this list"""
        return self.__nbrAxon

    def getAxonUnderDiametre(self, diametre):
        """
        :param diametre: exclusive upper bound on diameter
        :return: a new AxonList with every axon strictly below *diametre*
        """
        liste = AxonList()
        for axon in self.__listAxon:
            if axon.getDiameter() < diametre:
                liste.insert(axon)
        return liste

    def getAxonHigherThanDiametre(self, diametre):
        """
        :param diametre: exclusive lower bound on diameter
        :return: a new AxonList with every axon strictly above *diametre*
        """
        liste = AxonList()
        for axon in self.__listAxon:
            if axon.getDiameter() > diametre:
                liste.insert(axon)
        return liste

    def getDiameterMean(self):
        """
        :return: the mean diameter over all stored axons
        :raises ZeroDivisionError: if the list is empty
        """
        total = sum(axon.getDiameter() for axon in self.__listAxon)
        return total / self.getNbrAxon()

    def save(self, output):
        """Persist the axon set to *output*.

        :param output: destination path. A '.csv' extension writes one axon
            per line via ``axon.toArray()`` (the last column - tags - is
            dropped for now); any other extension pickles the set to a
            '.bin' file.
        """
        import _pickle as cPickle
        fileFormat = output.split('.')[-1]
        if fileFormat == "csv":
            # In CSV, we ignore the tags, for now
            # TODO Find an alternative !
            # (Context managers close the handles even on error; the original
            # also did a pointless seek(0, 0) right before close.)
            with open(output, 'w') as f:
                for axon in self.__listAxon:
                    current = axon.toArray()
                    for i in range(len(current) - 1):
                        if "float" in str(type(current[i])):
                            current[i] = "%.10f" % current[i]
                    f.write(','.join(current[:-1]) + "\n")
        else:
            if output[-4:] != ".bin":
                output += ".bin"
            with open(output, "wb") as f:
                cPickle.dump(self.__listAxon, f)

    def load(self, inputFile):
        """Load a previously pickled axon set from *inputFile*.

        :param inputFile: path to a '.bin' file written by :meth:`save`;
            CSV import is not supported yet.
        """
        import _pickle as cPickle
        if inputFile.split('.')[-1] != "bin":
            # TODO Import CSV ?
            print("Sorry, can only import binary numpy arrays for now !\n\tSo we can't import CSV for the moment.")
        else:
            with open(inputFile, "rb") as f:
                self.__listAxon = cPickle.load(f)
| {
"content_hash": "57802741e73f02edfb6d2db9fcfba73b",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 115,
"avg_line_length": 33.314516129032256,
"alnum_prop": 0.5400629387557492,
"repo_name": "neuropoly/axonsegpy",
"id": "39c3118dd225abf7c4d89e369bde84bbbd22f407",
"size": "4324",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "axonsegpy/core/AxonList.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "46339"
}
],
"symlink_target": ""
} |
from CommonServerPython import DemistoException
from pytest import raises
def test_http_request_json_negative(requests_mock):
    """An API-quota-exceeded response must surface as a DemistoException."""
    from VulnDB import Client, vulndb_get_cve_command

    api_root = 'https://vulndb.cyberriskanalytics.com'
    # The OAuth token endpoint hands back a canned token.
    requests_mock.post(
        api_root + '/oauth/token',
        json={'access_token': 'access_token'},
    )
    cve_id = '2014-1234'
    # The CVE lookup replies with VulnDB's "quota exceeded" error payload.
    requests_mock.get(
        api_root + '/api/v1/vulnerabilities/{}/find_by_cve_id'.format(cve_id),
        json={
            'details': 'You have exceeded your API usage for the month. Please contact support'
        },
    )
    client = Client(False, False, api_root + '/api/v1', 'client_id', 'client_secret')
    with raises(DemistoException, match='You have exceeded your API usage for the month'):
        vulndb_get_cve_command({'cve_id': cve_id}, client)
| {
"content_hash": "105e262d58a2136508f2d601a5262b7b",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 95,
"avg_line_length": 40.23809523809524,
"alnum_prop": 0.6532544378698225,
"repo_name": "demisto/content",
"id": "474b555f378e0b3854d885600cce64d6610bb687",
"size": "845",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Packs/VulnDB/Integrations/VulnDB/VulnDB_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "2146"
},
{
"name": "HTML",
"bytes": "205901"
},
{
"name": "JavaScript",
"bytes": "1584075"
},
{
"name": "PowerShell",
"bytes": "442288"
},
{
"name": "Python",
"bytes": "47881712"
},
{
"name": "Rich Text Format",
"bytes": "480911"
},
{
"name": "Shell",
"bytes": "108066"
},
{
"name": "YARA",
"bytes": "1185"
}
],
"symlink_target": ""
} |
"""
These integration tests exist solely to test the interaction between pyggybank and GPG on the CLI.
All attempts should be made to avoid extending these tests in preference for unit tests of the functions
themselves (where necessary, mocking out the GPG interactions).
TODO: It would be great to bring these tests into the pyggybank.test module, and marking them as
full-blown integration tests.
"""
import pexpect
import sys
import os
import shutil
from pathlib import Path
gpg_vn = 2
def test_gpg_new_key_prompt():
    """Launch ``pyggybank wizard`` against an empty GPG home and verify it
    drops us into gpg's interactive key-generation wizard.

    Side effect: records the detected gpg major version in the module-level
    ``gpg_vn`` so later tests can adapt.
    """
    global gpg_vn
    # Check that pyggybank drops us into the gpg keygen prompt if we don't have any keys
    tmp = Path('tmp')
    if tmp.exists():
        shutil.rmtree(tmp)
    tmp.mkdir()
    child = pexpect.spawnu('pyggybank wizard --gpg-home={}'.format(tmp))
    # child.logfile = os.fdopen(sys.stdout.fileno(), 'w')
    # We just want to check that we have initiated the gpg wizard correctly. The details aren't important.
    newer_gpg = True
    try:
        # gpg 1.x asks for key type/size/expiry up front; if these prompts
        # appear within the timeout we know we're on the older gpg.
        child.expect('Your selection?', timeout=1)
        child.sendline('1')
        child.expect('What keysize do you want?', timeout=1)
        child.sendline('2048')
        newer_gpg = False
        gpg_vn = 1
        child.expect('key expires in n years', timeout=1)
        child.sendline('0')
    except pexpect.exceptions.TIMEOUT:
        # No gpg1-style prompts - assume gpg >= 2 and keep gpg_vn as-is.
        pass
    if newer_gpg:
        # gpg 2.x goes straight to the identity questions.
        child.expect('Real name:')
        child.sendline('Testing Real Me')
        child.expect('Email address:')
        child.sendline('test@example.com')
        child.expect('\(O\)kay\/\(Q\)uit\?')
    child.close()
    # Let's get a newline afterwards.
    assert True
    print()
def test_gpg_no_agent():
    """Round-trip: run the wizard to write an encrypted accounts file, then
    read it back with ``pyggybank accounts``, typing the GPG passphrase
    because no gpg-agent has unlocked the key yet.
    """
    # Check the pyggybank behaviour when the gpg key hasn't been unlocked
    # (i.e. the gpg-agent is fresh)
    gpghome = Path(__file__).parent/'gpg'
    accounts_file = Path('accounts.encrypted.{}.yml'.format(gpg_vn))
    if gpg_vn < 2:
        # gpg 1.x flows are not implemented for this test.
        raise RuntimeError('Cant yet handle older gpg.')
    if accounts_file.exists():
        accounts_file.unlink()
    child = pexpect.spawnu('pyggybank wizard --gpg-home={} --accounts-file={}'.format(gpghome, accounts_file))
    # child.logfile = os.fdopen(sys.stdout.fileno(), 'w')
    child.expect('GPG identity would you like to encrypt with\?', timeout=5)
    child.sendline('Testing Name <test@example.com>')
    child.expect('Provider:')
    child.sendline('Test provider')
    child.expect('User ID')
    child.sendline('abcdef')
    child.expect('password')
    child.sendline('123456')
    child.expect('Wrote config')
    # --------
    # Now read the accounts back; a fresh agent means we get a passphrase prompt.
    child = pexpect.spawnu('pyggybank accounts --accounts-file={} --gpg-home={}'.format(accounts_file, gpghome))
    #child.logfile = os.fdopen(sys.stdout.fileno(), 'w')
    # Will only be called if gpg-agent isn't running.
    child.expect('GPG passphrase\:')
    child.sendline('Th15154T35t')
    child.expect('Test provider')
    # Let's get a newline afterwards.
    assert True
    print()
# Allow running these integration tests directly, outside a test runner.
if __name__ == '__main__':
    test_gpg_new_key_prompt()
    test_gpg_no_agent()
| {
"content_hash": "bca40a363aadd2a999aedb0280588fca",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 112,
"avg_line_length": 27.141592920353983,
"alnum_prop": 0.6507988262145419,
"repo_name": "pelson/pyggybank",
"id": "de73f1e30b10e028598467929425da35cc1c4e27",
"size": "3067",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "testing/test_wizard.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "509"
},
{
"name": "HTML",
"bytes": "7689"
},
{
"name": "JavaScript",
"bytes": "8926"
},
{
"name": "Python",
"bytes": "43408"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, print_function, unicode_literals
import io
import logging
import os
import re
from datetime import datetime
import salt.client
import salt.config
import salt.runner
import six
import yaml
from django.conf import settings
from stackdio.salt.utils.logging import setup_logfile_logger
logger = logging.getLogger(__name__)
root_logger = logging.getLogger()

# Matches ANSI colour escape codes so they can be stripped from salt comments.
COLOR_REGEX = re.compile(r'\[0;[\d]+m')

# Marker salt puts in a state's comment when the state was skipped because a
# requisite (dependency) state failed.
ERROR_REQUISITE = 'One or more requisite failed'
class StackdioSaltClientException(Exception):
    """Raised when a salt client/runner response cannot be processed."""
    pass
def is_state_error(state_meta):
    """A salt state failed iff its ``result`` field is falsy.

    :param state_meta: one state's result dict (must contain ``result``)
    :return: True when the state did not succeed
    """
    succeeded = state_meta['result']
    return not succeeded
def state_to_dict(state_string):
    """Split a salt state identifier string into a labelled dict.

    Before: group_|-stackdio_group_|-abe_|-present
    After:  {'module': 'group', 'function': 'present', 'name': 'abe',
             'declaration_id': 'stackdio_group'}

    Labels and the delimiter come from the django settings.
    """
    labels = settings.STATE_EXECUTION_FIELDS
    fields = state_string.split(settings.STATE_EXECUTION_DELIMITER)
    return {label: field for label, field in zip(labels, fields)}
def is_requisite_error(state_meta):
    """True when the state failed only because one of its requisites failed.

    :param state_meta: one state's result dict (must contain ``comment``)
    """
    comment = state_meta['comment']
    return ERROR_REQUISITE in comment
def is_recoverable(err):
    """Whether a failed provisioning/orchestration run is worth retrying.

    Intended to consult a blacklist of unrecoverable errors so callers can
    avoid pointless retries; for now every error is treated as recoverable.
    """
    # TODO: determine the blacklist of errors that will trigger a
    # return of False here
    return True
def state_error(state_str, state_meta):
    """Build a uniform error dict for a failed state.

    :param state_str: the raw state identifier string (see state_to_dict)
    :param state_meta: the state execution result dict
    :return: ``(error_dict, recoverable)`` where error_dict carries the
        comment, dotted function name, declaration id and any captured
        stdout/stderr, and recoverable is is_recoverable()'s verdict
    """
    state = state_to_dict(state_str)
    err = {
        'error': state_meta['comment'],
        'function': '{module}.{func}'.format(**state),
        'declaration_id': state['declaration_id'],
    }
    # Attach captured process output when the state recorded any.
    changes = state_meta['changes']
    for stream in ('stderr', 'stdout'):
        if stream in changes:
            err[stream] = changes[stream]
    return err, is_recoverable(err)
def process_sls_result(sls_result, err_file):
    """Flatten one SLS's highstate data into pass/fail host sets.

    :param sls_result: the ``changes`` dict for one SLS from the orchestrate
        runner; highstate data is expected under its ``ret`` key
    :param err_file: error-log path; failed state dicts are appended as YAML
        (requisite-chain failures are skipped to keep the log readable)
    :returns: dict with a ``failed`` flag plus ``succeeded_hosts`` /
        ``failed_hosts`` sets. A host may appear in both sets when some of
        its states passed and others failed.
    :raises StackdioSaltClientException: when the payload is not highstate data
    """
    if 'out' in sls_result and sls_result['out'] != 'highstate':
        logger.debug('This isn\'t highstate data... it may not process correctly.')
        raise StackdioSaltClientException('Missing highstate data from the orchestrate runner.')
    ret = {
        'failed': False,
        'succeeded_hosts': set(),
        'failed_hosts': set(),
    }
    if 'ret' not in sls_result:
        # Nothing ran at all - report failure with no per-host detail.
        ret['failed'] = True
        return ret
    # Loop over the host items
    for host, state_results in sls_result['ret'].items():
        # Replay states in execution order so failures read chronologically.
        sorted_result = sorted(state_results.values(), key=lambda x: x['__run_num__'])
        for stage_result in sorted_result:
            if stage_result.get('result', False):
                # Result is true, we succeeded
                ret['succeeded_hosts'].add(host)
                continue
            # We have failed - add ourselves to the failure list
            ret['failed'] = True
            ret['failed_hosts'].add(host)
            # Check to see if it's a requisite error - if so, we don't want to clutter the
            # logs, so we'll continue on.
            if is_requisite_error(stage_result):
                continue
            # Write to the error log
            with io.open(err_file, 'at') as f:
                yaml.safe_dump(stage_result, f)
    return ret
def process_times(sls_result):
    """Log per-module timing for one SLS result.

    For each state label we keep the *max* duration across hosts (hosts run
    in parallel, so the slowest one is the bottleneck), then sum those per
    salt module (states within a module run sequentially on a host).
    Purely informational: only writes to the logger.
    """
    if 'ret' not in sls_result:
        return
    max_time_map = {}
    for state_results in sls_result['ret'].values():
        for stage_label, stage_result in state_results.items():
            # Pull out the duration
            if 'duration' in stage_result:
                current = max_time_map.get(stage_label, 0)
                duration = stage_result['duration']
                try:
                    if isinstance(duration, six.string_types):
                        # Durations sometimes arrive as strings like '123.4 ms';
                        # take the leading number.
                        new_time = float(duration.split()[0])
                    else:
                        new_time = float(duration)
                except ValueError:
                    # Make sure we never fail
                    new_time = 0
                # Only set the duration if it's higher than what we already have
                # This should be all we care about - since everything is running in parallel,
                # the bottleneck is the max time
                max_time_map[stage_label] = max(current, new_time)
    time_map = {}
    # aggregate into modules
    for stage_label, max_time in max_time_map.items():
        info_dict = state_to_dict(stage_label)
        current = time_map.get(info_dict['module'], 0)
        # Now we want the sum since these are NOT running in parallel.
        time_map[info_dict['module']] = current + max_time
    for smodule, time in sorted(time_map.items()):
        # Durations are in milliseconds; convert to seconds for the log line.
        logger.info('Module {0} took {1} total seconds to run'.format(smodule, time / 1000))
def process_orchestrate_result(result, err_file):
    """Digest the raw ``state.orchestrate`` runner output.

    Walks every SLS in run order, logs timing, appends failure details to
    *err_file*, and buckets each SLS into succeeded / failed / cancelled
    ("cancelled" means it failed purely because a requisite SLS failed).

    :param result: raw dict from ``RunnerClient.cmd('state.orchestrate', ...)``
    :param err_file: path of the error log to append to
    :returns: dict with a ``failed`` flag and per-SLS maps
        ``succeeded_sls``, ``failed_sls`` and ``cancelled_sls`` (each value
        is the dict produced by process_sls_result)
    :raises StackdioSaltClientException: when the master returned a bare
        error payload instead of per-SLS results
    """
    ret = {
        'failed': False,
        'succeeded_sls': {},
        'failed_sls': {},
        'cancelled_sls': {},
    }
    if 'data' not in result:
        with io.open(err_file, 'at') as f:
            f.write('Orchestration result is missing information:\n\n')
            f.write(six.text_type(result))
        ret['failed'] = True
        return ret
    # The actual info we want is nested in the 'data' key
    result = result['data']
    opts = salt.config.client_config(settings.STACKDIO_CONFIG.salt_master_config)
    if not isinstance(result, dict):
        with io.open(err_file, 'at') as f:
            f.write('Orchestration failed. See below.\n\n')
            f.write(six.text_type(result))
        ret['failed'] = True
        return ret
    # Results are keyed by the master id from the salt config.
    if opts['id'] not in result:
        with io.open(err_file, 'at') as f:
            f.write('Orchestration result is missing information:\n\n')
            f.write(six.text_type(result))
        ret['failed'] = True
        return ret
    result = result[opts['id']]
    if not isinstance(result, dict):
        # A bare string here is an error message from salt itself.
        with io.open(err_file, 'at') as f:
            f.write(six.text_type(result))
        raise StackdioSaltClientException(result)
    # Process each SLS in the order salt actually ran them.
    for sls, sls_result in sorted(result.items(), key=lambda x: x[1]['__run_num__']):
        sls_dict = state_to_dict(sls)
        logger.info('Processing stage {0}'.format(sls_dict['name']))
        if 'changes' in sls_result:
            process_times(sls_result['changes'])
        logger.info('')
        if sls_result.get('result', False):
            # This whole sls is good! Add that to the ret dict and move on.
            sls_ret_dict = {
                'failed': False,
                'failed_hosts': set(),
                'succeeded_hosts': set(),
            }
            if 'changes' in sls_result:
                sls_ret_dict['succeeded_hosts'] = set(sls_result['changes'].get('ret', {}).keys())
            # Write a message to the error log
            with io.open(err_file, 'at') as f:
                if sls_ret_dict['succeeded_hosts']:
                    f.write(
                        'Stage {} succeeded and returned {} host info object{}\n\n'.format(
                            sls_dict['name'],
                            len(sls_ret_dict['succeeded_hosts']),
                            '' if len(sls_ret_dict['succeeded_hosts']) == 1 else 's',
                        )
                    )
                else:
                    f.write('Stage {} succeeded, but appears to have no changes.\n\n'.format(
                        sls_dict['name'],
                    ))
            # Add to the success map
            ret['succeeded_sls'][sls_dict['name']] = sls_ret_dict
        else:
            # We failed - print a message to the log.
            with io.open(err_file, 'at') as f:
                if 'changes' in sls_result and 'ret' in sls_result['changes']:
                    f.write(
                        'Stage {} failed and returned {} host info object{}\n\n'.format(
                            sls_dict['name'],
                            len(sls_result['changes']['ret']),
                            '' if len(sls_result['changes']['ret']) == 1 else 's',
                        )
                    )
                else:
                    f.write(
                        'Stage {} failed, but appears to have no changes. See below.\n'.format(
                            sls_dict['name'],
                        )
                    )
                # Print the failure comment
                if 'comment' in sls_result:
                    comment = sls_result['comment']
                    if isinstance(comment, six.string_types):
                        # Strip salt's terminal colour codes before logging.
                        f.write('{}\n\n'.format(COLOR_REGEX.sub('', comment)))
                    else:
                        f.write('{}\n\n'.format(yaml.safe_dump(comment)))
            if 'changes' in sls_result:
                # Process the info to see which hosts failed (will then print more info)
                sls_ret_dict = process_sls_result(sls_result['changes'], err_file)
            else:
                # Just set it to empty since we have no changes to go off of
                sls_ret_dict = {
                    'failed': True,
                    'failed_hosts': set(),
                    'succeeded_hosts': set(),
                }
            # Add to the failure sls list
            ret['failed'] = True
            if ERROR_REQUISITE in sls_result['comment']:
                # Requisite error means we were cancelled
                ret['cancelled_sls'][sls_dict['name']] = sls_ret_dict
            else:
                # No requisite error, we actually failed
                ret['failed_sls'][sls_dict['name']] = sls_ret_dict
    return ret
class LoggingContextManager(object):
    """Context manager that redirects root logging into per-run log files.

    On enter it creates timestamped ``<ts>.<run_type>.log`` / ``.err`` files
    under ``log_dir``, points ``<run_type>.log.latest`` / ``.err.latest``
    symlinks in ``root_dir`` at them, installs a file handler on the root
    logger and removes its stream handlers; on exit everything is restored.
    """

    def __init__(self, run_type, root_dir, log_dir):
        self.run_type = run_type
        self.root_dir = root_dir
        self.log_dir = log_dir
        self.log_file = None
        self.err_file = None
        self._file_log_handler = None
        self._old_handlers = []

    @staticmethod
    def _symlink(source, target):
        """
        Symlink the given source to the given target
        """
        if os.path.islink(target):
            os.remove(target)
        # Do a relative symlink instead of absolute
        os.symlink(os.path.relpath(source, os.path.dirname(target)), target)

    def _set_up_logging(self):
        """Create log/err files + 'latest' symlinks and install the file
        handler, stashing removed stream handlers for tear-down."""
        now = datetime.now().strftime('%Y%m%d-%H%M%S')
        self.log_file = os.path.join(self.log_dir, '{}.{}.log'.format(now, self.run_type))
        self.err_file = os.path.join(self.log_dir, '{}.{}.err'.format(now, self.run_type))
        log_symlink = os.path.join(self.root_dir, '{}.log.latest'.format(self.run_type))
        err_symlink = os.path.join(self.root_dir, '{}.err.latest'.format(self.run_type))
        # "touch" the log file and symlink it to the latest
        for l in (self.log_file, self.err_file):
            with io.open(l, 'w') as _:
                pass
        self._symlink(self.log_file, log_symlink)
        self._symlink(self.err_file, err_symlink)
        self._file_log_handler = setup_logfile_logger(self.log_file)
        # Remove the other handlers, but save them so we can put them back later.
        # Bug fix: iterate over a *copy* of the handler list - the original
        # iterated root_logger.handlers directly while calling removeHandler(),
        # which mutates that same list and skips every other handler.
        for handler in list(root_logger.handlers):
            if isinstance(handler, logging.StreamHandler):
                self._old_handlers.append(handler)
                root_logger.removeHandler(handler)

    def _tear_down_logging(self):
        """Undo _set_up_logging: drop our file handler, restore the originals."""
        # First remove our new log handler if it exists
        if self._file_log_handler:
            root_logger.removeHandler(self._file_log_handler)
        # Then re-add the old handlers we removed earlier
        for handler in self._old_handlers:
            root_logger.addHandler(handler)
        # Reset our variables
        self._file_log_handler = None
        self._old_handlers = []

    # Make it a context manager
    def __enter__(self):
        self._set_up_logging()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self._tear_down_logging()
class StackdioLocalClient(LoggingContextManager):
    """Wrapper around salt's LocalClient that logs per-host errors.

    Inherits log-file setup/teardown (and ``err_file``) from
    LoggingContextManager, so it must be used as a context manager.
    """

    def __init__(self, *args, **kwargs):
        super(StackdioLocalClient, self).__init__(*args, **kwargs)
        self.salt_client = salt.client.LocalClient(settings.STACKDIO_CONFIG.salt_master_config)

    def run(self, target, function, **kwargs):
        """Run *function* on *target* minions and aggregate per-host results.

        Errors for each failing host are appended to ``self.err_file`` as YAML.

        :return: dict with a ``failed`` flag, ``failed_hosts`` /
            ``succeeded_hosts`` sets and a ``num_hosts`` count
        """
        # Readability fix: the original rebound the single name ``result`` to
        # both the cmd_iter() iterator and each host's payload; distinct
        # names make the nesting unambiguous.
        results = self.salt_client.cmd_iter(target, function, **kwargs)
        ret = {
            'failed': False,
            'failed_hosts': set(),
            'succeeded_hosts': set(),
            'num_hosts': 0,
        }
        for chunk in results:
            for host, host_result in chunk.items():
                ret['num_hosts'] += 1
                host_errors = self.process_result(host, host_result)
                if host_errors:
                    # We failed.
                    ret['failed'] = True
                    ret['failed_hosts'].add(host)
                    with io.open(self.err_file, 'at') as f:
                        f.write('Errors on host {}:\n'.format(host))
                        yaml.safe_dump(host_errors, f)
                        f.write('\n')
                else:
                    # We succeeded!
                    ret['succeeded_hosts'].add(host)
        return ret

    @staticmethod
    def process_result(host, result):
        """
        Process the host result. Should return a list of errors, an empty list
        signifying no errors were found.

        :param host: the name of the host
        :param result: the host dictionary (state data expected under 'ret')
        :return: a list of errors; if salt returned a non-dict payload under
            'ret', that payload is returned as-is (it is truthy on error)
        """
        logger.debug('result for {}: {}'.format(host, result))
        states = result['ret']
        # If we don't have a dict-like object, we know we have an error -
        # hand the raw payload back to the caller.
        if not isinstance(states, dict):
            return states
        errors = []
        for state_str, state_meta in states.items():
            if not is_state_error(state_meta):
                # Just go on to the next one, no error found
                continue
            # Now we know there is an error; only report it if it is not a
            # requisite-chain failure (those just add noise).
            if not is_requisite_error(state_meta):
                err, _ = state_error(state_str, state_meta)
                errors.append(err)
        return errors
class StackdioRunnerClient(LoggingContextManager):
    """Wrapper around salt's RunnerClient for running orchestration.

    Must be used as a context manager (see LoggingContextManager) so that
    ``err_file`` exists before :meth:`orchestrate` is called.
    """

    def __init__(self, *args, **kwargs):
        super(StackdioRunnerClient, self).__init__(*args, **kwargs)
        opts = salt.config.client_config(settings.STACKDIO_CONFIG.salt_master_config)
        self.salt_runner = salt.runner.RunnerClient(opts)

    def orchestrate(self, **kwargs):
        """Run ``state.orchestrate`` and digest its raw output.

        :return: the dict described by :func:`process_orchestrate_result`
        """
        result = self.salt_runner.cmd('state.orchestrate', **kwargs)
        return process_orchestrate_result(result, self.err_file)
| {
"content_hash": "6310a69584209bb0015a230a61dd9e91",
"timestamp": "",
"source": "github",
"line_count": 453,
"max_line_length": 98,
"avg_line_length": 33.909492273730685,
"alnum_prop": 0.5536748909576199,
"repo_name": "stackdio/stackdio",
"id": "3914a986ccb293feb52de1967ba49e39bd6902dc",
"size": "15972",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "stackdio/salt/utils/client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "6462"
},
{
"name": "HTML",
"bytes": "200474"
},
{
"name": "JavaScript",
"bytes": "365621"
},
{
"name": "Makefile",
"bytes": "567"
},
{
"name": "Python",
"bytes": "1034237"
},
{
"name": "SaltStack",
"bytes": "4594"
},
{
"name": "Scheme",
"bytes": "2371"
},
{
"name": "Shell",
"bytes": "6131"
}
],
"symlink_target": ""
} |
"""
App configuration for ``polymorphic_auth.email`` app.
"""
# Register signal handlers, but avoid interacting with the database.
# See: https://docs.djangoproject.com/en/1.8/ref/applications/#django.apps.AppConfig.ready
from django.apps import AppConfig
from django.utils.module_loading import autodiscover_modules
class AppConfig(AppConfig):
    """App config for ``polymorphic_auth.usertypes.email``.

    NOTE(review): this class deliberately shadows the imported
    ``django.apps.AppConfig`` name; it works because the base class is
    resolved before the module-level name is rebound.
    """
    # Derive the dotted app path from this module's own package so the app
    # can be relocated without editing this file.
    name = '.'.join(__name__.split('.')[:-1])  # Portable
    label = 'polymorphic_auth_email'
| {
"content_hash": "b47e19dccc06839d50d7f47e0feee5d9",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 90,
"avg_line_length": 31.714285714285715,
"alnum_prop": 0.7319819819819819,
"repo_name": "ixc/django-polymorphic-auth",
"id": "15d8b6480aa6b6050555e7b2141cdd9c93e41fb7",
"size": "444",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "polymorphic_auth/usertypes/email/apps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "45505"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import pytest
import schematec.converters as converters
import schematec.exc as exc
def test_none():
    """None cannot be converted to an array."""
    with pytest.raises(exc.ConvertationError):
        converters.array(None)
def test_empty_list():
    """An empty list converts to an empty list."""
    assert converters.array([]) == []
def test_empty_tuple():
    """An empty tuple converts to an empty list."""
    assert converters.array(()) == []
def test_empty_dict():
    """Dicts are rejected even when empty."""
    with pytest.raises(exc.ConvertationError):
        converters.array({})
def test_empty_str():
    """An empty byte string converts to an empty list."""
    assert converters.array('') == []
def test_empty_unicode():
    """An empty unicode string converts to an empty list."""
    assert converters.array(u'') == []
def test_full_list():
    """A populated list converts to an equal list, elements untouched."""
    assert converters.array([0, 1, '0', '1', [], {}]) == [0, 1, '0', '1', [], {}]
def test_full_tuple():
    """A populated tuple converts to the equivalent list."""
    assert converters.array((0, 1, '0', '1', [], {})) == [0, 1, '0', '1', [], {}]
def test_full_dict():
    """Dicts are rejected even when populated."""
    with pytest.raises(exc.ConvertationError):
        converters.array({0: '0', 1: '1'})
def test_full_str():
    """A one-character byte string becomes a one-element list."""
    assert converters.array('0') == ['0']
def test_full_unicode():
    """A one-character unicode string becomes a one-element list."""
    assert converters.array(u'0') == [u'0']
def test_int():
    """Plain integers are not iterable and are rejected."""
    with pytest.raises(exc.ConvertationError):
        converters.array(0)
def test_generator():
    """Generators are consumed into a list (``xrange`` - Python 2 module)."""
    assert converters.array((x for x in xrange(5))) == [0, 1, 2, 3, 4]
def test_list_comprehention():
    """A list built by comprehension converts to an equal list."""
    # NOTE(review): "comprehention" is a typo for "comprehension"; renaming
    # would change the collected test id, so it is only flagged here.
    assert converters.array([x for x in xrange(5)]) == [0, 1, 2, 3, 4]
def test_object():
    """Arbitrary non-iterable objects are rejected."""
    class Object(object):
        pass
    with pytest.raises(exc.ConvertationError):
        converters.array(Object())
| {
"content_hash": "337f2a93a19d903c2ef8b315b8b73bcf",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 81,
"avg_line_length": 19.95945945945946,
"alnum_prop": 0.6086662153012864,
"repo_name": "mylokin/schematec",
"id": "114baa60f3db3ba0efee775fe824e36c62df1ef9",
"size": "1477",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_converter_array.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "217"
},
{
"name": "Python",
"bytes": "31256"
}
],
"symlink_target": ""
} |
"""Packaging script for lice."""
from lice.core import __version__
from setuptools import setup, find_packages

# Read the long description with a context manager so the file handle is
# closed deterministically (the original bare open().read() leaked it to
# the garbage collector).
with open('README.rst') as readme:
    long_description = readme.read()

setup(
    name="lice",
    version=__version__,
    author="Jeremy Carbaugh",
    author_email="jcarbaugh@gmail.com",
    url='https://github.com/jcarbaugh/lice',
    description='Generate a license file for a project',
    long_description=long_description,
    license='BSD',
    packages=find_packages(),
    # Ship the bundled license template text files with the package.
    package_data={'lice': ['*.txt']},
    include_package_data=True,
    entry_points={
        'console_scripts': ['lice = lice:main']},
    platforms=['any'],
)
| {
"content_hash": "0b7959f7d48f28afffa2c2a8904507be",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 56,
"avg_line_length": 28.428571428571427,
"alnum_prop": 0.6532663316582915,
"repo_name": "stupidamigo/lice-python",
"id": "93ddaed4599b099c3c70a24d5160355f3a709d2e",
"size": "597",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from typing import Any
from argparse import ArgumentParser
from zerver.lib.actions import do_deactivate_realm
from zerver.lib.management import ZulipBaseCommand
class Command(ZulipBaseCommand):
    """Management command that deactivates a single realm."""

    help = """Script to deactivate a realm."""

    def add_arguments(self, parser: ArgumentParser) -> None:
        # Second argument True makes the realm argument required.
        self.add_realm_args(parser, True)

    def handle(self, *args: Any, **options: str) -> None:
        """Deactivate the requested realm, or no-op if already deactivated."""
        realm = self.get_realm(options)
        assert realm is not None  # Should be ensured by parser
        if realm.deactivated:
            # Idempotent: exit cleanly rather than failing.
            print("The realm", options["realm_id"], "is already deactivated.")
            exit(0)
        print("Deactivating", options["realm_id"])
        do_deactivate_realm(realm)
        print("Done!")
"content_hash": "cb8cfbae48e09c914bc949f20e0632db",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 78,
"avg_line_length": 32.608695652173914,
"alnum_prop": 0.6626666666666666,
"repo_name": "Galexrt/zulip",
"id": "1704de37a55db47618148a46d75c7ca700c37ee7",
"size": "751",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "zerver/management/commands/deactivate_realm.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "164"
},
{
"name": "CSS",
"bytes": "181865"
},
{
"name": "CoffeeScript",
"bytes": "18435"
},
{
"name": "Groovy",
"bytes": "5515"
},
{
"name": "HTML",
"bytes": "376447"
},
{
"name": "JavaScript",
"bytes": "1570488"
},
{
"name": "Nginx",
"bytes": "1228"
},
{
"name": "PHP",
"bytes": "18930"
},
{
"name": "Pascal",
"bytes": "1113"
},
{
"name": "Perl",
"bytes": "383634"
},
{
"name": "Puppet",
"bytes": "93562"
},
{
"name": "Python",
"bytes": "1830400"
},
{
"name": "Ruby",
"bytes": "255867"
},
{
"name": "Shell",
"bytes": "32366"
}
],
"symlink_target": ""
} |
"""A binary to train CIFAR-10 using multiple GPUs with synchronous updates.
Accuracy:
cifar10_multi_gpu_train.py achieves ~86% accuracy after 100K steps (256
epochs of data) as judged by cifar10_eval.py.
Speed: With batch_size 128.
System | Step Time (sec/batch) | Accuracy
--------------------------------------------------------------------
1 Tesla K20m | 0.35-0.60 | ~86% at 60K steps (5 hours)
1 Tesla K40m | 0.25-0.35 | ~86% at 100K steps (4 hours)
2 Tesla K20m | 0.13-0.20 | ~84% at 30K steps (2.5 hours)
3 Tesla K20m | 0.13-0.18 | ~84% at 30K steps
4 Tesla K20m | ~0.10 | ~84% at 30K steps
Usage:
Please see the tutorial and website for how to download the CIFAR-10
data set, compile the program and train the model.
http://tensorflow.org/tutorials/deep_cnn/
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import re
import time
from datetime import datetime
import numpy as np
import tensorflow as tf
from six.moves import xrange # pylint: disable=redefined-builtin
import cifar10
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('train_dir', '/tmp/cifar10_train',
"""Directory where to write event logs """
"""and checkpoint.""")
tf.app.flags.DEFINE_integer('max_steps', 1000000,
"""Number of batches to run.""")
tf.app.flags.DEFINE_integer('num_gpus', 1,
"""How many GPUs to use.""")
tf.app.flags.DEFINE_boolean('log_device_placement', False,
"""Whether to log device placement.""")
def tower_loss(scope, images, labels):
    """Calculate the total loss on a single tower running the CIFAR model.

    Args:
    scope: unique prefix string identifying the CIFAR tower, e.g. 'tower_0'
    images: Images. 4D tensor of shape [batch_size, height, width, 3].
    labels: Labels. 1D tensor of shape [batch_size].

    Returns:
    Tensor of shape [] containing the total loss for a batch of data
    """
    # Build inference Graph.
    logits = cifar10.inference(images)
    # Build the portion of the Graph calculating the losses. Note that we will
    # assemble the total_loss using a custom function below.
    _ = cifar10.loss(logits, labels)
    # Assemble all of the losses for the current tower only (the scope filter
    # keeps other towers' losses out of this tower's collection lookup).
    losses = tf.get_collection('losses', scope)
    # Calculate the total loss for the current tower.
    total_loss = tf.add_n(losses, name='total_loss')
    # Attach a scalar summary to all individual losses and the total loss; do the
    # same for the averaged version of the losses.
    for l in losses + [total_loss]:
        # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
        # session. This helps the clarity of presentation on tensorboard.
        loss_name = re.sub('%s_[0-9]*/' % cifar10.TOWER_NAME, '', l.op.name)
        tf.summary.scalar(loss_name, l)
    return total_loss
def average_gradients(tower_grads):
    """Average each shared variable's gradient across all towers.

    Note that this function provides a synchronization point across all
    towers: every tower's gradients must exist before the means are taken.

    Args:
      tower_grads: List of lists of (gradient, variable) tuples. The outer
        list is over individual gradients. The inner list is over the
        gradient calculation for each tower.

    Returns:
      List of pairs of (gradient, variable) where the gradient has been
      averaged across all towers.
    """
    averaged = []
    # zip(*...) regroups the per-tower lists into per-variable tuples:
    # ((grad0_gpu0, var0_gpu0), ..., (grad0_gpuN, var0_gpuN)).
    for per_variable in zip(*tower_grads):
        # Give each gradient a leading 'tower' axis, stack along it, then
        # reduce that axis to its mean.
        stacked = tf.concat(
            axis=0,
            values=[tf.expand_dims(g, 0) for g, _ in per_variable])
        mean_grad = tf.reduce_mean(stacked, 0)
        # The Variables are shared (redundant) across towers, so the first
        # tower's pointer to the Variable is as good as any.
        averaged.append((mean_grad, per_variable[0][1]))
    return averaged
def train():
    """Train CIFAR-10 for a number of steps.

    Builds one model replica ("tower") per GPU under a shared variable
    scope, averages the towers' gradients on the CPU, and runs a
    synchronous training loop that logs throughput every 10 steps, writes
    summaries every 100 steps, and checkpoints every 1000 steps and at the
    final step.
    """
    with tf.Graph().as_default(), tf.device('/cpu:0'):
        # Create a variable to count the number of train() calls. This equals the
        # number of batches processed * FLAGS.num_gpus.
        global_step = tf.get_variable(
            'global_step', [],
            initializer=tf.constant_initializer(0), trainable=False)
        # Calculate the learning rate schedule.
        num_batches_per_epoch = (cifar10.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN /
                                 FLAGS.batch_size / FLAGS.num_gpus)
        decay_steps = int(num_batches_per_epoch * cifar10.NUM_EPOCHS_PER_DECAY)
        # Decay the learning rate exponentially based on the number of steps.
        lr = tf.train.exponential_decay(cifar10.INITIAL_LEARNING_RATE,
                                        global_step,
                                        decay_steps,
                                        cifar10.LEARNING_RATE_DECAY_FACTOR,
                                        staircase=True)
        # Create an optimizer that performs gradient descent.
        opt = tf.train.GradientDescentOptimizer(lr)
        # Get images and labels for CIFAR-10.
        # NOTE(review): batch_size is not defined in this file; presumably it
        # is a flag registered by the cifar10 module (cifar10.FLAGS and FLAGS
        # are the same tf.app.flags singleton) -- confirm.
        images, labels = cifar10.distorted_inputs()
        # NOTE(review): 24x24x3 is assumed to match the distorted-input crop
        # size produced by cifar10.distorted_inputs -- TODO confirm.
        images = tf.reshape(images, [cifar10.FLAGS.batch_size, 24, 24, 3])
        labels = tf.reshape(labels, [cifar10.FLAGS.batch_size])
        # Prefetch queue decouples the input pipeline from the GPU towers.
        batch_queue = tf.contrib.slim.prefetch_queue.prefetch_queue(
            [images, labels], capacity=2 * FLAGS.num_gpus)
        # Calculate the gradients for each model tower.
        tower_grads = []
        with tf.variable_scope(tf.get_variable_scope()):
            for i in xrange(FLAGS.num_gpus):
                with tf.device('/gpu:%d' % i):
                    with tf.name_scope('%s_%d' % (cifar10.TOWER_NAME, i)) as scope:
                        # Dequeues one batch for the GPU
                        image_batch, label_batch = batch_queue.dequeue()
                        # Calculate the loss for one tower of the CIFAR model. This function
                        # constructs the entire CIFAR model but shares the variables across
                        # all towers.
                        loss = tower_loss(scope, image_batch, label_batch)
                        # Reuse variables for the next tower.
                        tf.get_variable_scope().reuse_variables()
                        # Retain the summaries from the final tower.
                        summaries = tf.get_collection(tf.GraphKeys.SUMMARIES, scope)
                        # Calculate the gradients for the batch of data on this CIFAR tower.
                        grads = opt.compute_gradients(loss)
                        # Keep track of the gradients across all towers.
                        tower_grads.append(grads)
        # We must calculate the mean of each gradient. Note that this is the
        # synchronization point across all towers.
        grads = average_gradients(tower_grads)
        # Add a summary to track the learning rate.
        summaries.append(tf.summary.scalar('learning_rate', lr))
        # Add histograms for gradients.
        for grad, var in grads:
            if grad is not None:
                summaries.append(tf.summary.histogram(var.op.name + '/gradients', grad))
        # Apply the gradients to adjust the shared variables.
        apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
        # Add histograms for trainable variables.
        for var in tf.trainable_variables():
            summaries.append(tf.summary.histogram(var.op.name, var))
        # Track the moving averages of all trainable variables.
        variable_averages = tf.train.ExponentialMovingAverage(
            cifar10.MOVING_AVERAGE_DECAY, global_step)
        variables_averages_op = variable_averages.apply(tf.trainable_variables())
        # Group all updates to into a single train op.
        train_op = tf.group(apply_gradient_op, variables_averages_op)
        # Create a saver.
        saver = tf.train.Saver(tf.global_variables())
        # Build the summary operation from the last tower summaries.
        summary_op = tf.summary.merge(summaries)
        # Build an initialization operation to run below.
        init = tf.global_variables_initializer()
        # Start running operations on the Graph. allow_soft_placement must be set to
        # True to build towers on GPU, as some of the ops do not have GPU
        # implementations.
        sess = tf.Session(config=tf.ConfigProto(
            allow_soft_placement=True,
            log_device_placement=FLAGS.log_device_placement))
        sess.run(init)
        # Start the queue runners.
        tf.train.start_queue_runners(sess=sess)
        summary_writer = tf.summary.FileWriter(FLAGS.train_dir, sess.graph)
        for step in xrange(FLAGS.max_steps):
            start_time = time.time()
            # 'loss' here is the last tower's loss tensor, left over from the
            # graph-construction loop above.
            _, loss_value = sess.run([train_op, loss])
            duration = time.time() - start_time
            assert not np.isnan(loss_value), 'Model diverged with loss = NaN'
            if step % 10 == 0:
                num_examples_per_step = FLAGS.batch_size * FLAGS.num_gpus
                examples_per_sec = num_examples_per_step / duration
                sec_per_batch = duration / FLAGS.num_gpus
                format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
                              'sec/batch)')
                print (format_str % (datetime.now(), step, loss_value,
                                     examples_per_sec, sec_per_batch))
            if step % 100 == 0:
                summary_str = sess.run(summary_op)
                summary_writer.add_summary(summary_str, step)
            # Save the model checkpoint periodically.
            if step % 1000 == 0 or (step + 1) == FLAGS.max_steps:
                checkpoint_path = os.path.join(FLAGS.train_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)
def main(argv=None):  # pylint: disable=unused-argument
    """Entry point: start training from a freshly emptied train_dir."""
    train_dir = FLAGS.train_dir
    # Wipe any previous run's logs/checkpoints before recreating the dir.
    if tf.gfile.Exists(train_dir):
        tf.gfile.DeleteRecursively(train_dir)
    tf.gfile.MakeDirs(train_dir)
    train()
# Standard TF 1.x script entry: tf.app.run() parses flags, then calls main().
if __name__ == '__main__':
    tf.app.run()
| {
"content_hash": "bc52929982cb3889d1b6cb064b7b0b75",
"timestamp": "",
"source": "github",
"line_count": 264,
"max_line_length": 80,
"avg_line_length": 38.13636363636363,
"alnum_prop": 0.646404449741756,
"repo_name": "alexgorban/models",
"id": "8cb8a096f844acace21a6a74e294e5a6e13ce5ff",
"size": "10758",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tutorials/image/cifar10/cifar10_multi_gpu_train.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "1619012"
},
{
"name": "Dockerfile",
"bytes": "9821"
},
{
"name": "GLSL",
"bytes": "976"
},
{
"name": "HTML",
"bytes": "147010"
},
{
"name": "JavaScript",
"bytes": "33316"
},
{
"name": "Jupyter Notebook",
"bytes": "454746"
},
{
"name": "Makefile",
"bytes": "4933"
},
{
"name": "Python",
"bytes": "16363107"
},
{
"name": "Shell",
"bytes": "144095"
},
{
"name": "Starlark",
"bytes": "148029"
}
],
"symlink_target": ""
} |
"""Moves patterns in path formats (suitable for moving articles)."""
from __future__ import division, absolute_import, print_function
import re
from beets.plugins import BeetsPlugin
__author__ = 'baobab@heresiarch.info'
__version__ = '1.1'

# Regex tried (case-insensitively) against the start of a string to find a
# leading article.  The backslash is doubled so the pattern contains a literal
# r"\s" without relying on the invalid escape sequence `\s` in a non-raw
# string literal (a DeprecationWarning since Python 3.6); the compiled
# pattern is byte-identical to the original.
# NOTE(review): '[the]{3}' matches any three characters drawn from t/h/e
# ("the", but also e.g. "teh"); kept as-is to preserve behaviour.
PATTERN_THE = u'^[the]{3}\\s'
# Matches a leading "a " or "an ".
PATTERN_A = u'^[a][n]?\\s'
# Default output template: "<rest>, <article>".
FORMAT = u'{0}, {1}'
class ThePlugin(BeetsPlugin):
    """Move leading articles in path-format strings.

    Provides the %the{...} template function, which rewrites e.g.
    u'The Beatles' as u'Beatles, The' (or strips the article entirely
    when the 'strip' option is set).
    """

    # Regex pattern strings tried in order; populated in __init__ from the
    # user-configured 'patterns' plus the built-in PATTERN_A / PATTERN_THE.
    patterns = []

    def __init__(self):
        super(ThePlugin, self).__init__()
        # Register %the{...} as a path-format template function.
        self.template_funcs['the'] = self.the_template_func
        self.config.add({
            'the': True,
            'a': True,
            'format': u'{0}, {1}',
            'strip': False,
            'patterns': [],
        })
        self.patterns = self.config['patterns'].as_str_seq()
        for p in self.patterns:
            if p:
                # Validate each user pattern once at load time so a bad
                # regex is reported here rather than on every template call.
                try:
                    re.compile(p)
                except re.error:
                    self._log.error(u'invalid pattern: {0}', p)
                else:
                    # Unanchored patterns are accepted but warned about,
                    # since they probably don't do what the user intends.
                    if not (p.startswith('^') or p.endswith('$')):
                        self._log.warning(u'warning: \"{0}\" will not '
                                          u'match string start/end', p)
        # Built-in article patterns are prepended, so they are tried before
        # any user-configured patterns.
        if self.config['a']:
            self.patterns = [PATTERN_A] + self.patterns
        if self.config['the']:
            self.patterns = [PATTERN_THE] + self.patterns
        if not self.patterns:
            self._log.warning(u'no patterns defined!')

    def unthe(self, text, pattern):
        """Move or strip one pattern in the path-format string.

        text -- text to handle
        pattern -- regexp pattern (case ignore is already on)

        If the 'strip' config option is set, the matched pattern is
        removed; otherwise the remainder and the match are recombined via
        the 'format' config template (default: u'rest, The').  Text with
        no match is returned unchanged; falsy input yields u''.
        """
        if text:
            r = re.compile(pattern, flags=re.IGNORECASE)
            try:
                t = r.findall(text)[0]
            except IndexError:
                # Pattern did not match: leave the text untouched.
                return text
            else:
                # NOTE: 'r' is rebound here from the compiled pattern to
                # the remainder string after removing the match.
                r = re.sub(r, '', text).strip()
                if self.config['strip']:
                    return r
                else:
                    fmt = self.config['format'].as_str()
                    return fmt.format(r, t.strip()).strip()
        else:
            return u''

    def the_template_func(self, text):
        """Template function %the{text}: apply the first matching pattern.

        Patterns are tried in order; the first one that changes the text
        wins.  With no patterns configured the text is returned unchanged,
        and falsy input yields u''.
        """
        if not self.patterns:
            return text
        if text:
            for p in self.patterns:
                r = self.unthe(text, p)
                if r != text:
                    # First pattern that changed the text wins.
                    break
            self._log.debug(u'\"{0}\" -> \"{1}\"', text, r)
            return r
        else:
            return u''
| {
"content_hash": "d4a938005f572c5eaa5eb59db9c72e7d",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 71,
"avg_line_length": 30.011764705882353,
"alnum_prop": 0.47040376323010585,
"repo_name": "diego-plan9/beets",
"id": "cfb583ced1408ecdb74372153246ffc2009ecc7b",
"size": "3250",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "beetsplug/the.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2951"
},
{
"name": "HTML",
"bytes": "3307"
},
{
"name": "JavaScript",
"bytes": "85950"
},
{
"name": "Python",
"bytes": "1820166"
},
{
"name": "Shell",
"bytes": "7413"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.