text
stringlengths
4
1.02M
meta
dict
from django import template from django.conf import settings from django.templatetags.static import static from django.utils.safestring import mark_safe register = template.Library() @register.filter(is_safe=True) def dep(value="polymer/polymer.html"): # should you really be able to do this over reverse_lazy??? static_url = static( "materialdjango/components/bower_components/%s" % value) return '<link rel="import" href="{0}">'.format(static_url) @register.simple_tag def polymer_shim(): static_url = static('materialdjango/components/bower_components/webcomponentsjs/webcomponents.js') return mark_safe("<script src='{0}'></script>".format(static_url))
{ "content_hash": "0241225e217aa607c7c3096aca0503cf", "timestamp": "", "source": "github", "line_count": 20, "max_line_length": 102, "avg_line_length": 34.5, "alnum_prop": 0.7391304347826086, "repo_name": "Colorless-Green-Ideas/MaterialDjango", "id": "a1bba7ff62a6a58df905f60f5e60afc4ecbbb3ba", "size": "690", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "materialdjango/templatetags/polymerdep.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "225" }, { "name": "HTML", "bytes": "4094" }, { "name": "JavaScript", "bytes": "649112" }, { "name": "Python", "bytes": "12890" } ], "symlink_target": "" }
""" Models class holds the different models used during Extraction """ __author__ = """\n""".join(['Jeffrey Schmidt (jschmid1@binghamton.edu', 'Benjamin Bush (benjaminjamesbush@gmail.com)', 'Hiroki Sayama (sayama@binghamton.edu)']) __all__ = ['addModel','getModelList','addDefaultModelsToList'] # Copyright (C) 2012 by # Jeffrey Schmidt <jschmid1@binghamton.edu> # Benjamin Bush <benjaminjamesbush@gmail.com> # Hiroki Sayama <sayama@binghamton.edu> # All rights reserved. # BSD license. import Model class Models(object): def __init__(self): self.modelList = [] def addModel(self, model): """Mutator that adds a model to the model list Parameters ---------- model : Function object Model function to be used during extraction Returns ------- void """ self.modelList.append(model) def getModelList(self): """Accessor that returns the model list Parameters ---------- None Returns ------- modelList : List of model instances List of model instances to be used for Extraction """ return self.modelList def addDefaultModelsToList(self): """Function that adds a default set of model classes to the model list Parameters ---------- None Returns ------- Void """ def state(G, nodeID, state): stateVal = 0.001 if state in G.node[nodeID]: if nodeID in G.nodes(): if state in G.node[nodeID]: stateVal = 0.001 if G.node[nodeID][state] == 0 else G.node[nodeID][state] else: raise KeyError, "The nodeID: #%d does not exist in the graph." %nodeID else: raise KeyError, "State information does not exist for this graph." return stateVal def ImprovedState(G, subgraph, state): states = [G.node[node][state] for node in G.nodes()] uniqueStates = set(states) uniqueStateOccuranceMap = {} for states in uniqueStates: uniqueStateOccuranceMap[states] = len([node for node in G.nodes() if G.node[node][state] == states]) stateCheck = [(state in G.node[x]) for x in subgraph.nodes()] if False in stateCheck: raise KeyError, "State information does not exist for this graph." else: retVal = 0. for node in subgraph.nodes(): currentState = G.node[node][state] retVal += (1.0/uniqueStateOccuranceMap[currentState] ) * ( 1.0/len(uniqueStates)) return retVal def degree(G, subgraph, state): degreeVal = 0. nodeTest = [node in G.nodes() for node in subgraph.nodes()] if False in nodeTest: raise KeyError, "The nodeID: #%d does not exist in the graph." %subgraph.nodes()[nodeTest.index(False)] else: for nodes in subgraph: degreeVal += G.degree(nodes) degreeVal = 0.001 if degreeVal == 0 else degreeVal return degreeVal def stateDegree(G, nodeID, state): stateDegreeVal = 0. if nodeID in G.nodes(): if state in G.node[nodeID]: stateVal = 0.001 if G.node[nodeID][state] == 0 else G.node[nodeID][state] degreeVal = 0.001 if G.degree(nodeID) == 0 else G.degree(nodeID) stateDegreeVal = degreeVal * stateVal else: raise KeyError, "State information does not exist for this graph." else: raise KeyError, "The nodeID: #%d does not exist in the graph." %nodeID stateDegreeVal = 0.001 if stateDegreeVal == 0 else stateDegreeVal return stateDegreeVal def degreeState(G, subgraph, state): nodeTest = [node in G.nodes() for node in subgraph.nodes()] if False in nodeTest: raise KeyError, "The nodeID: #%d does not exist in the graph." %subgraph.nodes()[nodeTest.index(False)] else: stateCheck = [(state in G.node[x]) for x in subgraph.nodes()] if False in stateCheck: raise KeyError, "State information does not exist for this graph." else: retVal = 0. states = [G.node[node][state] for node in G.nodes()] uniqueStates = set(states) degreeTotalForStateMap = {} for states in uniqueStates: degreeTotalForStateMap[states] = sum([G.degree(node)+1 for node in G.nodes() if G.node[node][state] == states]) for node in subgraph: currentState = G.node[node][state] retVal += ((G.degree(node)+1.0)/degreeTotalForStateMap[currentState] ) * ( 1.0/len(uniqueStates)) return retVal def baseCase(G, nodeID, state): return 1 degreeModel = Model.Model(degree, 'degree') stateModel = Model.Model(ImprovedState, 'state') degreeStateModel = Model.Model(degreeState, 'degreeState') baseModel = Model.Model(baseCase, 'baseCase') self.addModel(degreeModel) self.addModel(stateModel) self.addModel(degreeStateModel) #self.addModel(baseModel)
{ "content_hash": "fd231a95f6ce08ba56d89c3eac102a51", "timestamp": "", "source": "github", "line_count": 155, "max_line_length": 135, "avg_line_length": 37.87096774193548, "alnum_prop": 0.5248722316865417, "repo_name": "schmidtj/PyGNA", "id": "c5fcb1753219d4245d511e18ff6367cc0ad65b37", "size": "5870", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "PyGNA/Models.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Python", "bytes": "237868" }, { "name": "Shell", "bytes": "137" } ], "symlink_target": "" }
import numpy as np import progtimer from scipy.io import wavfile def timelapse(input_signal, ratio, window_size, overlap=4): hop_in = window_size/overlap hop_out = hop_in/ratio #print "Ratio: ", float(Hin)/float(Hout), " Hout:", Hout # pre- and postpad padding = window_size - hop_in signal_in = np.zeros(len(input_signal) + 2*padding) signal_in[padding : len(input_signal)+padding] = input_signal length = len(signal_in) window = np.hanning(window_size) signal_out = np.zeros(length/ratio + window_size) unit_angle = np.ones(window_size, dtype='complex') non_zero = np.ones(window_size) * 1e-15 pt = progtimer.ProgTimer() p_in = 0 p_out = 0 while p_in < length - (window_size + hop_out): p1_int = int(p_in) spectrum_last = np.fft.fft(window * signal_in[p1_int : p1_int+window_size]) magnitude_last = np.abs(spectrum_last) p2_int = int(p_in + hop_out) spectrum = np.fft.fft(window * signal_in[p2_int : p2_int+window_size]) magnitude = np.abs(spectrum) unit_angle *= (spectrum * magnitude_last) / (magnitude * spectrum_last + non_zero) p_out_int = int(p_out) signal_out[p_out_int : p_out_int + window_size] += window * np.fft.ifft(unit_angle*magnitude).real p_out += hop_out p_in += hop_in pt.tick(p_in, length) return signal_out[0:p_out-hop_out] def main(): window_size = 16384 condensation_ratio = 10 overlap = 8 filename = "lafille" (sampling_rate, signal_in) = wavfile.read(filename+".wav") #signal_in = np.append(np.zeros(window_size/2), signal_in) signal_out = timelapse(signal_in, condensation_ratio, window_size, overlap=overlap) signal_out = np.nan_to_num(signal_out) signal_out -= np.mean(signal_out) # account for dc offset signal_out *= 2**14 / np.amax(signal_out) signal_out = np.array(signal_out, dtype="int16") wavfile.write(filename+"_timelapse.wav", sampling_rate, signal_out) print("Done.") main()
{ "content_hash": "76618f9cec03843ec44e8033ee79c88f", "timestamp": "", "source": "github", "line_count": 70, "max_line_length": 106, "avg_line_length": 29.314285714285713, "alnum_prop": 0.6296296296296297, "repo_name": "ollierik/audio-timelapse", "id": "3d847fde66ecc8b2ad10c2613b9af7352fe58690", "size": "2257", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "timelapse.py", "mode": "33188", "license": "bsd-2-clause", "language": [ { "name": "Python", "bytes": "3359" } ], "symlink_target": "" }
from urllib.request import urlopen import re import json import sys from collections import OrderedDict import logging from bs4 import BeautifulSoup def with_command(tag): tokens = ["submenu", "sub-menu", "command"] txt = tag.get_text().lower() return any(token in txt for token in tokens) def with_readonly(tag): tokens = ["readonly", "read-only", "read only"] txt = tag.get_text().lower() return any(token in txt for token in tokens) def parse_page(page_url): response = urlopen(page_url) soup = BeautifulSoup(response.read(), "html5lib") container = soup.find(class_="manual") if not container: container = soup.find(id="bodyContent") result = [] current_commands = None read_only = False def_regexp_strict = re.compile(r""" ^(?P<prop>[^\s]+) \s\( (?P<values>[^;]*) (;\s(D|d)efault:\s?(?P<default>.*)|;|) \)$ """, re.VERBOSE) def_regexp_nonstrict = re.compile(r""" ^(?P<prop>.+) \s\( (?P<values>.*) \)$ """, re.VERBOSE | re.DOTALL) for elem in container.children: # Skip regular strings and tocs if isinstance(elem, str) or elem.get('id') == "toc": continue # Look for tables if elem.get("class") and "styled_table" in elem['class']: # Analyze table headers headers = elem.find_all("th") if not headers: log.info("Found table without headers, skipping") continue else: col1_title = headers[0].get_text().strip().lower() if col1_title != "property": log.info("Found table with header: {}, skipping".format(col1_title)) continue log.debug("Found property table") if current_commands is None: log.warning("Current commands is undefined, skipping") continue # Analyze rows for row in elem.find_all("tr"): cells = row.find_all("td") if not cells: continue definition = cells[0].get_text() mt_strict = def_regexp_strict.match(definition) mt_nonstrict = def_regexp_nonstrict.match(definition) if mt_strict: prop = mt_strict.group("prop").strip() values_raw = mt_strict.group("values").split("|") if len(values_raw) > 1: values = [v.strip() for v in values_raw] else: values = values_raw[0].strip() or None default = mt_strict.group("default") or None if read_only and default is not None: log.info("Default value for read-only property: {0}".format(prop)) elif mt_nonstrict: prop = mt_nonstrict.group("prop").strip() values = mt_nonstrict.group("values").strip() or None default = None else: log.info("Unable to parse definition: {}".format(definition)) continue description = cells[1].get_text().strip() for command in current_commands: result.append({ 'name': prop, command: { 'type': ['readwrite', 'readonly'][read_only], 'values': values, 'default': default, 'description': description, }, }) # Reset read-only flag read_only = False # Skip other checks continue # Look for "submenu" declaration if with_command(elem) or elem.find(with_command): codes = [code.string for code in elem.find_all("code") if code.string and code.string.strip().startswith("/")] if not codes: continue elif len(codes) > 1: log.debug("Miltiple code tags") current_commands = [comm.strip() for comm in codes[0].split(",")] log.debug("Found commands: {0}".format(current_commands)) # Look for "read-only" mark if with_readonly(elem) or elem.find(with_readonly): read_only = True log.debug("Found 'read-only' mark") return result def get_pages(root, toc_url): response = urlopen(root + toc_url) soup = BeautifulSoup(response.read()) tables = soup.find_all(id="shtable") toc = OrderedDict() for table in tables: rows = table.find_all("tr") cells = zip(rows[0].find_all("td"), rows[1].find_all("td")) for head, lst in cells: # Menus menu = head.get_text().strip() if not menu: continue toc[menu] = [] # Pages for li in lst.find_all("li"): link = root + li.a["href"] toc[menu].append(link) return toc def parse_wiki(): toc = get_pages("http://wiki.mikrotik.com", "/wiki/Manual:TOC_by_Menu") results = {} for menu, pages in toc.items(): log.info("Processing: {}".format(menu)) for page in pages: log.info("Processing: {}".format(page)) for propdef in parse_page(page): name = propdef.pop('name') if name not in results: results[name] = {'name': name, 'references': {}} results[name]['references'].update(propdef) return list(results.values()) logging.basicConfig( format="{asctime} [{levelname}] :: {message}", style='{', level=logging.INFO, handlers=[logging.StreamHandler()]) log = logging.getLogger(__name__) if __name__ == "__main__": with open(sys.argv[1], "w") as f: json.dump(parse_wiki(), f)
{ "content_hash": "c6644f63206b59844d6f017dcf12f6e6", "timestamp": "", "source": "github", "line_count": 168, "max_line_length": 90, "avg_line_length": 36.05357142857143, "alnum_prop": 0.5061911837543338, "repo_name": "Kentzo/MikrotikWikiParser", "id": "c4c521da9aae5fa2d13674057b31a68ddb96b9e7", "size": "6057", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "mikrotik.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "5492" } ], "symlink_target": "" }
import os, subprocess from posixpath import join, basename, splitext, exists from django.core import urlresolvers from django.utils.translation import ugettext_lazy as _ from django.db import models from django.conf import settings from cms.models import CMSPlugin from filer.fields.file import FilerFileField from filer.models.filemodels import File from filer import settings as filer_settings class Video(File): @classmethod def matches_file_type(cls, iname, ifile, request): # the extensions we'll recognise for this file type filename_extensions = ['.dv', '.mov', '.mp4', '.avi', '.wmv',] ext = os.path.splitext(iname)[1].lower() return ext in filename_extensions # we get to use Filer's video icon free _icon = "video" # def get_admin_url_path(self): # return urlresolvers.reverse('admin:filer_video_change', args=(self.id,)) class ArkestraVideo(Video): class Meta: proxy = True verbose_name = "Video" class FilerVideoField(FilerFileField): default_model_class = Video class VideoPluginEditor(CMSPlugin): LEFT = "left" RIGHT = "right" FLOAT_CHOICES = ((LEFT, _("left")), (RIGHT, _("right")), ) video = FilerVideoField() VIDEO_WIDTHS = ( (1000.0, u"Automatic"), (u'Widths relative to the containing column', ( (100.0, u"100%"), (75.0, u"75%"), (66.7, u"66%"), (50.0, u"50%"), (33.3, u"33%"), (25.0, u"25%"), ) ), ('', u"Video's native width - on your head be it"), ) width = models.FloatField(null=True, blank=True, choices = VIDEO_WIDTHS, default = 1000.0) use_description_as_caption = models.BooleanField(verbose_name = "Use description", default=False, help_text = "Use image's description field as caption") caption = models.TextField(_("Caption"), blank=True, null=True) float = models.CharField(_("float"), max_length=10, blank=True, null=True, choices=FLOAT_CHOICES) def __unicode__(self): if self.video: return self.video.label else: return u"Video %s" % self.caption return '' class VideoVersion(models.Model): source = FilerVideoField() size = models.SmallIntegerField(blank=True, null=True,) codec = models.CharField(max_length=20, blank=True, null=True,) status = models.CharField(max_length=20, default = "newly-created", blank=True, null=True,) def codec_and_size(self): # returns a string containing codec and size - e.g. h264-720 - used in various ways, such as version filenames return "-".join((CODECS[self.codec]["code"], str(self.size))) def outputpath(self): # the output path and filename for the version return os.path.join(self.abs_directory_path(), \ "-".join((self.filename_without_extension(), \ self.codec_and_size())) \ + CODECS[self.codec]["extension"]) def filename_without_extension(self): # e.g. "video" return os.path.splitext(self.filename())[0].lower() def abs_directory_path(self): # e.g. "/var/www/html/arkestra_medic/media/filer_private/2010/11/23/output" return os.path.join(settings.MEDIA_ROOT, "rendered_video", self.directory()) def filename(self): # e.g. "video.dv" return os.path.basename(unicode(self.source.file)) def directory(self): # e.g. "filer_private/2010/11/23" print ">> self ", self print ">> self.source ", self.source print ">> self.source.file ", self.source.file print ">> unicode(self.source.file)", unicode(self.source.file) print ">> os.path.dirname(unicode(self.source.file))", os.path.dirname(unicode(self.source.file)) return os.path.dirname(unicode(self.source.file)) def encode(self): print print "======== encoding video =========" print # we're going to create an encoded version of our video # let's find out from the dictionaries what's required codec_profile = VERSIONS[self.codec][self.size] codec_code = CODECS[self.codec]["code"] encoder = codec_profile["encoder"] schema = ENCODERS[encoder]["schema"] command = [encoder] print "codec_profile", codec_profile print "codec_code", codec_code print "encoder", encoder print "schema", schema print "command", command # check the output folder exists; create it if not if not os.path.exists(self.abs_directory_path()): print ">>> the output folder doesn't exist:", self.abs_directory_path() os.makedirs(self.abs_directory_path()) print ">>>that worked!" # loop over the schema and assemble the command for item in schema: # input and output are special cases, because they take values that aren't determined by the schema if item == "input": input_prefix = ENCODERS[encoder].get("input") if input_prefix: command.append(input_prefix) command.append(self.source.file.path) elif item == "output": output_prefix = ENCODERS[encoder].get("output") if output_prefix: command.append(output_prefix) command.append(self.outputpath()) else: for option_prefix, option_value in codec_profile[item].items(): command.extend((option_prefix,str(option_value))) # immediately mark it as "encoding", so nothing else tries to encode it while we're doing this print ">>> mark as encoding" self.status = "encoding" self.save() # now do the encoding and don't let anything after this happen until we finish executing command: print ">>> saved status" print "command:", str(command) exit_status = subprocess.call(command) print exit_status print ">>> exited from", command if exit_status == 0: # it's OK, so mark the version OK self.status = "ready" self.save() print ">>> saved OK" else: self.status = "failed" # mark it as failed because the command returned an error self.save() print ">>> save FAILED", exit_status # we should never return from here with the status still "encoding" - but that has happened - how? return self.status def url(self): # the url for a particular version return os.path.join(settings.MEDIA_URL, \ "rendered_video", \ self.directory(), \ "-".join((self.filename_without_extension(), self.codec_and_size())) \ + CODECS[self.codec]["extension"]) def __unicode__(self): if self.source: return self.source.label else: return u"Video %s" % self.caption return '' """ We have a number of dictionaries to help describe what we're doing. Maybe they should be in settings, but they are here for now. This could be made simpler, but it's more flexible this way - for example, this allows us to prefer one encoder for one size, and a different encoder for another - just in case. ENCODERS provides infomration about the commands that will be used to perform the video re-encoding, in this format. Each item in ENCODERS is the commandline name of the program. Each command has a different schema, because they get their input/output filenames in a different order and with different prefix. """ ENCODERS = { "HandBrakeCLI": { "schema": ("options", "input", "output"), # the order in which the program expects to receive its options "input": "--input", "output": "--output", }, "ffmpeg2theora": { "schema": ("options", "output", "input"), # the schema is quite different from the one above "output": "--output", # "input": "", }, } """ CODECS contains information for the files that are created. 'code' is a slugified version of the codec's name; it's added to the filename 'description' and 'implications' are human-readable information """ CODECS = { "H.264": { "extension": ".mp4", "code": "h264", "description": "MP4/H.264 format video", "implications": " - good support in Safari & iOS", }, "Theora": { "extension": ".ogv", "code": "theora", "description": "Ogg/Theora format video", "implications": " - good support in Firefox", }, } """ SIZES is a tuple of the sizes we can encode to for output. It needs to be in order of increasing size. """ SIZES = (360,720) """ VERSIONS describes the different files we can encode to. Firstly, we list the different codecs we'll employ, then each size for each. 'type' is the type attribute of the <source> element in HTML5 'options' are what we pass to the command """ VERSIONS = { "H.264": { SIZES[0]: { "encoder": "HandBrakeCLI", "type": 'video/mp4; codecs="avc1.42E01E, .mp4a.40.2"', #supposedly, we should use the codecs attribute of the type attribute, but all it does for me is make Theora video stop working in Firefox "options": { "--preset": "iPhone & iPod Touch", "--width": SIZES[0], #"--vb": "600", "--two-pass": "", "--turbo": "", "--optimize": "", }, }, SIZES[1]: { "encoder": "HandBrakeCLI", "type": 'video/mp4; codecs="avc1.42E01E, .mp4a.40.2"', "options": { "--preset": "iPhone & iPod Touch", "--width": SIZES[1], #"--vb": "600", "--two-pass": "", "--turbo": "", "--optimize": "", }, }, }, "Theora": { SIZES[0]: { "encoder": "ffmpeg2theora", "type": 'video/ogg; codecs="theora, vorbis"', "options": { "--videoquality": "5", "--audioquality": "1", "--width": SIZES[0], }, }, SIZES[1]: { "encoder": "ffmpeg2theora", "type": 'video/ogg; codecs="theora, vorbis"', "options": { "--videoquality": "5", "--audioquality": "1", "--width": SIZES[1], }, }, }, } """ We provide these so we know we which encoded videos are available or missing for each kind of player. """ PLAYERS = { "HTML5": ("H.264", "Theora"), "FLASH": ("H.264",), }
{ "content_hash": "6092113a96a01868274c86bd2e8d0ba8", "timestamp": "", "source": "github", "line_count": 306, "max_line_length": 210, "avg_line_length": 36.705882352941174, "alnum_prop": 0.5564458689458689, "repo_name": "bubenkoff/Arkestra", "id": "e6c3586e6e9854f7773907cae2860a5dd54fae8e", "size": "11232", "binary": false, "copies": "2", "ref": "refs/heads/develop", "path": "video/models.py", "mode": "33188", "license": "bsd-2-clause", "language": [ { "name": "CSS", "bytes": "161649" }, { "name": "HTML", "bytes": "724894" }, { "name": "JavaScript", "bytes": "656447" }, { "name": "Python", "bytes": "1461948" } ], "symlink_target": "" }
from __future__ import unicode_literals from prompt_toolkit.enums import DEFAULT_BUFFER from prompt_toolkit.filters import HasSelection, Condition, EmacsInsertMode, ViInsertMode from prompt_toolkit.keys import Keys from prompt_toolkit.layout.screen import Point from prompt_toolkit.mouse_events import MouseEventType, MouseEvent from prompt_toolkit.renderer import HeightIsUnknownError from prompt_toolkit.utils import suspend_to_background_supported, is_windows from .named_commands import get_by_name from ..registry import Registry __all__ = ( 'load_basic_bindings', 'load_abort_and_exit_bindings', 'load_basic_system_bindings', 'load_auto_suggestion_bindings', ) def if_no_repeat(event): """ Callable that returns True when the previous event was delivered to another handler. """ return not event.is_repeat def load_basic_bindings(): registry = Registry() insert_mode = ViInsertMode() | EmacsInsertMode() handle = registry.add_binding has_selection = HasSelection() @handle(Keys.ControlA) @handle(Keys.ControlB) @handle(Keys.ControlC) @handle(Keys.ControlD) @handle(Keys.ControlE) @handle(Keys.ControlF) @handle(Keys.ControlG) @handle(Keys.ControlH) @handle(Keys.ControlI) @handle(Keys.ControlJ) @handle(Keys.ControlK) @handle(Keys.ControlL) @handle(Keys.ControlM) @handle(Keys.ControlN) @handle(Keys.ControlO) @handle(Keys.ControlP) @handle(Keys.ControlQ) @handle(Keys.ControlR) @handle(Keys.ControlS) @handle(Keys.ControlT) @handle(Keys.ControlU) @handle(Keys.ControlV) @handle(Keys.ControlW) @handle(Keys.ControlX) @handle(Keys.ControlY) @handle(Keys.ControlZ) @handle(Keys.F1) @handle(Keys.F2) @handle(Keys.F3) @handle(Keys.F4) @handle(Keys.F5) @handle(Keys.F6) @handle(Keys.F7) @handle(Keys.F8) @handle(Keys.F9) @handle(Keys.F10) @handle(Keys.F11) @handle(Keys.F12) @handle(Keys.F13) @handle(Keys.F14) @handle(Keys.F15) @handle(Keys.F16) @handle(Keys.F17) @handle(Keys.F18) @handle(Keys.F19) @handle(Keys.F20) @handle(Keys.ControlSpace) @handle(Keys.ControlBackslash) @handle(Keys.ControlSquareClose) @handle(Keys.ControlCircumflex) @handle(Keys.ControlUnderscore) @handle(Keys.Backspace) @handle(Keys.Up) @handle(Keys.Down) @handle(Keys.Right) @handle(Keys.Left) @handle(Keys.ShiftUp) @handle(Keys.ShiftDown) @handle(Keys.ShiftRight) @handle(Keys.ShiftLeft) @handle(Keys.Home) @handle(Keys.End) @handle(Keys.Delete) @handle(Keys.ShiftDelete) @handle(Keys.ControlDelete) @handle(Keys.PageUp) @handle(Keys.PageDown) @handle(Keys.BackTab) @handle(Keys.Tab) @handle(Keys.ControlLeft) @handle(Keys.ControlRight) @handle(Keys.ControlUp) @handle(Keys.ControlDown) @handle(Keys.Insert) @handle(Keys.Ignore) def _(event): """ First, for any of these keys, Don't do anything by default. Also don't catch them in the 'Any' handler which will insert them as data. If people want to insert these characters as a literal, they can always do by doing a quoted insert. (ControlQ in emacs mode, ControlV in Vi mode.) """ pass # Readline-style bindings. handle(Keys.Home)(get_by_name('beginning-of-line')) handle(Keys.End)(get_by_name('end-of-line')) handle(Keys.Left)(get_by_name('backward-char')) handle(Keys.Right)(get_by_name('forward-char')) handle(Keys.ControlUp)(get_by_name('previous-history')) handle(Keys.ControlDown)(get_by_name('next-history')) handle(Keys.ControlL)(get_by_name('clear-screen')) handle(Keys.ControlK, filter=insert_mode)(get_by_name('kill-line')) handle(Keys.ControlU, filter=insert_mode)(get_by_name('unix-line-discard')) handle(Keys.ControlH, filter=insert_mode, save_before=if_no_repeat)( get_by_name('backward-delete-char')) handle(Keys.Backspace, filter=insert_mode, save_before=if_no_repeat)( get_by_name('backward-delete-char')) handle(Keys.Delete, filter=insert_mode, save_before=if_no_repeat)( get_by_name('delete-char')) handle(Keys.ShiftDelete, filter=insert_mode, save_before=if_no_repeat)( get_by_name('delete-char')) handle(Keys.Any, filter=insert_mode, save_before=if_no_repeat)( get_by_name('self-insert')) handle(Keys.ControlT, filter=insert_mode)(get_by_name('transpose-chars')) handle(Keys.ControlW, filter=insert_mode)(get_by_name('unix-word-rubout')) handle(Keys.ControlI, filter=insert_mode)(get_by_name('menu-complete')) handle(Keys.BackTab, filter=insert_mode)(get_by_name('menu-complete-backward')) handle(Keys.PageUp, filter= ~has_selection)(get_by_name('previous-history')) handle(Keys.PageDown, filter= ~has_selection)(get_by_name('next-history')) # CTRL keys. text_before_cursor = Condition(lambda cli: cli.current_buffer.text) handle(Keys.ControlD, filter=text_before_cursor & insert_mode)(get_by_name('delete-char')) is_multiline = Condition(lambda cli: cli.current_buffer.is_multiline()) is_returnable = Condition(lambda cli: cli.current_buffer.accept_action.is_returnable) @handle(Keys.ControlJ, filter=is_multiline & insert_mode) def _(event): " Newline (in case of multiline input. " event.current_buffer.newline(copy_margin=not event.cli.in_paste_mode) @handle(Keys.ControlJ, filter=~is_multiline & is_returnable) def _(event): " Enter, accept input. " buff = event.current_buffer buff.accept_action.validate_and_handle(event.cli, buff) # Delete the word before the cursor. @handle(Keys.Up) def _(event): event.current_buffer.auto_up(count=event.arg) @handle(Keys.Down) def _(event): event.current_buffer.auto_down(count=event.arg) @handle(Keys.Delete, filter=has_selection) def _(event): data = event.current_buffer.cut_selection() event.cli.clipboard.set_data(data) # Global bindings. @handle(Keys.ControlZ) def _(event): """ By default, control-Z should literally insert Ctrl-Z. (Ansi Ctrl-Z, code 26 in MSDOS means End-Of-File. In a Python REPL for instance, it's possible to type Control-Z followed by enter to quit.) When the system bindings are loaded and suspend-to-background is supported, that will override this binding. """ event.current_buffer.insert_text(event.data) @handle(Keys.CPRResponse, save_before=lambda e: False) def _(event): """ Handle incoming Cursor-Position-Request response. """ # The incoming data looks like u'\x1b[35;1R' # Parse row/col information. row, col = map(int, event.data[2:-1].split(';')) # Report absolute cursor position to the renderer. event.cli.renderer.report_absolute_cursor_row(row) @handle(Keys.BracketedPaste) def _(event): " Pasting from clipboard. " data = event.data # Be sure to use \n as line ending. # Some terminals (Like iTerm2) seem to paste \r\n line endings in a # bracketed paste. See: https://github.com/ipython/ipython/issues/9737 data = data.replace('\r\n', '\n') data = data.replace('\r', '\n') event.current_buffer.insert_text(data) @handle(Keys.Any, filter=Condition(lambda cli: cli.quoted_insert), eager=True) def _(event): """ Handle quoted insert. """ event.current_buffer.insert_text(event.data, overwrite=False) event.cli.quoted_insert = False return registry def load_mouse_bindings(): """ Key bindings, required for mouse support. (Mouse events enter through the key binding system.) """ registry = Registry() @registry.add_binding(Keys.Vt100MouseEvent) def _(event): """ Handling of incoming mouse event. """ # Typical: "Esc[MaB*" # Urxvt: "Esc[96;14;13M" # Xterm SGR: "Esc[<64;85;12M" # Parse incoming packet. if event.data[2] == 'M': # Typical. mouse_event, x, y = map(ord, event.data[3:]) mouse_event = { 32: MouseEventType.MOUSE_DOWN, 35: MouseEventType.MOUSE_UP, 96: MouseEventType.SCROLL_UP, 97: MouseEventType.SCROLL_DOWN, }.get(mouse_event) # Handle situations where `PosixStdinReader` used surrogateescapes. if x >= 0xdc00: x-= 0xdc00 if y >= 0xdc00: y-= 0xdc00 x -= 32 y -= 32 else: # Urxvt and Xterm SGR. # When the '<' is not present, we are not using the Xterm SGR mode, # but Urxvt instead. data = event.data[2:] if data[:1] == '<': sgr = True data = data[1:] else: sgr = False # Extract coordinates. mouse_event, x, y = map(int, data[:-1].split(';')) m = data[-1] # Parse event type. if sgr: mouse_event = { (0, 'M'): MouseEventType.MOUSE_DOWN, (0, 'm'): MouseEventType.MOUSE_UP, (64, 'M'): MouseEventType.SCROLL_UP, (65, 'M'): MouseEventType.SCROLL_DOWN, }.get((mouse_event, m)) else: mouse_event = { 32: MouseEventType.MOUSE_DOWN, 35: MouseEventType.MOUSE_UP, 96: MouseEventType.SCROLL_UP, 97: MouseEventType.SCROLL_DOWN, }.get(mouse_event) x -= 1 y -= 1 # Only handle mouse events when we know the window height. if event.cli.renderer.height_is_known and mouse_event is not None: # Take region above the layout into account. The reported # coordinates are absolute to the visible part of the terminal. try: y -= event.cli.renderer.rows_above_layout except HeightIsUnknownError: return # Call the mouse handler from the renderer. handler = event.cli.renderer.mouse_handlers.mouse_handlers[x,y] handler(event.cli, MouseEvent(position=Point(x=x, y=y), event_type=mouse_event)) @registry.add_binding(Keys.WindowsMouseEvent) def _(event): """ Handling of mouse events for Windows. """ assert is_windows() # This key binding should only exist for Windows. # Parse data. event_type, x, y = event.data.split(';') x = int(x) y = int(y) # Make coordinates absolute to the visible part of the terminal. screen_buffer_info = event.cli.renderer.output.get_win32_screen_buffer_info() rows_above_cursor = screen_buffer_info.dwCursorPosition.Y - event.cli.renderer._cursor_pos.y y -= rows_above_cursor # Call the mouse event handler. handler = event.cli.renderer.mouse_handlers.mouse_handlers[x,y] handler(event.cli, MouseEvent(position=Point(x=x, y=y), event_type=event_type)) return registry def load_abort_and_exit_bindings(): """ Basic bindings for abort (Ctrl-C) and exit (Ctrl-D). """ registry = Registry() handle = registry.add_binding @handle(Keys.ControlC) def _(event): " Abort when Control-C has been pressed. " event.cli.abort() @Condition def ctrl_d_condition(cli): """ Ctrl-D binding is only active when the default buffer is selected and empty. """ return (cli.current_buffer_name == DEFAULT_BUFFER and not cli.current_buffer.text) handle(Keys.ControlD, filter=ctrl_d_condition)(get_by_name('end-of-file')) return registry def load_basic_system_bindings(): """ Basic system bindings (For both Emacs and Vi mode.) """ registry = Registry() suspend_supported = Condition( lambda cli: suspend_to_background_supported()) @registry.add_binding(Keys.ControlZ, filter=suspend_supported) def _(event): """ Suspend process to background. """ event.cli.suspend_to_background() return registry def load_auto_suggestion_bindings(): """ Key bindings for accepting auto suggestion text. """ registry = Registry() handle = registry.add_binding suggestion_available = Condition( lambda cli: cli.current_buffer.suggestion is not None and cli.current_buffer.document.is_cursor_at_the_end) @handle(Keys.ControlF, filter=suggestion_available) @handle(Keys.ControlE, filter=suggestion_available) @handle(Keys.Right, filter=suggestion_available) def _(event): " Accept suggestion. " b = event.current_buffer suggestion = b.suggestion if suggestion: b.insert_text(suggestion.text) return registry
{ "content_hash": "adb335af040b113087e5f7bddf3a9539", "timestamp": "", "source": "github", "line_count": 406, "max_line_length": 100, "avg_line_length": 32.74384236453202, "alnum_prop": 0.6188506092974274, "repo_name": "ammarkhann/FinalSeniorCode", "id": "401135dec06e600795db4381691d3c757f5c0d2d", "size": "13331", "binary": false, "copies": "10", "ref": "refs/heads/master", "path": "lib/python2.7/site-packages/prompt_toolkit/key_binding/bindings/basic.py", "mode": "33188", "license": "mit", "language": [ { "name": "C", "bytes": "229289" }, { "name": "C++", "bytes": "171536" }, { "name": "CSS", "bytes": "928345" }, { "name": "Fortran", "bytes": "14107" }, { "name": "HTML", "bytes": "853239" }, { "name": "JavaScript", "bytes": "4838516" }, { "name": "Jupyter Notebook", "bytes": "518186" }, { "name": "Makefile", "bytes": "214" }, { "name": "Matlab", "bytes": "4346" }, { "name": "Python", "bytes": "81804894" }, { "name": "Roff", "bytes": "6673" }, { "name": "Shell", "bytes": "3409" }, { "name": "Smarty", "bytes": "28408" }, { "name": "TeX", "bytes": "1527" }, { "name": "XSLT", "bytes": "366202" } ], "symlink_target": "" }
from __future__ import absolute_import import six from datetime import datetime from django.utils import timezone from sentry.api.serializers import Serializer, register from sentry.models import Event, EventError @register(Event) class EventSerializer(Serializer): _reserved_keys = frozenset( ['sentry.interfaces.User', 'sdk', 'device', 'contexts']) def _get_entries(self, event, user, is_public=False): # XXX(dcramer): These are called entries for future-proofing interface_list = [] for key, interface in six.iteritems(event.interfaces): # we treat user as a special contextual item if key in self._reserved_keys: continue data = interface.get_api_context(is_public=is_public) # data might not be returned for e.g. a public HTTP repr if not data: continue entry = { 'data': data, 'type': interface.get_alias(), } interface_list.append((interface, entry)) interface_list.sort( key=lambda x: x[0].get_display_score(), reverse=True) return [i[1] for i in interface_list] def get_attrs(self, item_list, user, is_public=False): Event.objects.bind_nodes(item_list, 'data') results = {} for item in item_list: user_interface = item.interfaces.get('sentry.interfaces.User') # TODO(dcramer): convert to get_api_context if user_interface: user_data = user_interface.to_json() else: user_data = None contexts_interface = item.interfaces.get('contexts') if contexts_interface: contexts_data = contexts_interface.get_api_context() else: contexts_data = {} sdk_interface = item.interfaces.get('sdk') if sdk_interface: sdk_data = sdk_interface.get_api_context() else: sdk_data = None results[item] = { 'entries': self._get_entries(item, user, is_public=is_public), 'user': user_data, 'contexts': contexts_data, 'sdk': sdk_data, } return results def serialize(self, obj, attrs, user): errors = [] for error in obj.data.get('errors', []): message = EventError.get_message(error) error_result = { 'type': error['type'], 'message': message, 'data': {k: v for k, v in six.iteritems(error) if k != 'type'}, } errors.append(error_result) tags = sorted( [{ 'key': k.split('sentry:', 1)[-1], 'value': v } for k, v in obj.get_tags()], key=lambda x: x['key'] ) received = obj.data.get('received') if received: # Sentry at one point attempted to record invalid types here. # Remove after June 2 2016 try: received = datetime.utcfromtimestamp(received).replace( tzinfo=timezone.utc, ) except TypeError: received = None from sentry.event_manager import ( get_hashes_from_fingerprint, md5_from_hash, ) # TODO(dcramer): move release serialization here d = { 'id': six.text_type(obj.id), 'groupID': six.text_type(obj.group_id), 'eventID': six.text_type(obj.event_id), 'size': obj.size, 'entries': attrs['entries'], 'dist': obj.dist, # See GH-3248 'message': obj.get_legacy_message(), 'user': attrs['user'], 'contexts': attrs['contexts'], 'sdk': attrs['sdk'], # TODO(dcramer): move into contexts['extra'] 'context': obj.data.get('extra', {}), 'packages': obj.data.get('modules', {}), 'type': obj.get_event_type(), 'metadata': obj.get_event_metadata(), 'tags': tags, 'platform': obj.platform, 'dateCreated': obj.datetime, 'dateReceived': received, 'errors': errors, 'fingerprints': [ md5_from_hash(h) for h in get_hashes_from_fingerprint(obj, obj.data.get('fingerprint', ['{{ default }}'])) ], } return d class SharedEventSerializer(EventSerializer): def get_attrs(self, item_list, user): return super(SharedEventSerializer, self).get_attrs(item_list, user, is_public=True) def serialize(self, obj, attrs, user): result = super(SharedEventSerializer, self).serialize(obj, attrs, user) del result['context'] del result['contexts'] del result['user'] del result['tags'] del result['sdk'] del result['errors'] result['entries'] = [e for e in result['entries'] if e['type'] != 'breadcrumbs'] return result
{ "content_hash": "7799fc40702f5ef3ec9281280956ee0a", "timestamp": "", "source": "github", "line_count": 152, "max_line_length": 105, "avg_line_length": 34.14473684210526, "alnum_prop": 0.5240847784200385, "repo_name": "looker/sentry", "id": "6f4d90e23e0eb157601655586352f03cdce9fb23", "size": "5190", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "src/sentry/api/serializers/models/event.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "289931" }, { "name": "HTML", "bytes": "241322" }, { "name": "JavaScript", "bytes": "3112298" }, { "name": "Lua", "bytes": "65795" }, { "name": "Makefile", "bytes": "7048" }, { "name": "Python", "bytes": "36341504" }, { "name": "Ruby", "bytes": "204" }, { "name": "Shell", "bytes": "5701" } ], "symlink_target": "" }
import sys import json import logging from graphkit.util import read_uri, dump_fileobj from graphkit.util import GraphKitException log = logging.getLogger(__name__) def _fileobj(dump_file): if dump_file is None: return sys.stdout return dump_fileobj(dump_file) def save_dump(graph, dump_file): """ Save a dump of the current graph to an NQuads file. """ log.debug('Dumping to %r...', dump_file or 'stdout') fh = _fileobj(dump_file) graph.graph.serialize(fh, format='nquads') fh.close() def load_dump(graph, dump_file): """ Load an NQuads file into the current graph. """ log.debug('Loading from %r...', dump_file) fh = read_uri(dump_file) graph.graph.parse(fh, format='nquads') fh.close() def save_json_dump(graph, dump_file, types=[], depth=4): """ Generate a nested JSON dump of a set of objects. """ log.debug('Storing JSON dump to %r...', dump_file or 'stdout') if len(types): for alias in types: if alias not in graph.aliases: raise GraphKitException('No such type alias: %r' % alias) else: types = graph.aliases.keys() data = {} for name in types: objects = [o for o in graph.all(name, depth=depth)] data[name] = objects fh = _fileobj(dump_file) json.dump(data, fh, indent=2) fh.close()
{ "content_hash": "145c2493a570214dd92d2f779ff401a7", "timestamp": "", "source": "github", "line_count": 49, "max_line_length": 73, "avg_line_length": 27.6734693877551, "alnum_prop": 0.6290560471976401, "repo_name": "pudo/graphkit", "id": "8df975e4ed7bbd04a9c18031a15748b72197dbbc", "size": "1356", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "graphkit/dumps.py", "mode": "33188", "license": "mit", "language": [ { "name": "Makefile", "bytes": "492" }, { "name": "Python", "bytes": "12428" } ], "symlink_target": "" }
import binascii import mock from oslo_concurrency import processutils from nova import test from nova.virt.libvirt.storage import dmcrypt class LibvirtDmcryptTestCase(test.NoDBTestCase): def setUp(self): super(LibvirtDmcryptTestCase, self).setUp() self.CIPHER = 'cipher' self.KEY_SIZE = 256 self.NAME = 'disk' self.TARGET = dmcrypt.volume_name(self.NAME) self.PATH = '/dev/nova-lvm/instance_disk' self.KEY = bytes(bytearray(x for x in range(0, self.KEY_SIZE))) self.KEY_STR = binascii.hexlify(self.KEY).decode('utf-8') @mock.patch('nova.utils.execute') def test_create_volume(self, mock_execute): dmcrypt.create_volume(self.TARGET, self.PATH, self.CIPHER, self.KEY_SIZE, self.KEY) mock_execute.assert_has_calls([ mock.call('cryptsetup', 'create', self.TARGET, self.PATH, '--cipher=' + self.CIPHER, '--key-size=' + str(self.KEY_SIZE), '--key-file=-', process_input=self.KEY_STR, run_as_root=True), ]) @mock.patch('nova.virt.libvirt.storage.dmcrypt.LOG') @mock.patch('nova.utils.execute') def test_create_volume_fail(self, mock_execute, mock_log): mock_execute.side_effect = processutils.ProcessExecutionError() self.assertRaises(processutils.ProcessExecutionError, dmcrypt.create_volume, self.TARGET, self.PATH, self.CIPHER, self.KEY_SIZE, self.KEY) self.assertEqual(1, mock_execute.call_count) self.assertEqual(1, mock_log.error.call_count) # error logged @mock.patch('nova.utils.execute') def test_delete_volume(self, mock_execute): dmcrypt.delete_volume(self.TARGET) mock_execute.assert_has_calls([ mock.call('cryptsetup', 'remove', self.TARGET, run_as_root=True), ]) @mock.patch('nova.virt.libvirt.storage.dmcrypt.LOG') @mock.patch('nova.utils.execute') def test_delete_volume_fail(self, mock_execute, mock_log): mock_execute.side_effect = processutils.ProcessExecutionError() self.assertRaises(processutils.ProcessExecutionError, dmcrypt.delete_volume, self.TARGET) self.assertEqual(1, mock_execute.call_count) self.assertEqual(1, mock_log.error.call_count) # error logged @mock.patch('nova.virt.libvirt.storage.dmcrypt.LOG') @mock.patch('nova.utils.execute') def test_delete_missing_volume(self, mock_execute, mock_log): mock_execute.side_effect = \ processutils.ProcessExecutionError(exit_code=4) dmcrypt.delete_volume(self.TARGET) self.assertEqual(1, mock_log.debug.call_count) self.assertEqual(0, mock_log.error.call_count) @mock.patch('os.listdir') def test_list_volumes(self, mock_listdir): mock_listdir.return_value = [self.TARGET, '/dev/mapper/disk'] encrypted_volumes = dmcrypt.list_volumes() self.assertEqual(1, mock_listdir.call_count) self.assertEqual([self.TARGET], encrypted_volumes)
{ "content_hash": "c9d562209a70dafb4ae8740f06d41656", "timestamp": "", "source": "github", "line_count": 82, "max_line_length": 77, "avg_line_length": 38.353658536585364, "alnum_prop": 0.6391096979332274, "repo_name": "jianghuaw/nova", "id": "a09ee3931953af8ea85d54ba2a6637cd53f772a9", "size": "3819", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "nova/tests/unit/virt/libvirt/storage/test_dmcrypt.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "HTML", "bytes": "1435" }, { "name": "PHP", "bytes": "32515" }, { "name": "Python", "bytes": "19932348" }, { "name": "Shell", "bytes": "28290" }, { "name": "Smarty", "bytes": "339635" } ], "symlink_target": "" }
import codecs from datetime import datetime from optparse import OptionParser import random import re import time import cookielib import urllib import urllib2 import logging from BeautifulSoup import BeautifulSoup, NavigableString class Message: def __init__(self, thread_url, sender, recipient, timestamp, subject, content, thunderbird=False): self.thread_url = thread_url self.sender = sender self.recipient = recipient self.timestamp = timestamp self.subject = subject self.content = content self.thunderbird = thunderbird def __str__(self): if self.thunderbird: msglength = len(self.content) subject="OKC Message, length = " + str(msglength).zfill(4) # leading zeros for message length return """ From - %s From: %s To: %s Subject: %s %s URL: %s """ % ( self.timestamp.strftime('%a %b %d %H:%M:%S %Y') if self.timestamp else None, self.sender, self.recipient, subject, self.content, self.thread_url) else: return """ URL: %s From: %s To: %s Date: %s Subject: %s Content-Length: %d %s """ % ( self.thread_url, self.sender, self.recipient, self.timestamp, self.subject.strip() if self.subject else None, len(self.content), self.content ) class MessageMissing(Message): def __init__(self, thread_url): self.thread_url = thread_url self.sender = None self.recipient = None self.timestamp = None self.subject = None self.content = "ERROR: message(s) not fetched" self.thunderbird = False class ArrowFetcher: secure_base_url = 'https://www.okcupid.com' sleep_duration = 2.0 # base time to wait after each HTTP request, but this will be adjusted randomly encoding_pairs = [('<br />', '\n'), ('&#35;', '#'), ('&amp;', '&'), ('&#38;', '&'), ('&#38;amp;', '&'), ('&lt;', '<'), ('&gt;', '>'), ('&quot;', '"'), ('&#38;quot;', '"'), ('&#39;', "'"), ('&rsquo;', u'\u2019'), ('&mdash;', "--")] def __init__(self, username, thunderbird=False, debug=False): self.username = username self.thunderbird = thunderbird self.debug = debug self.thread_urls = [] def _safely_soupify(self, f): f = f.partition("function autocoreError")[0] + '</body></html>' # wtf okc with the weirdly encoded "</scr' + 'ipt>'"-type statements in your javascript return(BeautifulSoup(f)) def _request_read_sleep(self, url): f = urllib2.urlopen(url).read() time.sleep(abs(self.sleep_duration + (random.randrange(-100, 100)/100.0))) return f def queue_threads(self): self.thread_urls = [] try: for folder in range(1, 4): # Inbox, Sent, Smiles page = 0 while (page < 1 if self.debug else True): logging.info("Queuing folder %s, page %s", folder, page) f = self._request_read_sleep(self.secure_base_url + '/messages?folder=' + str(folder) + '&low=' + str((page * 30) + 1)) soup = self._safely_soupify(f) end_pattern = re.compile('&folder=\d\';') threads = [ re.sub(end_pattern, '', li.find('a', {'class': 'open'} )['href'].partition('&folder=')[0]) for li in soup.find('ul', {'id': 'messages'}).findAll('li') ] if len(threads) == 0: # break out of the infinite loop when we reach the end and there are no threads on the page break else: self.thread_urls.extend(threads) page = page + 1 except AttributeError: logging.error("There was an error queuing the threads to download - are you sure your username and password are correct?") def dedupe_threads(self): if self.thread_urls: logging.debug("Removing duplicate URLs") self.thread_urls = list(set(self.thread_urls)) def fetch_threads(self): self.messages = [] for thread_url in self.thread_urls: try: thread_messages = self._fetch_thread(thread_url) except Exception as e: thread_messages = [MessageMissing(self.secure_base_url + thread_url)] logging.error("Fetch thread failed for URL: %s with error %s", thread_url, e) self.messages.extend(thread_messages) def write_messages(self, file_name): self.messages.sort(key = lambda message: (message.thread_url, message.timestamp)) # sort by sender, then time f = codecs.open(file_name, encoding='utf-8', mode='w') # ugh, otherwise i think it will try to write ascii for message in self.messages: logging.debug("Writing message for thread: " + message.thread_url) f.write(unicode(message)) f.close() def _fetch_thread(self, thread_url): message_list = [] logging.info("Fetching thread: " + self.secure_base_url + thread_url) f = self._request_read_sleep(self.secure_base_url + thread_url) soup = self._safely_soupify(f) try: subject = soup.find('strong', {'id': 'message_heading'}).contents[0] subject = unicode(subject) for find, replace in self.encoding_pairs: subject = subject.replace(unicode(find), unicode(replace)) except AttributeError: subject = unicode('') try: other_user = soup.find('input', {'name': 'buddyname'}).get('value') except AttributeError: try: # messages from OkCupid itself are a special case other_user = soup.find('ul', {'id': 'thread'}).find('div', 'signature').contents[0].partition('Message from ')[2] except AttributeError: other_user = '' for message in soup.find('ul', {'id': 'thread'}).findAll('li'): message_type = re.sub(r'_.*$', '', message.get('id', 'unknown')) logging.debug("Raw message (type: %s): %s", type(message), message) body_contents = message.find('div', 'message_body') if not body_contents and message_type == 'deleted': body_contents = message if body_contents: logging.debug("Message (type: %s): %s", message_type, body_contents) body = self._strip_tags(body_contents.renderContents().decode('UTF-8')).strip() logging.debug("Message after tag removing: %s", body) for find, replace in self.encoding_pairs: body = body.replace(unicode(find), unicode(replace)) logging.debug("Message after HTML entity conversion: %s", body) if message_type in ['broadcast', 'deleted', 'quiver']: # TODO: make a better "guess" about the time of the broadcast, account deletion, or Quiver match. # Perhaps get the time of the next message/reply (there should be at least one), and set the time based on it. timestamp = datetime(2000, 1, 1, 12, 0) else: fancydate_js = message.find('span', 'timestamp').find('script').string timestamp = datetime.fromtimestamp(int(fancydate_js.split(', ')[1])) sender = other_user recipient = self.username try: if message['class'].replace('preview', '').strip() == 'from_me': recipient = other_user sender = self.username except KeyError: pass logging.debug("Body: %s", body) message_list.append(Message(self.secure_base_url + thread_url, unicode(sender), unicode(recipient), timestamp, subject, body, thunderbird=self.thunderbird)) else: continue # control elements are also <li>'s in their html, so non-messages return message_list # http://stackoverflow.com/questions/1765848/remove-a-tag-using-beautifulsoup-but-keep-its-contents/1766002#1766002 def _strip_tags(self, html, invalid_tags=['em', 'a', 'span', 'strong', 'div', 'p']): soup = BeautifulSoup(html) for tag in soup.findAll(True): if tag.name in invalid_tags: s = "" for c in tag.contents: if not isinstance(c, NavigableString): c = self._strip_tags(unicode(c), invalid_tags) s += unicode(c).strip() else: s += unicode(c) tag.replaceWith(s) return soup.renderContents().decode('UTF-8') class OkcupidState: def __init__(self, username, filename, thunderbird, debug): self.username = username self.filename = filename self.thunderbird = thunderbird self.debug = debug self.cookie_jar = cookielib.CookieJar() self.opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cookie_jar)) urllib2.install_opener(self.opener) def _setOpenerUrl(self, url, params=None): f = self.opener.open(url, params) f.close() logging.debug("Cookie jar: %s", self.cookie_jar) def fetch(self): arrow_fetcher = ArrowFetcher( self.username, thunderbird=self.thunderbird, debug=self.debug) arrow_fetcher.queue_threads() arrow_fetcher.dedupe_threads() try: arrow_fetcher.fetch_threads() arrow_fetcher.write_messages(self.filename) except KeyboardInterrupt: if self.debug: # Write progress so far to the output file if we're debugging arrow_fetcher.write_messages(self.filename) raise KeyboardInterrupt def use_password(self, password): logging.debug("Using password.") params = urllib.urlencode(dict(username=self.username, password=password)) self._setOpenerUrl(ArrowFetcher.secure_base_url + '/login', params) def use_autologin(self, autologin): logging.debug("Using autologin url: %s", autologin) self._setOpenerUrl(autologin) def main(): parser = OptionParser() parser.add_option("-u", "--username", dest="username", help="your OkCupid username") parser.add_option("-p", "--password", dest="password", help="your OkCupid password") parser.add_option("-f", "--filename", dest="filename", help="the file to which you want to write the data") parser.add_option("-t", "--thunderbird", dest="thunderbird", help="format output for Thunderbird rather than as plaintext", action='store_const', const=True, default=False) parser.add_option("-d", "--debug", dest="debug", help="limit the number of threads fetched for debugging", action='store_const', const=True, default=False) parser.add_option("-a", "--autologin", dest="autologin", help="a link from an OkCupid email, which contains your login credentials. Use instead of a password.") (options, args) = parser.parse_args() options_ok = True logging_format = '%(levelname)s: %(message)s' if options.debug: logging.basicConfig(format=logging_format, level=logging.DEBUG) logging.debug("Debug mode turned on.") else: logging.basicConfig(format=logging_format, level=logging.INFO) if not options.username: logging.error("Please specify your OkCupid username with either '-u' or '--username'") if not options.autologin and not options.password: logging.error("Please specify your OkCupid password with either '-p' or '--password'") options_ok = False if options.autologin and options.password: logging.error("Don't specify both autologin and password") options_ok = False if not options.filename: logging.error("Please specify the destination file with either '-f' or '--filename'") options_ok = False if options_ok: state = OkcupidState(options.username, options.filename, options.thunderbird, options.debug) if options.username and options.password: state.use_password(options.password) if options.autologin: state.use_autologin(options.autologin) state.fetch() logging.info("Done.") if __name__ == '__main__': main()
{ "content_hash": "0593c3471eae131109d744229a75614a", "timestamp": "", "source": "github", "line_count": 313, "max_line_length": 159, "avg_line_length": 42.82747603833866, "alnum_prop": 0.5490488623647892, "repo_name": "hickford/OkCupid-Message-Downloader", "id": "31cfb9aad0913b9d58a4d294d0e13c0e2efe19c3", "size": "13446", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "okc_arrow_fetcher.py", "mode": "33261", "license": "mit", "language": [ { "name": "Python", "bytes": "13446" }, { "name": "Shell", "bytes": "923" } ], "symlink_target": "" }
from nipype.testing import assert_equal from nipype.interfaces.fsl.maths import MeanImage def test_MeanImage_inputs(): input_map = dict(args=dict(argstr='%s', ), dimension=dict(argstr='-%smean', position=4, usedefault=True, ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='%s', mandatory=True, position=2, ), internal_datatype=dict(argstr='-dt %s', position=1, ), nan2zeros=dict(argstr='-nan', position=3, ), out_file=dict(argstr='%s', genfile=True, hash_files=False, position=-2, ), output_datatype=dict(argstr='-odt %s', position=-1, ), output_type=dict(), terminal_output=dict(nohash=True, ), ) inputs = MeanImage.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_MeanImage_outputs(): output_map = dict(out_file=dict(), ) outputs = MeanImage.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value
{ "content_hash": "e4ec5f2adcf9b8dd4334b4c759194325", "timestamp": "", "source": "github", "line_count": 53, "max_line_length": 78, "avg_line_length": 24.641509433962263, "alnum_prop": 0.6179173047473201, "repo_name": "Leoniela/nipype", "id": "252fd711c18aee3201c6941099e92efc1aec7a18", "size": "1360", "binary": false, "copies": "9", "ref": "refs/heads/master", "path": "nipype/interfaces/fsl/tests/test_auto_MeanImage.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "HTML", "bytes": "9823" }, { "name": "KiCad", "bytes": "3797" }, { "name": "Makefile", "bytes": "1854" }, { "name": "Matlab", "bytes": "5018" }, { "name": "Python", "bytes": "3767360" }, { "name": "Tcl", "bytes": "43408" } ], "symlink_target": "" }
import string intab = "abcdefghijklmnopqrstuwxyz0123456789" outtab = "cdefghijklmnopqrstuwxyzab2345678901" s = "g fmnc wms bgblr rpylqjyrc gr zw fylb. rfyrq ufyr amknsrcpq ypc dmp. bmgle gr gl zw fylb gq glcddgagclr ylb rfyr'q ufw rfgq rcvr gq qm jmle. sqgle qrpgle.kyicrpylq() gq pcamkkclbcb. lmu ynnjw ml rfc spj." print(s.translate(string.maketrans(intab, outtab))) print("http://www.pythonchallenge.com/pc/def/map.html".translate(string.maketrans(intab, outtab)))
{ "content_hash": "bc0a5bd488159de2ba016f53d2b82e62", "timestamp": "", "source": "github", "line_count": 7, "max_line_length": 209, "avg_line_length": 67.14285714285714, "alnum_prop": 0.7872340425531915, "repo_name": "exu/poligon", "id": "0b33edc93a53a8806bc902fbb6a745daca114afe", "size": "470", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "python/pythonchallenge/01.py", "mode": "33188", "license": "mit", "language": [ { "name": "ASP", "bytes": "144" }, { "name": "ActionScript", "bytes": "1240753" }, { "name": "C", "bytes": "6966" }, { "name": "C#", "bytes": "123" }, { "name": "C++", "bytes": "1210" }, { "name": "CSS", "bytes": "467133" }, { "name": "Emacs Lisp", "bytes": "32341" }, { "name": "Go", "bytes": "21406" }, { "name": "JavaScript", "bytes": "2667554" }, { "name": "Lua", "bytes": "522" }, { "name": "Makefile", "bytes": "68" }, { "name": "PHP", "bytes": "486928" }, { "name": "Perl", "bytes": "717" }, { "name": "Python", "bytes": "320028" }, { "name": "Ruby", "bytes": "17923" }, { "name": "Scala", "bytes": "2093" }, { "name": "Scilab", "bytes": "0" }, { "name": "Shell", "bytes": "10730" }, { "name": "VimL", "bytes": "3867" } ], "symlink_target": "" }
import os from ..layers.wrappers import Wrapper from ..models import Sequential try: # pydot-ng is a fork of pydot that is better maintained import pydot_ng as pydot except ImportError: # fall back on pydot if necessary import pydot if not pydot.find_graphviz(): raise RuntimeError('Failed to import pydot. You must install pydot' ' and graphviz for `pydotprint` to work.') def model_to_dot(model, show_shapes=False, show_layer_names=True): dot = pydot.Dot() dot.set('rankdir', 'TB') dot.set('concentrate', True) dot.set_node_defaults(shape='record') if isinstance(model, Sequential): if not model.built: model.build() model = model.model layers = model.layers # Create graph nodes. for layer in layers: layer_id = str(id(layer)) # Append a wrapped layer's label to node's label, if it exists. layer_name = layer.name class_name = layer.__class__.__name__ if isinstance(layer, Wrapper): layer_name = '{}({})'.format(layer_name, layer.layer.name) child_class_name = layer.layer.__class__.__name__ class_name = '{}({})'.format(class_name, child_class_name) # Create node's label. if show_layer_names: label = '{}: {}'.format(layer_name, class_name) else: label = class_name # Rebuild the label as a table including input/output shapes. if show_shapes: try: outputlabels = str(layer.output_shape) except: outputlabels = 'multiple' if hasattr(layer, 'input_shape'): inputlabels = str(layer.input_shape) elif hasattr(layer, 'input_shapes'): inputlabels = ', '.join( [str(ishape) for ishape in layer.input_shapes]) else: inputlabels = 'multiple' label = '%s\n|{input:|output:}|{{%s}|{%s}}' % (label, inputlabels, outputlabels) node = pydot.Node(layer_id, label=label) dot.add_node(node) # Connect nodes with edges. for layer in layers: layer_id = str(id(layer)) for i, node in enumerate(layer.inbound_nodes): node_key = layer.name + '_ib-' + str(i) if node_key in model.container_nodes: for inbound_layer in node.inbound_layers: inbound_layer_id = str(id(inbound_layer)) layer_id = str(id(layer)) dot.add_edge(pydot.Edge(inbound_layer_id, layer_id)) return dot def plot(model, to_file='model.png', show_shapes=False, show_layer_names=True): dot = model_to_dot(model, show_shapes, show_layer_names) _, format = os.path.splitext(to_file) if not format: format = 'png' else: format = format[1:] dot.write(to_file, format=format)
{ "content_hash": "f8a4c8f90189aabc35db768350aee176", "timestamp": "", "source": "github", "line_count": 85, "max_line_length": 92, "avg_line_length": 34.44705882352941, "alnum_prop": 0.5744535519125683, "repo_name": "marcsans/cnn-physics-perception", "id": "e66a52d872f5efbf4a175570175c958cc8558cec", "size": "2928", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "phy/lib/python2.7/site-packages/keras/utils/visualize_util.py", "mode": "33188", "license": "mit", "language": [ { "name": "C", "bytes": "489272" }, { "name": "C++", "bytes": "3521811" }, { "name": "CSS", "bytes": "7132" }, { "name": "Cuda", "bytes": "232079" }, { "name": "FORTRAN", "bytes": "9868" }, { "name": "HTML", "bytes": "131419" }, { "name": "JavaScript", "bytes": "23881" }, { "name": "Jupyter Notebook", "bytes": "16254" }, { "name": "Makefile", "bytes": "75861" }, { "name": "Matlab", "bytes": "4346" }, { "name": "Objective-C", "bytes": "567" }, { "name": "Python", "bytes": "36682149" }, { "name": "Shell", "bytes": "3878" }, { "name": "TeX", "bytes": "14053" } ], "symlink_target": "" }
import os import rospy import subprocess from std_msgs.msg import Byte from lg_common.helpers import run_with_influx_exception_handler DEVNULL = open(os.devnull, 'w') CLEAR_BUTTON = 2 RELAUNCH_COMMAND = "pkill -f chrome" NODE_NAME = 'rfreceiver_relaunch' class RfreceiverAction: def __init__(self): rospy.init_node(NODE_NAME) self.clear_button_message = rospy.get_param('~clear_button_message', 2) def handle_button_msg(self, msg): if msg.data == self.clear_button_message: subprocess.call( RELAUNCH_COMMAND.split(' '), stdout=DEVNULL, stderr=DEVNULL ) def main(self): rospy.Subscriber( '/rfreceiver/buttondown', Byte, self.handle_button_msg ) rospy.spin() if __name__ == '__main__': run_with_influx_exception_handler(RfreceiverAction().main, NODE_NAME)
{ "content_hash": "89c0379b01efc73fbd133e24de91fb2f", "timestamp": "", "source": "github", "line_count": 38, "max_line_length": 79, "avg_line_length": 24.605263157894736, "alnum_prop": 0.6064171122994653, "repo_name": "EndPointCorp/lg_ros_nodes", "id": "9da8bd6239b4178dd55e61603e7addd55598ec9c", "size": "958", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "lg_rfreceiver/scripts/actuator.py", "mode": "33261", "license": "apache-2.0", "language": [ { "name": "C", "bytes": "28157" }, { "name": "C++", "bytes": "291289" }, { "name": "CMake", "bytes": "26675" }, { "name": "Dockerfile", "bytes": "15931" }, { "name": "HTML", "bytes": "29662" }, { "name": "JavaScript", "bytes": "430737" }, { "name": "Makefile", "bytes": "4197" }, { "name": "Python", "bytes": "1144931" }, { "name": "Shell", "bytes": "17851" } ], "symlink_target": "" }
from updateImat import updateIMAT from getf2fhttp import getf2fhttpEvents from getAgendaEvents import getAgendaEvents import settings from updateGoogleCalendar import updateGoogleCalendar import time from sendEmail import sendEmail from getf2fExcel import getf2fExcelEvents from utils import sortEvents settings = settings.Settings() def processOnce(settings): info = '' try: if settings.defined('f2fScheduleURL'): events = getf2fhttpEvents(settings) elif settings.defined('f2fExcelFile'): events = getf2fExcelEvents(settings) elif settings.defined('agendaExcelFile'): events = getAgendaEvents(settings) else: assert (False),"No source for F2F or Agenda schedule" # Update IMAT if settings.defined('imatUser'): info += updateIMAT(settings, events) # Update the online Google Calendar if settings.defined('calendarID'): info += updateGoogleCalendar(settings, events) except KeyboardInterrupt: raise # Pass exception on to main loop except: if settings.loop: # Summarise the exception, but keep executing import sys info = "Unexpected exception:" + str(sys.exc_info()) else: raise # Report the exception and break execution if len(info) > 0: if not settings.update: info = "WARNING: Dry run. No changes made. \n" + info print info sendEmail(settings, info) def processLoop(settings): try: # Main program loop until keyboard interrupt while True: processOnce(settings) if not settings.loop: print "Exiting single loop in 5s" time.sleep(5) break time.sleep(10 * 60) except KeyboardInterrupt: # ^C pressed print ('Interrupted') import sys sys.exit() if __name__ == '__main__': processLoop(settings)
{ "content_hash": "4a2ac9d9d4b7a455852d004fefbdfcf4", "timestamp": "", "source": "github", "line_count": 84, "max_line_length": 67, "avg_line_length": 26.345238095238095, "alnum_prop": 0.5684591052869408, "repo_name": "adrian-stephens/schedule", "id": "7381acea10561a43ad04fde468e8ca88aeed9f55", "size": "3304", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "main.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "82842" } ], "symlink_target": "" }
import subprocess from tempest.tests.lib import base class TestTempestListPlugins(base.TestCase): def test_run_list_plugins(self): return_code = subprocess.call( ['tempest', 'list-plugins'], stdout=subprocess.PIPE) self.assertEqual(return_code, 0)
{ "content_hash": "2d9620f2b911905d1216f0a70ff193b6", "timestamp": "", "source": "github", "line_count": 10, "max_line_length": 64, "avg_line_length": 28.3, "alnum_prop": 0.696113074204947, "repo_name": "HybridF5/tempest_debug", "id": "782dde74169cd8ca8da5a4cc20774cb40d02b8ae", "size": "890", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "tempest/tests/cmd/test_list_plugins.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "3634721" }, { "name": "Shell", "bytes": "8175" } ], "symlink_target": "" }
""" Named entity recognition fine-tuning: utilities to work with CoNLL-2003 task. """ import logging import os from dataclasses import dataclass from enum import Enum from typing import List, Optional, Union from filelock import FileLock from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available logger = logging.getLogger(__name__) @dataclass class InputExample: """ A single training/test example for token classification. Args: guid: Unique id for the example. words: list. The words of the sequence. labels: (Optional) list. The labels for each word of the sequence. This should be specified for train and dev examples, but not for test examples. """ guid: str words: List[str] labels: Optional[List[str]] @dataclass class InputFeatures: """ A single set of features of data. Property names are the same names as the corresponding inputs to a model. """ input_ids: List[int] attention_mask: List[int] token_type_ids: Optional[List[int]] = None label_ids: Optional[List[int]] = None class Split(Enum): train = "train" dev = "dev" test = "test" class TokenClassificationTask: @staticmethod def read_examples_from_file(data_dir, mode: Union[Split, str]) -> List[InputExample]: raise NotImplementedError @staticmethod def get_labels(path: str) -> List[str]: raise NotImplementedError @staticmethod def convert_examples_to_features( examples: List[InputExample], label_list: List[str], max_seq_length: int, tokenizer: PreTrainedTokenizer, cls_token_at_end=False, cls_token="[CLS]", cls_token_segment_id=1, sep_token="[SEP]", sep_token_extra=False, pad_on_left=False, pad_token=0, pad_token_segment_id=0, pad_token_label_id=-100, sequence_a_segment_id=0, mask_padding_with_zero=True, ) -> List[InputFeatures]: """Loads a data file into a list of `InputFeatures` `cls_token_at_end` define the location of the CLS token: - False (Default, BERT/XLM pattern): [CLS] + A + [SEP] + B + [SEP] - True (XLNet/GPT pattern): A + [SEP] + B + [SEP] + [CLS] `cls_token_segment_id` define the segment id associated to the CLS token (0 for BERT, 2 for XLNet) """ # TODO clean up all this to leverage built-in features of tokenizers label_map = {label: i for i, label in enumerate(label_list)} features = [] for ex_index, example in enumerate(examples): if ex_index % 10_000 == 0: logger.info("Writing example %d of %d", ex_index, len(examples)) tokens = [] label_ids = [] for word, label in zip(example.words, example.labels): word_tokens = tokenizer.tokenize(word) # bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space. if len(word_tokens) > 0: tokens.extend(word_tokens) # Use the real label id for the first token of the word, and padding ids for the remaining tokens label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1)) # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa. special_tokens_count = tokenizer.num_special_tokens_to_add() if len(tokens) > max_seq_length - special_tokens_count: tokens = tokens[: (max_seq_length - special_tokens_count)] label_ids = label_ids[: (max_seq_length - special_tokens_count)] # The convention in BERT is: # (a) For sequence pairs: # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP] # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1 # (b) For single sequences: # tokens: [CLS] the dog is hairy . [SEP] # type_ids: 0 0 0 0 0 0 0 # # Where "type_ids" are used to indicate whether this is the first # sequence or the second sequence. The embedding vectors for `type=0` and # `type=1` were learned during pre-training and are added to the wordpiece # embedding vector (and position vector). This is not *strictly* necessary # since the [SEP] token unambiguously separates the sequences, but it makes # it easier for the model to learn the concept of sequences. # # For classification tasks, the first vector (corresponding to [CLS]) is # used as the "sentence vector". Note that this only makes sense because # the entire model is fine-tuned. tokens += [sep_token] label_ids += [pad_token_label_id] if sep_token_extra: # roberta uses an extra separator b/w pairs of sentences tokens += [sep_token] label_ids += [pad_token_label_id] segment_ids = [sequence_a_segment_id] * len(tokens) if cls_token_at_end: tokens += [cls_token] label_ids += [pad_token_label_id] segment_ids += [cls_token_segment_id] else: tokens = [cls_token] + tokens label_ids = [pad_token_label_id] + label_ids segment_ids = [cls_token_segment_id] + segment_ids input_ids = tokenizer.convert_tokens_to_ids(tokens) # The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids) # Zero-pad up to the sequence length. padding_length = max_seq_length - len(input_ids) if pad_on_left: input_ids = ([pad_token] * padding_length) + input_ids input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids label_ids = ([pad_token_label_id] * padding_length) + label_ids else: input_ids += [pad_token] * padding_length input_mask += [0 if mask_padding_with_zero else 1] * padding_length segment_ids += [pad_token_segment_id] * padding_length label_ids += [pad_token_label_id] * padding_length assert len(input_ids) == max_seq_length assert len(input_mask) == max_seq_length assert len(segment_ids) == max_seq_length assert len(label_ids) == max_seq_length if ex_index < 5: logger.info("*** Example ***") logger.info("guid: %s", example.guid) logger.info("tokens: %s", " ".join([str(x) for x in tokens])) logger.info("input_ids: %s", " ".join([str(x) for x in input_ids])) logger.info("input_mask: %s", " ".join([str(x) for x in input_mask])) logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids])) logger.info("label_ids: %s", " ".join([str(x) for x in label_ids])) if "token_type_ids" not in tokenizer.model_input_names: segment_ids = None features.append( InputFeatures( input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids, label_ids=label_ids ) ) return features if is_torch_available(): import torch from torch import nn from torch.utils.data import Dataset class TokenClassificationDataset(Dataset): """ This will be superseded by a framework-agnostic approach soon. """ features: List[InputFeatures] pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index # Use cross entropy ignore_index as padding label id so that only # real label ids contribute to the loss later. def __init__( self, token_classification_task: TokenClassificationTask, data_dir: str, tokenizer: PreTrainedTokenizer, labels: List[str], model_type: str, max_seq_length: Optional[int] = None, overwrite_cache=False, mode: Split = Split.train, ): # Load data features from cache or dataset file cached_features_file = os.path.join( data_dir, "cached_{}_{}_{}".format(mode.value, tokenizer.__class__.__name__, str(max_seq_length)), ) # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. lock_path = cached_features_file + ".lock" with FileLock(lock_path): if os.path.exists(cached_features_file) and not overwrite_cache: logger.info(f"Loading features from cached file {cached_features_file}") self.features = torch.load(cached_features_file) else: logger.info(f"Creating features from dataset file at {data_dir}") examples = token_classification_task.read_examples_from_file(data_dir, mode) # TODO clean up all this to leverage built-in features of tokenizers self.features = token_classification_task.convert_examples_to_features( examples, labels, max_seq_length, tokenizer, cls_token_at_end=bool(model_type in ["xlnet"]), # xlnet has a cls token at the end cls_token=tokenizer.cls_token, cls_token_segment_id=2 if model_type in ["xlnet"] else 0, sep_token=tokenizer.sep_token, sep_token_extra=False, # roberta uses an extra separator b/w pairs of sentences, cf. github.com/pytorch/fairseq/commit/1684e166e3da03f5b600dbb7855cb98ddfcd0805 pad_on_left=bool(tokenizer.padding_side == "left"), pad_token=tokenizer.pad_token_id, pad_token_segment_id=tokenizer.pad_token_type_id, pad_token_label_id=self.pad_token_label_id, ) logger.info(f"Saving features into cached file {cached_features_file}") torch.save(self.features, cached_features_file) def __len__(self): return len(self.features) def __getitem__(self, i) -> InputFeatures: return self.features[i] if is_tf_available(): import tensorflow as tf class TFTokenClassificationDataset: """ This will be superseded by a framework-agnostic approach soon. """ features: List[InputFeatures] pad_token_label_id: int = -100 # Use cross entropy ignore_index as padding label id so that only # real label ids contribute to the loss later. def __init__( self, token_classification_task: TokenClassificationTask, data_dir: str, tokenizer: PreTrainedTokenizer, labels: List[str], model_type: str, max_seq_length: Optional[int] = None, overwrite_cache=False, mode: Split = Split.train, ): examples = token_classification_task.read_examples_from_file(data_dir, mode) # TODO clean up all this to leverage built-in features of tokenizers self.features = token_classification_task.convert_examples_to_features( examples, labels, max_seq_length, tokenizer, cls_token_at_end=bool(model_type in ["xlnet"]), # xlnet has a cls token at the end cls_token=tokenizer.cls_token, cls_token_segment_id=2 if model_type in ["xlnet"] else 0, sep_token=tokenizer.sep_token, sep_token_extra=False, # roberta uses an extra separator b/w pairs of sentences, cf. github.com/pytorch/fairseq/commit/1684e166e3da03f5b600dbb7855cb98ddfcd0805 pad_on_left=bool(tokenizer.padding_side == "left"), pad_token=tokenizer.pad_token_id, pad_token_segment_id=tokenizer.pad_token_type_id, pad_token_label_id=self.pad_token_label_id, ) def gen(): for ex in self.features: if ex.token_type_ids is None: yield ( {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask}, ex.label_ids, ) else: yield ( { "input_ids": ex.input_ids, "attention_mask": ex.attention_mask, "token_type_ids": ex.token_type_ids, }, ex.label_ids, ) if "token_type_ids" not in tokenizer.model_input_names: self.dataset = tf.data.Dataset.from_generator( gen, ({"input_ids": tf.int32, "attention_mask": tf.int32}, tf.int64), ( {"input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None])}, tf.TensorShape([None]), ), ) else: self.dataset = tf.data.Dataset.from_generator( gen, ({"input_ids": tf.int32, "attention_mask": tf.int32, "token_type_ids": tf.int32}, tf.int64), ( { "input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None]), "token_type_ids": tf.TensorShape([None]), }, tf.TensorShape([None]), ), ) def get_dataset(self): self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features))) return self.dataset def __len__(self): return len(self.features) def __getitem__(self, i) -> InputFeatures: return self.features[i]
{ "content_hash": "af3dfaaca70793db875dddf2edebbd05", "timestamp": "", "source": "github", "line_count": 357, "max_line_length": 160, "avg_line_length": 41.83473389355742, "alnum_prop": 0.540140609306997, "repo_name": "huggingface/transformers", "id": "35fcb5ef5b7d22317a8db2c92b6eea9035b61d80", "size": "15644", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "examples/legacy/token-classification/utils_ner.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "C", "bytes": "6021" }, { "name": "C++", "bytes": "12959" }, { "name": "Cuda", "bytes": "175419" }, { "name": "Dockerfile", "bytes": "18218" }, { "name": "Jsonnet", "bytes": "937" }, { "name": "Makefile", "bytes": "3430" }, { "name": "Python", "bytes": "35742012" }, { "name": "Shell", "bytes": "30374" } ], "symlink_target": "" }
import time import uuid from unittest import skipIf from django.contrib.auth.models import User from django.core.management import call_command from django.test import TestCase, override_settings try: from django.urls import reverse except ImportError: from django.core.urlresolvers import reverse from django.test.client import Client from django.conf import settings from mock import patch, PropertyMock from rq import get_current_job, Queue from rq.job import Job from rq.registry import (DeferredJobRegistry, FinishedJobRegistry, StartedJobRegistry) from rq.worker import Worker from django_rq.decorators import job from django_rq.jobs import get_job_class from django_rq.queues import ( get_connection, get_queue, get_queue_by_index, get_queues, get_unique_connection_configs, DjangoRQ ) from django_rq import thread_queue from django_rq.templatetags.django_rq import to_localtime from django_rq.workers import (get_worker, get_worker_class, collect_workers_by_connection, get_all_workers_by_configuration) try: from rq_scheduler import Scheduler from ..queues import get_scheduler RQ_SCHEDULER_INSTALLED = True except ImportError: RQ_SCHEDULER_INSTALLED = False QUEUES = settings.RQ_QUEUES def access_self(): return get_current_job().id def divide(a, b): return a / b def long_running_job(timeout=10): time.sleep(timeout) return 'Done sleeping...' def get_failed_queue_index(name='default'): """ Returns the position of FailedQueue for the named queue in QUEUES_LIST """ # Get the index of FailedQueue for 'default' Queue in QUEUES_LIST queue_index = None connection = get_connection(name) connection_kwargs = connection.connection_pool.connection_kwargs for i in range(0, 100): q = get_queue_by_index(i) if q.name == 'failed' and q.connection.connection_pool.connection_kwargs == connection_kwargs: queue_index = i break return queue_index def get_queue_index(name='default'): """ Returns the position of Queue for the named queue in QUEUES_LIST """ queue_index = None connection = get_connection(name) connection_kwargs = connection.connection_pool.connection_kwargs for i in range(0, 100): q = get_queue_by_index(i) if q.name == name and q.connection.connection_pool.connection_kwargs == connection_kwargs: queue_index = i break return queue_index class RqstatsTest(TestCase): def test_get_connection_default(self): """ Test that rqstats returns the right statistics """ # Override testing RQ_QUEUES queues = [{ 'connection_config': { 'DB': 0, 'HOST': 'localhost', 'PORT': 6379, }, 'name': 'default' }] with patch('django_rq.utils.QUEUES_LIST', new_callable=PropertyMock(return_value=queues)): # Only to make sure it doesn't crash call_command('rqstats') call_command('rqstats', '-j') call_command('rqstats', '-y') @override_settings(RQ={'AUTOCOMMIT': True}) class QueuesTest(TestCase): def test_get_connection_default(self): """ Test that get_connection returns the right connection based for `defaut` queue. """ config = QUEUES['default'] connection = get_connection() connection_kwargs = connection.connection_pool.connection_kwargs self.assertEqual(connection_kwargs['host'], config['HOST']) self.assertEqual(connection_kwargs['port'], config['PORT']) self.assertEqual(connection_kwargs['db'], config['DB']) def test_get_connection_test(self): """ Test that get_connection returns the right connection based for `test` queue. """ config = QUEUES['test'] connection = get_connection('test') connection_kwargs = connection.connection_pool.connection_kwargs self.assertEqual(connection_kwargs['host'], config['HOST']) self.assertEqual(connection_kwargs['port'], config['PORT']) self.assertEqual(connection_kwargs['db'], config['DB']) def test_get_queue_default(self): """ Test that get_queue use the right parameters for `default` connection. """ config = QUEUES['default'] queue = get_queue('default') connection_kwargs = queue.connection.connection_pool.connection_kwargs self.assertEqual(queue.name, 'default') self.assertEqual(connection_kwargs['host'], config['HOST']) self.assertEqual(connection_kwargs['port'], config['PORT']) self.assertEqual(connection_kwargs['db'], config['DB']) def test_get_queue_url(self): """ Test that get_queue use the right parameters for queues using URL for connection. """ config = QUEUES['url'] queue = get_queue('url') connection_kwargs = queue.connection.connection_pool.connection_kwargs self.assertEqual(queue.name, 'url') self.assertEqual(connection_kwargs['host'], 'host') self.assertEqual(connection_kwargs['port'], 1234) self.assertEqual(connection_kwargs['db'], 4) self.assertEqual(connection_kwargs['password'], 'password') def test_get_queue_url_with_db(self): """ Test that get_queue use the right parameters for queues using URL for connection, where URL contains the db number (either as querystring or path segment). """ config = QUEUES['url_with_db'] queue = get_queue('url_with_db') connection_kwargs = queue.connection.connection_pool.connection_kwargs self.assertEqual(queue.name, 'url_with_db') self.assertEqual(connection_kwargs['host'], 'host') self.assertEqual(connection_kwargs['port'], 1234) self.assertEqual(connection_kwargs['db'], 5) self.assertEqual(connection_kwargs['password'], 'password') def test_get_queue_url_with_db_default(self): """ Test that get_queue use the right parameters for queues using URL for connection, where no DB given and URL does not contain the db number (redis-py defaults to 0, should not break). """ config = QUEUES['url_default_db'] queue = get_queue('url_default_db') connection_kwargs = queue.connection.connection_pool.connection_kwargs self.assertEqual(queue.name, 'url_default_db') self.assertEqual(connection_kwargs['host'], 'host') self.assertEqual(connection_kwargs['port'], 1234) self.assertEqual(connection_kwargs['db'], 0) self.assertEqual(connection_kwargs['password'], 'password') def test_get_queue_test(self): """ Test that get_queue use the right parameters for `test` connection. """ config = QUEUES['test'] queue = get_queue('test') connection_kwargs = queue.connection.connection_pool.connection_kwargs self.assertEqual(queue.name, 'test') self.assertEqual(connection_kwargs['host'], config['HOST']) self.assertEqual(connection_kwargs['port'], config['PORT']) self.assertEqual(connection_kwargs['db'], config['DB']) def test_get_queues_same_connection(self): """ Checks that getting queues with the same redis connection is ok. """ self.assertEqual(get_queues('test', 'test2'), [get_queue('test'), get_queue('test2')]) def test_get_queues_different_connections(self): """ Checks that getting queues with different redis connections raise an exception. """ self.assertRaises(ValueError, get_queues, 'default', 'test') def test_get_queues_different_classes(self): """ Checks that getting queues with different classes (defined in configuration) raises an exception. """ self.assertRaises(ValueError, get_queues, 'test', 'test1') def test_pass_queue_via_commandline_args(self): """ Checks that passing queues via commandline arguments works """ queue_names = ['django_rq_test', 'django_rq_test2'] jobs = [] for queue_name in queue_names: queue = get_queue(queue_name) jobs.append({ 'job': queue.enqueue(divide, 42, 1), 'finished_job_registry': FinishedJobRegistry(queue.name, queue.connection), }) call_command('rqworker', *queue_names, burst=True) for job in jobs: self.assertTrue(job['job'].is_finished) self.assertIn(job['job'].id, job['finished_job_registry'].get_job_ids()) def test_get_unique_connection_configs(self): connection_params_1 = { 'HOST': 'localhost', 'PORT': 6379, 'DB': 0, } connection_params_2 = { 'HOST': 'localhost', 'PORT': 6379, 'DB': 1, } config = { 'default': connection_params_1, 'test': connection_params_2 } unique_configs = get_unique_connection_configs(config) self.assertEqual(len(unique_configs), 2) self.assertIn(connection_params_1, unique_configs) self.assertIn(connection_params_2, unique_configs) # self.assertEqual(get_unique_connection_configs(config), # [connection_params_1, connection_params_2]) config = { 'default': connection_params_1, 'test': connection_params_1 } # Should return one connection config since it filters out duplicates self.assertEqual(get_unique_connection_configs(config), [connection_params_1]) def test_get_unique_connection_configs_with_different_timeout(self): connection_params_1 = { 'HOST': 'localhost', 'PORT': 6379, 'DB': 0, } connection_params_2 = { 'HOST': 'localhost', 'PORT': 6379, 'DB': 1, } queue_params_a = dict(connection_params_1) queue_params_b = dict(connection_params_2) queue_params_c = dict(connection_params_2) queue_params_c["DEFAULT_TIMEOUT"] = 1 config = { 'default': queue_params_a, 'test_b': queue_params_b, 'test_c': queue_params_c, } unique_configs = get_unique_connection_configs(config) self.assertEqual(len(unique_configs), 2) self.assertIn(connection_params_1, unique_configs) self.assertIn(connection_params_2, unique_configs) def test_async(self): """ Checks whether asynchronous settings work """ # Make sure async is not set by default default_queue = get_queue('default') self.assertTrue(default_queue._async) # Make sure async override works default_queue_async = get_queue('default', async=False) self.assertFalse(default_queue_async._async) # Make sure async setting works async_queue = get_queue('async') self.assertFalse(async_queue._async) @override_settings(RQ={'AUTOCOMMIT': False}) def test_autocommit(self): """ Checks whether autocommit is set properly. """ queue = get_queue(autocommit=True) self.assertTrue(queue._autocommit) queue = get_queue(autocommit=False) self.assertFalse(queue._autocommit) # Falls back to default AUTOCOMMIT mode queue = get_queue() self.assertFalse(queue._autocommit) queues = get_queues(autocommit=True) self.assertTrue(queues[0]._autocommit) queues = get_queues(autocommit=False) self.assertFalse(queues[0]._autocommit) queues = get_queues() self.assertFalse(queues[0]._autocommit) def test_default_timeout(self): """Ensure DEFAULT_TIMEOUT are properly parsed.""" queue = get_queue() self.assertEqual(queue._default_timeout, 500) queue = get_queue('test1') self.assertEqual(queue._default_timeout, 400) @override_settings(RQ={'AUTOCOMMIT': True}) class DecoratorTest(TestCase): def test_job_decorator(self): # Ensure that decorator passes in the right queue from settings.py queue_name = 'test3' config = QUEUES[queue_name] @job(queue_name) def test(): pass result = test.delay() queue = get_queue(queue_name) self.assertEqual(result.origin, queue_name) result.delete() def test_job_decorator_default(self): # Ensure that decorator passes in the right queue from settings.py @job def test(): pass result = test.delay() self.assertEqual(result.origin, 'default') result.delete() def test_job_decorator_result_ttl_default(self): from rq.defaults import DEFAULT_RESULT_TTL @job def test(): pass result = test.delay() self.assertEqual(result.result_ttl, DEFAULT_RESULT_TTL) result.delete() @override_settings(RQ={'AUTOCOMMIT': True, 'DEFAULT_RESULT_TTL': 5432}) def test_job_decorator_result_ttl(self): @job def test(): pass result = test.delay() self.assertEqual(result.result_ttl, 5432) result.delete() @override_settings(RQ={'AUTOCOMMIT': True}) class WorkersTest(TestCase): def test_get_worker_default(self): """ By default, ``get_worker`` should return worker for ``default`` queue. """ worker = get_worker() queue = worker.queues[0] self.assertEqual(queue.name, 'default') def test_get_worker_specified(self): """ Checks if a worker with specified queues is created when queue names are given. """ w = get_worker('test') self.assertEqual(len(w.queues), 1) queue = w.queues[0] self.assertEqual(queue.name, 'test') def test_get_worker_custom_classes(self): w = get_worker('test', job_class='django_rq.tests.DummyJob', queue_class='django_rq.tests.DummyQueue', worker_class='django_rq.tests.DummyWorker') self.assertIs(w.job_class, DummyJob) self.assertIsInstance(w.queues[0], DummyQueue) self.assertIsInstance(w, DummyWorker) def test_get_current_job(self): """ Ensure that functions using RQ's ``get_current_job`` doesn't fail when run from rqworker (the job id is not in the failed queue). """ queue = get_queue() job = queue.enqueue(access_self) call_command('rqworker', '--burst') failed_queue = Queue(name='failed', connection=queue.connection) self.assertFalse(job.id in failed_queue.job_ids) job.delete() def test_collects_worker_various_connections_get_multiple_collection(self): queues = [ {'name': 'default', 'connection_config': settings.RQ_QUEUES['default']}, {'name': 'django_rq_test', 'connection_config': settings.RQ_QUEUES['django_rq_test']}, {'name': 'test3', 'connection_config': settings.RQ_QUEUES['test3']}, ] collections = collect_workers_by_connection(queues) self.assertEqual(len(collections), 2) @override_settings(RQ={'AUTOCOMMIT': True}) class ViewTest(TestCase): def setUp(self): self.user = User.objects.create_user('foo', password='pass') self.user.is_staff = True self.user.is_active = True self.user.save() self.client = Client() self.client.login(username=self.user.username, password='pass') get_queue('django_rq_test').connection.flushall() def test_requeue_job(self): """ Ensure that a failed job gets requeued when rq_requeue_job is called """ def failing_job(): raise ValueError queue = get_queue('default') queue_index = get_failed_queue_index('default') job = queue.enqueue(failing_job) worker = get_worker('default') worker.work(burst=True) job.refresh() self.assertTrue(job.is_failed) self.client.post(reverse('rq_requeue_job', args=[queue_index, job.id]), {'requeue': 'Requeue'}) self.assertIn(job, queue.jobs) job.delete() def test_delete_job(self): """ In addition to deleting job from Redis, the job id also needs to be deleted from Queue. """ queue = get_queue('django_rq_test') queue_index = get_queue_index('django_rq_test') job = queue.enqueue(access_self) self.client.post(reverse('rq_delete_job', args=[queue_index, job.id]), {'post': 'yes'}) self.assertFalse(Job.exists(job.id, connection=queue.connection)) self.assertNotIn(job.id, queue.job_ids) def test_action_delete_jobs(self): queue = get_queue('django_rq_test') queue_index = get_queue_index('django_rq_test') # enqueue some jobs job_ids = [] for _ in range(0, 3): job = queue.enqueue(access_self) job_ids.append(job.id) # remove those jobs using view self.client.post(reverse('rq_actions', args=[queue_index]), {'action': 'delete', 'job_ids': job_ids}) # check if jobs are removed for job_id in job_ids: self.assertFalse(Job.exists(job_id, connection=queue.connection)) self.assertNotIn(job_id, queue.job_ids) def test_action_requeue_jobs(self): def failing_job(): raise ValueError queue = get_queue('django_rq_test') failed_queue_index = get_failed_queue_index('django_rq_test') # enqueue some jobs that will fail jobs = [] job_ids = [] for _ in range(0, 3): job = queue.enqueue(failing_job) jobs.append(job) job_ids.append(job.id) # do those jobs = fail them worker = get_worker('django_rq_test') worker.work(burst=True) # check if all jobs are really failed for job in jobs: self.assertTrue(job.is_failed) # renqueue failed jobs from failed queue self.client.post(reverse('rq_actions', args=[failed_queue_index]), {'action': 'requeue', 'job_ids': job_ids}) # check if we requeue all failed jobs for job in jobs: self.assertFalse(job.is_failed) def test_clear_queue(self): """Test that the queue clear actually clears the queue.""" queue = get_queue('django_rq_test') queue_index = get_queue_index('django_rq_test') job = queue.enqueue(access_self) self.client.post(reverse('rq_clear', args=[queue_index]), {'post': 'yes'}) self.assertFalse(Job.exists(job.id, connection=queue.connection)) self.assertNotIn(job.id, queue.job_ids) def test_finished_jobs(self): """Ensure that finished jobs page works properly.""" queue = get_queue('django_rq_test') queue_index = get_queue_index('django_rq_test') job = queue.enqueue(access_self) registry = FinishedJobRegistry(queue.name, queue.connection) registry.add(job, 2) response = self.client.get( reverse('rq_finished_jobs', args=[queue_index]) ) self.assertEqual(response.context['jobs'], [job]) def test_started_jobs(self): """Ensure that active jobs page works properly.""" queue = get_queue('django_rq_test') queue_index = get_queue_index('django_rq_test') job = queue.enqueue(access_self) registry = StartedJobRegistry(queue.name, queue.connection) registry.add(job, 2) response = self.client.get( reverse('rq_started_jobs', args=[queue_index]) ) self.assertEqual(response.context['jobs'], [job]) def test_deferred_jobs(self): """Ensure that active jobs page works properly.""" queue = get_queue('django_rq_test') queue_index = get_queue_index('django_rq_test') job = queue.enqueue(access_self) registry = DeferredJobRegistry(queue.name, queue.connection) registry.add(job, 2) response = self.client.get( reverse('rq_deferred_jobs', args=[queue_index]) ) self.assertEqual(response.context['jobs'], [job]) def test_get_all_workers(self): worker1 = get_worker() worker2 = get_worker('test') workers_collections = [ {'config': {'URL': 'redis://'}, 'all_workers': [worker1]}, {'config': {'URL': 'redis://localhost/1'}, 'all_workers': [worker2]}, ] result = get_all_workers_by_configuration({'URL': 'redis://'}, workers_collections) self.assertEqual(result, [worker1]) def test_workers(self): """Worker index page should show workers for a specific queue""" queue = get_queue('django_rq_test') queue_index = get_queue_index('django_rq_test') worker1 = get_worker('django_rq_test', name=uuid.uuid4().hex) worker1.register_birth() worker2 = get_worker('test3') worker2.register_birth() response = self.client.get( reverse('rq_workers', args=[queue_index]) ) self.assertEqual(response.context['workers'], [worker1]) def test_worker_details(self): """Worker index page should show workers for a specific queue""" queue = get_queue('django_rq_test') queue_index = get_queue_index('django_rq_test') worker = get_worker('django_rq_test', name=uuid.uuid4().hex) worker.register_birth() response = self.client.get( reverse('rq_worker_details', args=[queue_index, worker.key]) ) self.assertEqual(response.context['worker'], worker) def test_statistics_json_view(self): """ Django-RQ's statistic as JSON only viewable by staff or with API_TOKEN """ # Override testing RQ_QUEUES queues = [{ 'connection_config': { 'DB': 0, 'HOST': 'localhost', 'PORT': 6379, }, 'name': 'default' }] with patch('django_rq.utils.QUEUES_LIST', new_callable=PropertyMock(return_value=queues)): response = self.client.get(reverse('rq_home')) self.assertEqual(response.status_code, 200) response = self.client.get(reverse('rq_home_json')) self.assertEqual(response.status_code, 200) # Not staff, only token self.user.is_staff = False self.user.save() response = self.client.get(reverse('rq_home')) self.assertEqual(response.status_code, 302) # Error, but with 200 code response = self.client.get(reverse('rq_home_json')) self.assertEqual(response.status_code, 200) self.assertIn("error", response.content.decode('utf-8')) # With token, token = '12345abcde' with patch('django_rq.views.API_TOKEN', new_callable=PropertyMock(return_value=token)): response = self.client.get(reverse('rq_home_json', args=[token])) self.assertEqual(response.status_code, 200) self.assertIn("name", response.content.decode('utf-8')) self.assertNotIn('"error": true', response.content.decode('utf-8')) # Wrong token response = self.client.get(reverse('rq_home_json', args=["wrong_token"])) self.assertEqual(response.status_code, 200) self.assertNotIn("name", response.content.decode('utf-8')) self.assertIn('"error": true', response.content.decode('utf-8')) class ThreadQueueTest(TestCase): @override_settings(RQ={'AUTOCOMMIT': True}) def test_enqueue_autocommit_on(self): """ Running ``enqueue`` when AUTOCOMMIT is on should immediately persist job into Redis. """ queue = get_queue() job = queue.enqueue(divide, 1, 1) self.assertTrue(job.id in queue.job_ids) job.delete() @override_settings(RQ={'AUTOCOMMIT': False}) def test_enqueue_autocommit_off(self): """ Running ``enqueue`` when AUTOCOMMIT is off should put the job in the delayed queue instead of enqueueing it right away. """ queue = get_queue() job = queue.enqueue(divide, 1, b=1) self.assertTrue(job is None) delayed_queue = thread_queue.get_queue() self.assertEqual(delayed_queue[0][0], queue) self.assertEqual(delayed_queue[0][1], ()) kwargs = delayed_queue[0][2] self.assertEqual(kwargs['args'], (1,)) self.assertEqual(kwargs['result_ttl'], None) self.assertEqual(kwargs['kwargs'], {'b': 1}) self.assertEqual(kwargs['func'], divide) self.assertEqual(kwargs['timeout'], None) def test_commit(self): """ Ensure that commit_delayed_jobs properly enqueue jobs and clears delayed_queue. """ queue = get_queue() delayed_queue = thread_queue.get_queue() queue.empty() self.assertEqual(queue.count, 0) queue.enqueue_call(divide, args=(1,), kwargs={'b': 1}) thread_queue.commit() self.assertEqual(queue.count, 1) self.assertEqual(len(delayed_queue), 0) def test_clear(self): queue = get_queue() delayed_queue = thread_queue.get_queue() delayed_queue.append((queue, divide, (1,), {'b': 1})) thread_queue.clear() delayed_queue = thread_queue.get_queue() self.assertEqual(delayed_queue, []) @override_settings(RQ={'AUTOCOMMIT': False}) def test_success(self): queue = get_queue() queue.empty() thread_queue.clear() self.assertEqual(queue.count, 0) self.client.get(reverse('success')) self.assertEqual(queue.count, 1) @override_settings(RQ={'AUTOCOMMIT': False}) def test_error(self): queue = get_queue() queue.empty() self.assertEqual(queue.count, 0) url = reverse('error') self.assertRaises(ValueError, self.client.get, url) self.assertEqual(queue.count, 0) class SchedulerTest(TestCase): @skipIf(RQ_SCHEDULER_INSTALLED is False, 'RQ Scheduler not installed') def test_get_scheduler(self): """ Ensure get_scheduler creates a scheduler instance with the right connection params for `test` queue. """ config = QUEUES['test'] scheduler = get_scheduler('test') connection_kwargs = scheduler.connection.connection_pool.connection_kwargs self.assertEqual(scheduler.queue_name, 'test') self.assertEqual(connection_kwargs['host'], config['HOST']) self.assertEqual(connection_kwargs['port'], config['PORT']) self.assertEqual(connection_kwargs['db'], config['DB']) class RedisCacheTest(TestCase): @skipIf(settings.REDIS_CACHE_TYPE != 'django-redis', 'django-redis not installed') def test_get_queue_django_redis(self): """ Test that the USE_REDIS_CACHE option for configuration works. """ queueName = 'django-redis' queue = get_queue(queueName) connection_kwargs = queue.connection.connection_pool.connection_kwargs self.assertEqual(queue.name, queueName) cacheHost = settings.CACHES[queueName]['LOCATION'].split(':')[0] cachePort = settings.CACHES[queueName]['LOCATION'].split(':')[1] cacheDBNum = settings.CACHES[queueName]['LOCATION'].split(':')[2] self.assertEqual(connection_kwargs['host'], cacheHost) self.assertEqual(connection_kwargs['port'], int(cachePort)) self.assertEqual(connection_kwargs['db'], int(cacheDBNum)) self.assertEqual(connection_kwargs['password'], None) @skipIf(settings.REDIS_CACHE_TYPE != 'django-redis-cache', 'django-redis-cache not installed') def test_get_queue_django_redis_cache(self): """ Test that the USE_REDIS_CACHE option for configuration works. """ queueName = 'django-redis-cache' queue = get_queue(queueName) connection_kwargs = queue.connection.connection_pool.connection_kwargs self.assertEqual(queue.name, queueName) cacheHost = settings.CACHES[queueName]['LOCATION'].split(':')[0] cachePort = settings.CACHES[queueName]['LOCATION'].split(':')[1] cacheDBNum = settings.CACHES[queueName]['OPTIONS']['DB'] self.assertEqual(connection_kwargs['host'], cacheHost) self.assertEqual(connection_kwargs['port'], int(cachePort)) self.assertEqual(connection_kwargs['db'], int(cacheDBNum)) self.assertEqual(connection_kwargs['password'], None) class DummyJob(Job): pass class JobClassTest(TestCase): def test_default_job_class(self): job_class = get_job_class() self.assertIs(job_class, Job) @override_settings(RQ={'JOB_CLASS': 'django_rq.tests.DummyJob'}) def test_custom_class(self): job_class = get_job_class() self.assertIs(job_class, DummyJob) def test_local_override(self): self.assertIs(get_job_class('django_rq.tests.DummyJob'), DummyJob) class DummyQueue(DjangoRQ): """Just Fake class for the following test""" class QueueClassTest(TestCase): def test_default_queue_class(self): queue = get_queue('test') self.assertIsInstance(queue, DjangoRQ) def test_for_queue(self): queue = get_queue('test1') self.assertIsInstance(queue, DummyQueue) def test_in_kwargs(self): queue = get_queue('test', queue_class=DummyQueue) self.assertIsInstance(queue, DummyQueue) class DummyWorker(Worker): pass class WorkerClassTest(TestCase): def test_default_worker_class(self): worker = get_worker('test') self.assertIsInstance(worker, Worker) @override_settings(RQ={'WORKER_CLASS': 'django_rq.tests.DummyWorker'}) def test_custom_class(self): worker = get_worker('test') self.assertIsInstance(worker, DummyWorker) def test_local_override(self): self.assertIs(get_worker_class('django_rq.tests.DummyWorker'), DummyWorker) @override_settings(RQ={'AUTOCOMMIT': True}) class TemplateTagTest(TestCase): def test_to_localtime(self): with self.settings(TIME_ZONE='Asia/Jakarta'): queue = get_queue() job = queue.enqueue(access_self) time = to_localtime(job.created_at) self.assertIsNotNone(time.tzinfo) self.assertEqual(time.strftime("%z"), '+0700')
{ "content_hash": "cdffc87df0dc1bbd2acb3da8e800cc28", "timestamp": "", "source": "github", "line_count": 874, "max_line_length": 102, "avg_line_length": 35.779176201373, "alnum_prop": 0.6095743660260305, "repo_name": "1024inc/django-rq", "id": "58188cc6087df554f86907b2a7c7a801dd873be4", "size": "31271", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "django_rq/tests/tests.py", "mode": "33188", "license": "mit", "language": [ { "name": "HTML", "bytes": "28218" }, { "name": "Makefile", "bytes": "173" }, { "name": "Python", "bytes": "96432" } ], "symlink_target": "" }
"""Tests for the learner goals.""" from __future__ import annotations from core import feconf from core.constants import constants from core.domain import learner_goals_services from core.domain import learner_progress_services from core.domain import topic_domain from core.domain import topic_services from core.tests import test_utils class LearnerGoalsHandlerTests(test_utils.GenericTestBase): OWNER_EMAIL = 'owner@example.com' OWNER_USERNAME = 'owner' TOPIC_ID_1 = 'Topic_id_1' TOPIC_NAME_1 = 'Topic name 1' TOPIC_ID_2 = 'Topic_id_2' TOPIC_NAME_2 = 'Topic name 2' TOPIC_ID_3 = 'Topic_id_3' TOPIC_NAME_3 = 'Topic name 3' TOPIC_ID_4 = 'Topic_id_4' TOPIC_NAME_4 = 'Topic name 4' subtopic_1 = topic_domain.Subtopic( 0, 'Title 1', ['skill_id_1'], 'image.svg', constants.ALLOWED_THUMBNAIL_BG_COLORS['subtopic'][0], 21131, 'dummy-subtopic-zero') subtopic_2 = topic_domain.Subtopic( 0, 'Title 1', ['skill_id_1'], 'image.svg', constants.ALLOWED_THUMBNAIL_BG_COLORS['subtopic'][0], 21131, 'dummy-subtopic-zero') subtopic_3 = topic_domain.Subtopic( 0, 'Title 1', ['skill_id_1'], 'image.svg', constants.ALLOWED_THUMBNAIL_BG_COLORS['subtopic'][0], 21131, 'dummy-subtopic-zero') subtopic_4 = topic_domain.Subtopic( 0, 'Title 1', ['skill_id_1'], 'image.svg', constants.ALLOWED_THUMBNAIL_BG_COLORS['subtopic'][0], 21131, 'dummy-subtopic-zero' ) def setUp(self): super(LearnerGoalsHandlerTests, self).setUp() self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) self.signup(self.VIEWER_EMAIL, self.VIEWER_USERNAME) self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) self.viewer_id = self.get_user_id_from_email(self.VIEWER_EMAIL) self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL) self.curriculum_admin_id = self.get_user_id_from_email( self.CURRICULUM_ADMIN_EMAIL) self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME]) # Save the topics. self.save_new_topic( self.TOPIC_ID_1, self.owner_id, name=self.TOPIC_NAME_1, url_fragment='topic-one', description='A new topic', canonical_story_ids=[], additional_story_ids=[], uncategorized_skill_ids=[], subtopics=[self.subtopic_1], next_subtopic_id=1) topic_services.publish_topic(self.TOPIC_ID_1, self.curriculum_admin_id) self.save_new_topic( self.TOPIC_ID_2, self.owner_id, name=self.TOPIC_NAME_2, url_fragment='topic-two', description='A new topic', canonical_story_ids=[], additional_story_ids=[], uncategorized_skill_ids=[], subtopics=[self.subtopic_2], next_subtopic_id=1) topic_services.publish_topic(self.TOPIC_ID_2, self.curriculum_admin_id) self.save_new_topic( self.TOPIC_ID_3, self.owner_id, name=self.TOPIC_NAME_3, url_fragment='topic-three', description='A new topic', canonical_story_ids=[], additional_story_ids=[], uncategorized_skill_ids=[], subtopics=[self.subtopic_3], next_subtopic_id=1) topic_services.publish_topic(self.TOPIC_ID_3, self.curriculum_admin_id) self.save_new_topic( self.TOPIC_ID_4, self.owner_id, name=self.TOPIC_NAME_4, url_fragment='topic-four', description='A new topic', canonical_story_ids=[], additional_story_ids=[], uncategorized_skill_ids=[], subtopics=[self.subtopic_4], next_subtopic_id=1) topic_services.publish_topic(self.TOPIC_ID_4, self.curriculum_admin_id) def test_add_topic_to_learner_goal(self): self.login(self.VIEWER_EMAIL) csrf_token = self.get_new_csrf_token() # Add one topic to the learner goal. self.post_json( '%s/%s/%s' % ( feconf.LEARNER_GOALS_DATA_URL, constants.ACTIVITY_TYPE_LEARN_TOPIC, self.TOPIC_ID_1), {}, csrf_token=csrf_token) self.assertEqual( learner_goals_services.get_all_topic_ids_to_learn( self.viewer_id), [self.TOPIC_ID_1]) # Add another topic. self.post_json( '%s/%s/%s' % ( feconf.LEARNER_GOALS_DATA_URL, constants.ACTIVITY_TYPE_LEARN_TOPIC, self.TOPIC_ID_2), {}, csrf_token=csrf_token) self.assertEqual( learner_goals_services.get_all_topic_ids_to_learn( self.viewer_id), [self.TOPIC_ID_1, self.TOPIC_ID_2]) # If a topic belongs to the completed list, it should not be added. learner_progress_services.mark_topic_as_learnt( self.viewer_id, self.TOPIC_ID_3) response = self.post_json( '%s/%s/%s' % ( feconf.LEARNER_GOALS_DATA_URL, constants.ACTIVITY_TYPE_LEARN_TOPIC, self.TOPIC_ID_3), {}, csrf_token=csrf_token) self.assertEqual( response['belongs_to_learnt_list'], True) self.assertEqual( learner_goals_services.get_all_topic_ids_to_learn( self.viewer_id), [self.TOPIC_ID_1, self.TOPIC_ID_2]) # Fail to add one topic to the learner goal. response = self.post_json( '%s/%s/%s' % ( feconf.LEARNER_GOALS_DATA_URL, 'InvalidActivityType', self.TOPIC_ID_1), {}, csrf_token=csrf_token, expected_status_int=400) self.assertEqual( response['error'], 'Invalid activityType: InvalidActivityType') # Now we begin testing of not exceeding the limit of activities in the # learner goals. # Add feconf.MAX_CURRENT_GOALS_COUNT - 2 activities to reach # the maximum limit. for topic_id in range(2, feconf.MAX_CURRENT_GOALS_COUNT + 1): self.post_json( '%s/%s/%s' % ( feconf.LEARNER_GOALS_DATA_URL, constants.ACTIVITY_TYPE_LEARN_TOPIC, 'topic_id_%s' % topic_id), {}, csrf_token=csrf_token) # Now if we try and add a topic we should get a message saying we # are exceeding the limit. response = self.post_json( '%s/%s/%s' % ( feconf.LEARNER_GOALS_DATA_URL, constants.ACTIVITY_TYPE_LEARN_TOPIC, 'topic_id_%s' % str(feconf.MAX_CURRENT_GOALS_COUNT + 3)), {}, csrf_token=csrf_token) self.assertEqual(response['goals_limit_exceeded'], True) self.logout() def test_remove_topic_from_learner_goals(self): self.login(self.VIEWER_EMAIL) csrf_token = self.get_new_csrf_token() # Add topic to the learner goals. learner_progress_services.validate_and_add_topic_to_learn_goal( self.viewer_id, self.TOPIC_ID_1) learner_progress_services.validate_and_add_topic_to_learn_goal( self.viewer_id, self.TOPIC_ID_2) self.assertEqual( learner_goals_services.get_all_topic_ids_to_learn( self.viewer_id), [self.TOPIC_ID_1, self.TOPIC_ID_2]) # Remove an topic. self.delete_json( '%s/%s/%s' % ( feconf.LEARNER_GOALS_DATA_URL, constants.ACTIVITY_TYPE_LEARN_TOPIC, self.TOPIC_ID_1)) self.assertEqual( learner_goals_services.get_all_topic_ids_to_learn( self.viewer_id), [self.TOPIC_ID_2]) # Remove the second topic. self.delete_json('%s/%s/%s' % ( feconf.LEARNER_GOALS_DATA_URL, constants.ACTIVITY_TYPE_LEARN_TOPIC, self.TOPIC_ID_2)) self.assertEqual( learner_goals_services.get_all_topic_ids_to_learn( self.viewer_id), []) # Add one topic to the learner goal. self.post_json( '%s/%s/%s' % ( feconf.LEARNER_GOALS_DATA_URL, constants.ACTIVITY_TYPE_LEARN_TOPIC, self.TOPIC_ID_1), {}, csrf_token=csrf_token) # Fail to delete one topic from learner goals. response = self.delete_json('%s/%s/%s' % ( feconf.LEARNER_GOALS_DATA_URL, 'InvalidActivityType', self.TOPIC_ID_1), expected_status_int=400) self.assertEqual( response['error'], 'Invalid activityType: InvalidActivityType') self.logout()
{ "content_hash": "e1f3dedfcded59fa15a57ee9fea62986", "timestamp": "", "source": "github", "line_count": 215, "max_line_length": 80, "avg_line_length": 40.311627906976746, "alnum_prop": 0.5835929387331257, "repo_name": "kevinlee12/oppia", "id": "a5186dcb64e676d63977b4695509881d580ff241", "size": "9272", "binary": false, "copies": "2", "ref": "refs/heads/develop", "path": "core/controllers/learner_goals_test.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "205771" }, { "name": "HTML", "bytes": "1835761" }, { "name": "JavaScript", "bytes": "1182599" }, { "name": "PEG.js", "bytes": "71377" }, { "name": "Python", "bytes": "13670639" }, { "name": "Shell", "bytes": "2239" }, { "name": "TypeScript", "bytes": "13024194" } ], "symlink_target": "" }
if __name__ == '__main__': x = int(input()) y = int(input()) z = int(input()) n = int(input()) """ pythonListComprehensions.py You are given three integers and representing the dimensions of a cuboid along with an integer . You have to print a list of all possible coordinates given by on a 3D grid where the sum of is not equal to. """ print ([[a,b,c] for a in range(0,x+1) for b in range(0,y+1) for c in range(0,z+1) if a + b + c != n ])
{ "content_hash": "9d3843296609c4198a64603901f9b420", "timestamp": "", "source": "github", "line_count": 12, "max_line_length": 228, "avg_line_length": 42.083333333333336, "alnum_prop": 0.5762376237623762, "repo_name": "bluewitch/Code-Blue-Python", "id": "4c77c37553042741d9b9e7370196c3c3d478b50a", "size": "505", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "HR_pythonListComprehensions.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "1645545" } ], "symlink_target": "" }
import requests import logging from deployd.types.ping_response import PingResponse from deployd.common.decorators import singleton from deployd.common.exceptions import AgentException log = logging.getLogger(__name__) @singleton class RestfulClient(object): def __init__(self, config): self.config = config self.url_prefix = config.get_restful_service_url() self.url_version = config.get_restful_service_version() self.token = config.get_restful_service_token() self.default_timeout = 30 def __call(self, method): def api(path, params=None, data=None): url = '%s/%s%s' % (self.url_prefix, self.url_version, path) if self.token: headers = {'Authorization': 'token %s' % self.token, 'Content-type': 'application/json'} else: headers = {'Content-type': 'application/json'} response = getattr(requests, method)(url, headers=headers, params=params, json=data, timeout=self.default_timeout, verify=False) if response.status_code > 300: msg = "Teletraan failed to call backend server. Hint: %s, %s" % (response.status_code, response.content) log.error(msg) raise AgentException(msg) if (response.content): return response.json() return api def _ping_internal(self, ping_request): return self.__call('post')("/system/ping", data=ping_request) def ping(self, ping_request): # python object -> json response = self._ping_internal(ping_request.to_json()) # json -> python object ping_response = PingResponse(jsonValue=response) return ping_response
{ "content_hash": "1d5d353e0ccf8026f063665037630ff9", "timestamp": "", "source": "github", "line_count": 49, "max_line_length": 120, "avg_line_length": 36.46938775510204, "alnum_prop": 0.608282036933408, "repo_name": "mingzhaodotname/teletraan", "id": "7726f3f6d709085c78ea12a3164c645acc13f46d", "size": "2373", "binary": false, "copies": "4", "ref": "refs/heads/master", "path": "deploy-agent/deployd/client/restfulclient.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "268939" }, { "name": "HTML", "bytes": "357611" }, { "name": "Java", "bytes": "1051157" }, { "name": "JavaScript", "bytes": "3008229" }, { "name": "Makefile", "bytes": "184" }, { "name": "Python", "bytes": "697650" }, { "name": "Ruby", "bytes": "1124" }, { "name": "Shell", "bytes": "16702" } ], "symlink_target": "" }
from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('article', '0007_auto_20151028_1813'), ] operations = [ migrations.AlterField( model_name='article', name='category', field=models.CharField(default=b'', max_length=50, blank=True), ), migrations.AlterField( model_name='article', name='content', field=models.TextField(default=b'', null=True), ), migrations.AlterField( model_name='article', name='difficulty', field=models.CharField(default=b'', max_length=1, null=True, choices=[(b'E', b'easy'), (b'M', b'medium'), (b'H', b'hard')]), ), ]
{ "content_hash": "4c62a114717e4879a5d5379edea7b0d3", "timestamp": "", "source": "github", "line_count": 28, "max_line_length": 136, "avg_line_length": 29.071428571428573, "alnum_prop": 0.5552825552825553, "repo_name": "mactanxin/leetcodeblog", "id": "d396222beb56da9831f2ecbd7033278c5f324848", "size": "838", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "article/migrations/0008_auto_20151028_1814.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "65161" }, { "name": "HTML", "bytes": "21084" }, { "name": "JavaScript", "bytes": "505887" }, { "name": "Python", "bytes": "17062" }, { "name": "Ruby", "bytes": "837" } ], "symlink_target": "" }
from unittest import TestCase from todone.parser.factory import ( ParserFactory, PresetArgument, ) from todone.parser.match import AlwaysMatch from todone.parser.textparser import TextParser class TestParserFactory(TestCase): def test_class_can_be_instantiated(self): ParserFactory() def test_factory_creates_parser(self): parser = ParserFactory.from_arg_list() self.assertEqual(type(parser), TextParser) def test_factory_creates_parser_without_arguments(self): parser = ParserFactory.from_arg_list() self.assertEqual(len(parser.arguments), 0) def test_factory_creates_parser_with_argument(self): parser = ParserFactory.from_arg_list( [(PresetArgument.all_remaining, {'name': 'test'}), ]) self.assertEqual(len(parser.arguments), 1) def test_factory_creates_parser_with_arguments(self): arglist = [ (PresetArgument.all_remaining, {'name': 'foo'}), (PresetArgument.all_remaining, {'name': 'bar'}), ] parser = ParserFactory.from_arg_list(arglist) self.assertEqual(len(parser.arguments), len(arglist)) def test_factory_creates_specified_types_of_arguments(self): arglist = [ (PresetArgument.all_remaining, {'name': 'foo'}), ] parser = ParserFactory.from_arg_list(arglist) arg = parser.arguments[0] self.assertEqual(arg.name, 'foo') self.assertTrue(issubclass(type(arg), AlwaysMatch)) def test_factory_can_create_all_presets(self): args = [(arg, {'name': 'foo'}) for arg in PresetArgument] ParserFactory.from_arg_list(args) def test_factory_can_override_presets(self): arglist = [ (PresetArgument.index, {'name': 'foo', 'options': ['bar', 'baz'], 'match': AlwaysMatch} ), ] parser = ParserFactory.from_arg_list(arglist) arg = parser.arguments[0] self.assertEqual(arg.options, ['bar', 'baz']) self.assertTrue(issubclass(type(arg), AlwaysMatch))
{ "content_hash": "45ed31778e95eb965fb84c87a1c72ecd", "timestamp": "", "source": "github", "line_count": 61, "max_line_length": 65, "avg_line_length": 34.68852459016394, "alnum_prop": 0.6257088846880907, "repo_name": "safnuk/todone", "id": "514be117f5abcc2cd06a6764e4944eb8a01b0cb4", "size": "2116", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "todone/parser/tests/test_factory.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Gherkin", "bytes": "7622" }, { "name": "Python", "bytes": "152589" } ], "symlink_target": "" }
import urllib2 import cookielib import re cookie = cookielib.CookieJar() handler=urllib2.HTTPCookieProcessor(cookie) opener = urllib2.build_opener(handler) response = opener.open('http://weibo.com/u/5038161234/home?wvr=5&lf=reg') for item in cookie: print 'Name = '+item.name print 'Value = '+item.value file=open('C:\Users\Ryan\Desktop\sinaCookie.txt','w') for item in cookie: file.write(item.name) file.write(item.value)
{ "content_hash": "e46d70ec2e17409c70449bc09bd6dcd6", "timestamp": "", "source": "github", "line_count": 19, "max_line_length": 73, "avg_line_length": 24.263157894736842, "alnum_prop": 0.702819956616052, "repo_name": "YChrisZhang/PythonCrawler", "id": "fe999276628625fca5553ca8cdb6db3124cf6a53", "size": "476", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "getCookie.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "2730" } ], "symlink_target": "" }
"""Lite version of scipy.linalg. Notes ----- This module is a lite version of the linalg.py module in SciPy which contains high-level Python interface to the LAPACK library. The lite version only accesses the following LAPACK functions: dgesv, zgesv, dgeev, zgeev, dgesdd, zgesdd, dgelsd, zgelsd, dsyevd, zheevd, dgetrf, zgetrf, dpotrf, zpotrf, dgeqrf, zgeqrf, zungqr, dorgqr. """ from __future__ import division, absolute_import, print_function __all__ = ['matrix_power', 'solve', 'tensorsolve', 'tensorinv', 'inv', 'cholesky', 'eigvals', 'eigvalsh', 'pinv', 'slogdet', 'det', 'svd', 'eig', 'eigh', 'lstsq', 'norm', 'qr', 'cond', 'matrix_rank', 'LinAlgError', 'multi_dot'] import warnings from numpy.core import ( array, asarray, zeros, empty, empty_like, transpose, intc, single, double, csingle, cdouble, inexact, complexfloating, newaxis, ravel, all, Inf, dot, add, multiply, sqrt, maximum, fastCopyAndTranspose, sum, isfinite, size, finfo, errstate, geterrobj, longdouble, rollaxis, amin, amax, product, abs, broadcast, atleast_2d, intp, asanyarray ) from numpy.lib import triu, asfarray from numpy.linalg import lapack_lite, _umath_linalg from numpy.matrixlib.defmatrix import matrix_power from numpy.compat import asbytes # For Python2/3 compatibility _N = asbytes('N') _V = asbytes('V') _A = asbytes('A') _S = asbytes('S') _L = asbytes('L') fortran_int = intc # Error object class LinAlgError(Exception): """ Generic Python-exception-derived object raised by linalg functions. General purpose exception class, derived from Python's exception.Exception class, programmatically raised in linalg functions when a Linear Algebra-related condition would prevent further correct execution of the function. Parameters ---------- None Examples -------- >>> from numpy import linalg as LA >>> LA.inv(np.zeros((2,2))) Traceback (most recent call last): File "<stdin>", line 1, in <module> File "...linalg.py", line 350, in inv return wrap(solve(a, identity(a.shape[0], dtype=a.dtype))) File "...linalg.py", line 249, in solve raise LinAlgError('Singular matrix') numpy.linalg.LinAlgError: Singular matrix """ pass # Dealing with errors in _umath_linalg _linalg_error_extobj = None def _determine_error_states(): global _linalg_error_extobj errobj = geterrobj() bufsize = errobj[0] with errstate(invalid='call', over='ignore', divide='ignore', under='ignore'): invalid_call_errmask = geterrobj()[1] _linalg_error_extobj = [bufsize, invalid_call_errmask, None] _determine_error_states() def _raise_linalgerror_singular(err, flag): raise LinAlgError("Singular matrix") def _raise_linalgerror_nonposdef(err, flag): raise LinAlgError("Matrix is not positive definite") def _raise_linalgerror_eigenvalues_nonconvergence(err, flag): raise LinAlgError("Eigenvalues did not converge") def _raise_linalgerror_svd_nonconvergence(err, flag): raise LinAlgError("SVD did not converge") def get_linalg_error_extobj(callback): extobj = list(_linalg_error_extobj) extobj[2] = callback return extobj def _makearray(a): new = asarray(a) wrap = getattr(a, "__array_prepare__", new.__array_wrap__) return new, wrap def isComplexType(t): return issubclass(t, complexfloating) _real_types_map = {single : single, double : double, csingle : single, cdouble : double} _complex_types_map = {single : csingle, double : cdouble, csingle : csingle, cdouble : cdouble} def _realType(t, default=double): return _real_types_map.get(t, default) def _complexType(t, default=cdouble): return _complex_types_map.get(t, default) def _linalgRealType(t): """Cast the type t to either double or cdouble.""" return double _complex_types_map = {single : csingle, double : cdouble, csingle : csingle, cdouble : cdouble} def _commonType(*arrays): # in lite version, use higher precision (always double or cdouble) result_type = single is_complex = False for a in arrays: if issubclass(a.dtype.type, inexact): if isComplexType(a.dtype.type): is_complex = True rt = _realType(a.dtype.type, default=None) if rt is None: # unsupported inexact scalar raise TypeError("array type %s is unsupported in linalg" % (a.dtype.name,)) else: rt = double if rt is double: result_type = double if is_complex: t = cdouble result_type = _complex_types_map[result_type] else: t = double return t, result_type # _fastCopyAndTranpose assumes the input is 2D (as all the calls in here are). _fastCT = fastCopyAndTranspose def _to_native_byte_order(*arrays): ret = [] for arr in arrays: if arr.dtype.byteorder not in ('=', '|'): ret.append(asarray(arr, dtype=arr.dtype.newbyteorder('='))) else: ret.append(arr) if len(ret) == 1: return ret[0] else: return ret def _fastCopyAndTranspose(type, *arrays): cast_arrays = () for a in arrays: if a.dtype.type is type: cast_arrays = cast_arrays + (_fastCT(a),) else: cast_arrays = cast_arrays + (_fastCT(a.astype(type)),) if len(cast_arrays) == 1: return cast_arrays[0] else: return cast_arrays def _assertRank2(*arrays): for a in arrays: if len(a.shape) != 2: raise LinAlgError('%d-dimensional array given. Array must be ' 'two-dimensional' % len(a.shape)) def _assertRankAtLeast2(*arrays): for a in arrays: if len(a.shape) < 2: raise LinAlgError('%d-dimensional array given. Array must be ' 'at least two-dimensional' % len(a.shape)) def _assertSquareness(*arrays): for a in arrays: if max(a.shape) != min(a.shape): raise LinAlgError('Array must be square') def _assertNdSquareness(*arrays): for a in arrays: if max(a.shape[-2:]) != min(a.shape[-2:]): raise LinAlgError('Last 2 dimensions of the array must be square') def _assertFinite(*arrays): for a in arrays: if not (isfinite(a).all()): raise LinAlgError("Array must not contain infs or NaNs") def _assertNoEmpty2d(*arrays): for a in arrays: if a.size == 0 and product(a.shape[-2:]) == 0: raise LinAlgError("Arrays cannot be empty") # Linear equations def tensorsolve(a, b, axes=None): """ Solve the tensor equation ``a x = b`` for x. It is assumed that all indices of `x` are summed over in the product, together with the rightmost indices of `a`, as is done in, for example, ``tensordot(a, x, axes=len(b.shape))``. Parameters ---------- a : array_like Coefficient tensor, of shape ``b.shape + Q``. `Q`, a tuple, equals the shape of that sub-tensor of `a` consisting of the appropriate number of its rightmost indices, and must be such that ``prod(Q) == prod(b.shape)`` (in which sense `a` is said to be 'square'). b : array_like Right-hand tensor, which can be of any shape. axes : tuple of ints, optional Axes in `a` to reorder to the right, before inversion. If None (default), no reordering is done. Returns ------- x : ndarray, shape Q Raises ------ LinAlgError If `a` is singular or not 'square' (in the above sense). See Also -------- tensordot, tensorinv, einsum Examples -------- >>> a = np.eye(2*3*4) >>> a.shape = (2*3, 4, 2, 3, 4) >>> b = np.random.randn(2*3, 4) >>> x = np.linalg.tensorsolve(a, b) >>> x.shape (2, 3, 4) >>> np.allclose(np.tensordot(a, x, axes=3), b) True """ a, wrap = _makearray(a) b = asarray(b) an = a.ndim if axes is not None: allaxes = list(range(0, an)) for k in axes: allaxes.remove(k) allaxes.insert(an, k) a = a.transpose(allaxes) oldshape = a.shape[-(an-b.ndim):] prod = 1 for k in oldshape: prod *= k a = a.reshape(-1, prod) b = b.ravel() res = wrap(solve(a, b)) res.shape = oldshape return res def solve(a, b): """ Solve a linear matrix equation, or system of linear scalar equations. Computes the "exact" solution, `x`, of the well-determined, i.e., full rank, linear matrix equation `ax = b`. Parameters ---------- a : (..., M, M) array_like Coefficient matrix. b : {(..., M,), (..., M, K)}, array_like Ordinate or "dependent variable" values. Returns ------- x : {(..., M,), (..., M, K)} ndarray Solution to the system a x = b. Returned shape is identical to `b`. Raises ------ LinAlgError If `a` is singular or not square. Notes ----- .. versionadded:: 1.8.0 Broadcasting rules apply, see the `numpy.linalg` documentation for details. The solutions are computed using LAPACK routine _gesv `a` must be square and of full-rank, i.e., all rows (or, equivalently, columns) must be linearly independent; if either is not true, use `lstsq` for the least-squares best "solution" of the system/equation. References ---------- .. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, FL, Academic Press, Inc., 1980, pg. 22. Examples -------- Solve the system of equations ``3 * x0 + x1 = 9`` and ``x0 + 2 * x1 = 8``: >>> a = np.array([[3,1], [1,2]]) >>> b = np.array([9,8]) >>> x = np.linalg.solve(a, b) >>> x array([ 2., 3.]) Check that the solution is correct: >>> np.allclose(np.dot(a, x), b) True """ a, _ = _makearray(a) _assertRankAtLeast2(a) _assertNdSquareness(a) b, wrap = _makearray(b) t, result_t = _commonType(a, b) # We use the b = (..., M,) logic, only if the number of extra dimensions # match exactly if b.ndim == a.ndim - 1: if a.shape[-1] == 0 and b.shape[-1] == 0: # Legal, but the ufunc cannot handle the 0-sized inner dims # let the ufunc handle all wrong cases. a = a.reshape(a.shape[:-1]) bc = broadcast(a, b) return wrap(empty(bc.shape, dtype=result_t)) gufunc = _umath_linalg.solve1 else: if b.size == 0: if (a.shape[-1] == 0 and b.shape[-2] == 0) or b.shape[-1] == 0: a = a[:,:1].reshape(a.shape[:-1] + (1,)) bc = broadcast(a, b) return wrap(empty(bc.shape, dtype=result_t)) gufunc = _umath_linalg.solve signature = 'DD->D' if isComplexType(t) else 'dd->d' extobj = get_linalg_error_extobj(_raise_linalgerror_singular) r = gufunc(a, b, signature=signature, extobj=extobj) return wrap(r.astype(result_t)) def tensorinv(a, ind=2): """ Compute the 'inverse' of an N-dimensional array. The result is an inverse for `a` relative to the tensordot operation ``tensordot(a, b, ind)``, i. e., up to floating-point accuracy, ``tensordot(tensorinv(a), a, ind)`` is the "identity" tensor for the tensordot operation. Parameters ---------- a : array_like Tensor to 'invert'. Its shape must be 'square', i. e., ``prod(a.shape[:ind]) == prod(a.shape[ind:])``. ind : int, optional Number of first indices that are involved in the inverse sum. Must be a positive integer, default is 2. Returns ------- b : ndarray `a`'s tensordot inverse, shape ``a.shape[ind:] + a.shape[:ind]``. Raises ------ LinAlgError If `a` is singular or not 'square' (in the above sense). See Also -------- tensordot, tensorsolve Examples -------- >>> a = np.eye(4*6) >>> a.shape = (4, 6, 8, 3) >>> ainv = np.linalg.tensorinv(a, ind=2) >>> ainv.shape (8, 3, 4, 6) >>> b = np.random.randn(4, 6) >>> np.allclose(np.tensordot(ainv, b), np.linalg.tensorsolve(a, b)) True >>> a = np.eye(4*6) >>> a.shape = (24, 8, 3) >>> ainv = np.linalg.tensorinv(a, ind=1) >>> ainv.shape (8, 3, 24) >>> b = np.random.randn(24) >>> np.allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b)) True """ a = asarray(a) oldshape = a.shape prod = 1 if ind > 0: invshape = oldshape[ind:] + oldshape[:ind] for k in oldshape[ind:]: prod *= k else: raise ValueError("Invalid ind argument.") a = a.reshape(prod, -1) ia = inv(a) return ia.reshape(*invshape) # Matrix inversion def inv(a): """ Compute the (multiplicative) inverse of a matrix. Given a square matrix `a`, return the matrix `ainv` satisfying ``dot(a, ainv) = dot(ainv, a) = eye(a.shape[0])``. Parameters ---------- a : (..., M, M) array_like Matrix to be inverted. Returns ------- ainv : (..., M, M) ndarray or matrix (Multiplicative) inverse of the matrix `a`. Raises ------ LinAlgError If `a` is not square or inversion fails. Notes ----- .. versionadded:: 1.8.0 Broadcasting rules apply, see the `numpy.linalg` documentation for details. Examples -------- >>> from numpy.linalg import inv >>> a = np.array([[1., 2.], [3., 4.]]) >>> ainv = inv(a) >>> np.allclose(np.dot(a, ainv), np.eye(2)) True >>> np.allclose(np.dot(ainv, a), np.eye(2)) True If a is a matrix object, then the return value is a matrix as well: >>> ainv = inv(np.matrix(a)) >>> ainv matrix([[-2. , 1. ], [ 1.5, -0.5]]) Inverses of several matrices can be computed at once: >>> a = np.array([[[1., 2.], [3., 4.]], [[1, 3], [3, 5]]]) >>> inv(a) array([[[-2. , 1. ], [ 1.5, -0.5]], [[-5. , 2. ], [ 3. , -1. ]]]) """ a, wrap = _makearray(a) _assertRankAtLeast2(a) _assertNdSquareness(a) t, result_t = _commonType(a) if a.shape[-1] == 0: # The inner array is 0x0, the ufunc cannot handle this case return wrap(empty_like(a, dtype=result_t)) signature = 'D->D' if isComplexType(t) else 'd->d' extobj = get_linalg_error_extobj(_raise_linalgerror_singular) ainv = _umath_linalg.inv(a, signature=signature, extobj=extobj) return wrap(ainv.astype(result_t)) # Cholesky decomposition def cholesky(a): """ Cholesky decomposition. Return the Cholesky decomposition, `L * L.H`, of the square matrix `a`, where `L` is lower-triangular and .H is the conjugate transpose operator (which is the ordinary transpose if `a` is real-valued). `a` must be Hermitian (symmetric if real-valued) and positive-definite. Only `L` is actually returned. Parameters ---------- a : (..., M, M) array_like Hermitian (symmetric if all elements are real), positive-definite input matrix. Returns ------- L : (..., M, M) array_like Upper or lower-triangular Cholesky factor of `a`. Returns a matrix object if `a` is a matrix object. Raises ------ LinAlgError If the decomposition fails, for example, if `a` is not positive-definite. Notes ----- .. versionadded:: 1.8.0 Broadcasting rules apply, see the `numpy.linalg` documentation for details. The Cholesky decomposition is often used as a fast way of solving .. math:: A \\mathbf{x} = \\mathbf{b} (when `A` is both Hermitian/symmetric and positive-definite). First, we solve for :math:`\\mathbf{y}` in .. math:: L \\mathbf{y} = \\mathbf{b}, and then for :math:`\\mathbf{x}` in .. math:: L.H \\mathbf{x} = \\mathbf{y}. Examples -------- >>> A = np.array([[1,-2j],[2j,5]]) >>> A array([[ 1.+0.j, 0.-2.j], [ 0.+2.j, 5.+0.j]]) >>> L = np.linalg.cholesky(A) >>> L array([[ 1.+0.j, 0.+0.j], [ 0.+2.j, 1.+0.j]]) >>> np.dot(L, L.T.conj()) # verify that L * L.H = A array([[ 1.+0.j, 0.-2.j], [ 0.+2.j, 5.+0.j]]) >>> A = [[1,-2j],[2j,5]] # what happens if A is only array_like? >>> np.linalg.cholesky(A) # an ndarray object is returned array([[ 1.+0.j, 0.+0.j], [ 0.+2.j, 1.+0.j]]) >>> # But a matrix object is returned if A is a matrix object >>> LA.cholesky(np.matrix(A)) matrix([[ 1.+0.j, 0.+0.j], [ 0.+2.j, 1.+0.j]]) """ extobj = get_linalg_error_extobj(_raise_linalgerror_nonposdef) gufunc = _umath_linalg.cholesky_lo a, wrap = _makearray(a) _assertRankAtLeast2(a) _assertNdSquareness(a) t, result_t = _commonType(a) signature = 'D->D' if isComplexType(t) else 'd->d' return wrap(gufunc(a, signature=signature, extobj=extobj).astype(result_t)) # QR decompostion def qr(a, mode='reduced'): """ Compute the qr factorization of a matrix. Factor the matrix `a` as *qr*, where `q` is orthonormal and `r` is upper-triangular. Parameters ---------- a : array_like, shape (M, N) Matrix to be factored. mode : {'reduced', 'complete', 'r', 'raw', 'full', 'economic'}, optional If K = min(M, N), then 'reduced' : returns q, r with dimensions (M, K), (K, N) (default) 'complete' : returns q, r with dimensions (M, M), (M, N) 'r' : returns r only with dimensions (K, N) 'raw' : returns h, tau with dimensions (N, M), (K,) 'full' : alias of 'reduced', deprecated 'economic' : returns h from 'raw', deprecated. The options 'reduced', 'complete, and 'raw' are new in numpy 1.8, see the notes for more information. The default is 'reduced' and to maintain backward compatibility with earlier versions of numpy both it and the old default 'full' can be omitted. Note that array h returned in 'raw' mode is transposed for calling Fortran. The 'economic' mode is deprecated. The modes 'full' and 'economic' may be passed using only the first letter for backwards compatibility, but all others must be spelled out. See the Notes for more explanation. Returns ------- q : ndarray of float or complex, optional A matrix with orthonormal columns. When mode = 'complete' the result is an orthogonal/unitary matrix depending on whether or not a is real/complex. The determinant may be either +/- 1 in that case. r : ndarray of float or complex, optional The upper-triangular matrix. (h, tau) : ndarrays of np.double or np.cdouble, optional The array h contains the Householder reflectors that generate q along with r. The tau array contains scaling factors for the reflectors. In the deprecated 'economic' mode only h is returned. Raises ------ LinAlgError If factoring fails. Notes ----- This is an interface to the LAPACK routines dgeqrf, zgeqrf, dorgqr, and zungqr. For more information on the qr factorization, see for example: http://en.wikipedia.org/wiki/QR_factorization Subclasses of `ndarray` are preserved except for the 'raw' mode. So if `a` is of type `matrix`, all the return values will be matrices too. New 'reduced', 'complete', and 'raw' options for mode were added in Numpy 1.8 and the old option 'full' was made an alias of 'reduced'. In addition the options 'full' and 'economic' were deprecated. Because 'full' was the previous default and 'reduced' is the new default, backward compatibility can be maintained by letting `mode` default. The 'raw' option was added so that LAPACK routines that can multiply arrays by q using the Householder reflectors can be used. Note that in this case the returned arrays are of type np.double or np.cdouble and the h array is transposed to be FORTRAN compatible. No routines using the 'raw' return are currently exposed by numpy, but some are available in lapack_lite and just await the necessary work. Examples -------- >>> a = np.random.randn(9, 6) >>> q, r = np.linalg.qr(a) >>> np.allclose(a, np.dot(q, r)) # a does equal qr True >>> r2 = np.linalg.qr(a, mode='r') >>> r3 = np.linalg.qr(a, mode='economic') >>> np.allclose(r, r2) # mode='r' returns the same r as mode='full' True >>> # But only triu parts are guaranteed equal when mode='economic' >>> np.allclose(r, np.triu(r3[:6,:6], k=0)) True Example illustrating a common use of `qr`: solving of least squares problems What are the least-squares-best `m` and `y0` in ``y = y0 + mx`` for the following data: {(0,1), (1,0), (1,2), (2,1)}. (Graph the points and you'll see that it should be y0 = 0, m = 1.) The answer is provided by solving the over-determined matrix equation ``Ax = b``, where:: A = array([[0, 1], [1, 1], [1, 1], [2, 1]]) x = array([[y0], [m]]) b = array([[1], [0], [2], [1]]) If A = qr such that q is orthonormal (which is always possible via Gram-Schmidt), then ``x = inv(r) * (q.T) * b``. (In numpy practice, however, we simply use `lstsq`.) >>> A = np.array([[0, 1], [1, 1], [1, 1], [2, 1]]) >>> A array([[0, 1], [1, 1], [1, 1], [2, 1]]) >>> b = np.array([1, 0, 2, 1]) >>> q, r = LA.qr(A) >>> p = np.dot(q.T, b) >>> np.dot(LA.inv(r), p) array([ 1.1e-16, 1.0e+00]) """ if mode not in ('reduced', 'complete', 'r', 'raw'): if mode in ('f', 'full'): msg = "".join(( "The 'full' option is deprecated in favor of 'reduced'.\n", "For backward compatibility let mode default.")) warnings.warn(msg, DeprecationWarning) mode = 'reduced' elif mode in ('e', 'economic'): msg = "The 'economic' option is deprecated.", warnings.warn(msg, DeprecationWarning) mode = 'economic' else: raise ValueError("Unrecognized mode '%s'" % mode) a, wrap = _makearray(a) _assertRank2(a) _assertNoEmpty2d(a) m, n = a.shape t, result_t = _commonType(a) a = _fastCopyAndTranspose(t, a) a = _to_native_byte_order(a) mn = min(m, n) tau = zeros((mn,), t) if isComplexType(t): lapack_routine = lapack_lite.zgeqrf routine_name = 'zgeqrf' else: lapack_routine = lapack_lite.dgeqrf routine_name = 'dgeqrf' # calculate optimal size of work data 'work' lwork = 1 work = zeros((lwork,), t) results = lapack_routine(m, n, a, m, tau, work, -1, 0) if results['info'] != 0: raise LinAlgError('%s returns %d' % (routine_name, results['info'])) # do qr decomposition lwork = int(abs(work[0])) work = zeros((lwork,), t) results = lapack_routine(m, n, a, m, tau, work, lwork, 0) if results['info'] != 0: raise LinAlgError('%s returns %d' % (routine_name, results['info'])) # handle modes that don't return q if mode == 'r': r = _fastCopyAndTranspose(result_t, a[:, :mn]) return wrap(triu(r)) if mode == 'raw': return a, tau if mode == 'economic': if t != result_t : a = a.astype(result_t) return wrap(a.T) # generate q from a if mode == 'complete' and m > n: mc = m q = empty((m, m), t) else: mc = mn q = empty((n, m), t) q[:n] = a if isComplexType(t): lapack_routine = lapack_lite.zungqr routine_name = 'zungqr' else: lapack_routine = lapack_lite.dorgqr routine_name = 'dorgqr' # determine optimal lwork lwork = 1 work = zeros((lwork,), t) results = lapack_routine(m, mc, mn, q, m, tau, work, -1, 0) if results['info'] != 0: raise LinAlgError('%s returns %d' % (routine_name, results['info'])) # compute q lwork = int(abs(work[0])) work = zeros((lwork,), t) results = lapack_routine(m, mc, mn, q, m, tau, work, lwork, 0) if results['info'] != 0: raise LinAlgError('%s returns %d' % (routine_name, results['info'])) q = _fastCopyAndTranspose(result_t, q[:mc]) r = _fastCopyAndTranspose(result_t, a[:, :mc]) return wrap(q), wrap(triu(r)) # Eigenvalues def eigvals(a): """ Compute the eigenvalues of a general matrix. Main difference between `eigvals` and `eig`: the eigenvectors aren't returned. Parameters ---------- a : (..., M, M) array_like A complex- or real-valued matrix whose eigenvalues will be computed. Returns ------- w : (..., M,) ndarray The eigenvalues, each repeated according to its multiplicity. They are not necessarily ordered, nor are they necessarily real for real matrices. Raises ------ LinAlgError If the eigenvalue computation does not converge. See Also -------- eig : eigenvalues and right eigenvectors of general arrays eigvalsh : eigenvalues of symmetric or Hermitian arrays. eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays. Notes ----- .. versionadded:: 1.8.0 Broadcasting rules apply, see the `numpy.linalg` documentation for details. This is implemented using the _geev LAPACK routines which compute the eigenvalues and eigenvectors of general square arrays. Examples -------- Illustration, using the fact that the eigenvalues of a diagonal matrix are its diagonal elements, that multiplying a matrix on the left by an orthogonal matrix, `Q`, and on the right by `Q.T` (the transpose of `Q`), preserves the eigenvalues of the "middle" matrix. In other words, if `Q` is orthogonal, then ``Q * A * Q.T`` has the same eigenvalues as ``A``: >>> from numpy import linalg as LA >>> x = np.random.random() >>> Q = np.array([[np.cos(x), -np.sin(x)], [np.sin(x), np.cos(x)]]) >>> LA.norm(Q[0, :]), LA.norm(Q[1, :]), np.dot(Q[0, :],Q[1, :]) (1.0, 1.0, 0.0) Now multiply a diagonal matrix by Q on one side and by Q.T on the other: >>> D = np.diag((-1,1)) >>> LA.eigvals(D) array([-1., 1.]) >>> A = np.dot(Q, D) >>> A = np.dot(A, Q.T) >>> LA.eigvals(A) array([ 1., -1.]) """ a, wrap = _makearray(a) _assertNoEmpty2d(a) _assertRankAtLeast2(a) _assertNdSquareness(a) _assertFinite(a) t, result_t = _commonType(a) extobj = get_linalg_error_extobj( _raise_linalgerror_eigenvalues_nonconvergence) signature = 'D->D' if isComplexType(t) else 'd->D' w = _umath_linalg.eigvals(a, signature=signature, extobj=extobj) if not isComplexType(t): if all(w.imag == 0): w = w.real result_t = _realType(result_t) else: result_t = _complexType(result_t) return w.astype(result_t) def eigvalsh(a, UPLO='L'): """ Compute the eigenvalues of a Hermitian or real symmetric matrix. Main difference from eigh: the eigenvectors are not computed. Parameters ---------- a : (..., M, M) array_like A complex- or real-valued matrix whose eigenvalues are to be computed. UPLO : {'L', 'U'}, optional Same as `lower`, with 'L' for lower and 'U' for upper triangular. Deprecated. Returns ------- w : (..., M,) ndarray The eigenvalues, not necessarily ordered, each repeated according to its multiplicity. Raises ------ LinAlgError If the eigenvalue computation does not converge. See Also -------- eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays. eigvals : eigenvalues of general real or complex arrays. eig : eigenvalues and right eigenvectors of general real or complex arrays. Notes ----- .. versionadded:: 1.8.0 Broadcasting rules apply, see the `numpy.linalg` documentation for details. The eigenvalues are computed using LAPACK routines _ssyevd, _heevd Examples -------- >>> from numpy import linalg as LA >>> a = np.array([[1, -2j], [2j, 5]]) >>> LA.eigvalsh(a) array([ 0.17157288+0.j, 5.82842712+0.j]) """ UPLO = UPLO.upper() if UPLO not in ('L', 'U'): raise ValueError("UPLO argument must be 'L' or 'U'") extobj = get_linalg_error_extobj( _raise_linalgerror_eigenvalues_nonconvergence) if UPLO == 'L': gufunc = _umath_linalg.eigvalsh_lo else: gufunc = _umath_linalg.eigvalsh_up a, wrap = _makearray(a) _assertNoEmpty2d(a) _assertRankAtLeast2(a) _assertNdSquareness(a) t, result_t = _commonType(a) signature = 'D->d' if isComplexType(t) else 'd->d' w = gufunc(a, signature=signature, extobj=extobj) return w.astype(_realType(result_t)) def _convertarray(a): t, result_t = _commonType(a) a = _fastCT(a.astype(t)) return a, t, result_t # Eigenvectors def eig(a): """ Compute the eigenvalues and right eigenvectors of a square array. Parameters ---------- a : (..., M, M) array Matrices for which the eigenvalues and right eigenvectors will be computed Returns ------- w : (..., M) array The eigenvalues, each repeated according to its multiplicity. The eigenvalues are not necessarily ordered. The resulting array will be always be of complex type. When `a` is real the resulting eigenvalues will be real (0 imaginary part) or occur in conjugate pairs v : (..., M, M) array The normalized (unit "length") eigenvectors, such that the column ``v[:,i]`` is the eigenvector corresponding to the eigenvalue ``w[i]``. Raises ------ LinAlgError If the eigenvalue computation does not converge. See Also -------- eigvals : eigenvalues of a non-symmetric array. eigh : eigenvalues and eigenvectors of a symmetric or Hermitian (conjugate symmetric) array. eigvalsh : eigenvalues of a symmetric or Hermitian (conjugate symmetric) array. Notes ----- .. versionadded:: 1.8.0 Broadcasting rules apply, see the `numpy.linalg` documentation for details. This is implemented using the _geev LAPACK routines which compute the eigenvalues and eigenvectors of general square arrays. The number `w` is an eigenvalue of `a` if there exists a vector `v` such that ``dot(a,v) = w * v``. Thus, the arrays `a`, `w`, and `v` satisfy the equations ``dot(a[:,:], v[:,i]) = w[i] * v[:,i]`` for :math:`i \\in \\{0,...,M-1\\}`. The array `v` of eigenvectors may not be of maximum rank, that is, some of the columns may be linearly dependent, although round-off error may obscure that fact. If the eigenvalues are all different, then theoretically the eigenvectors are linearly independent. Likewise, the (complex-valued) matrix of eigenvectors `v` is unitary if the matrix `a` is normal, i.e., if ``dot(a, a.H) = dot(a.H, a)``, where `a.H` denotes the conjugate transpose of `a`. Finally, it is emphasized that `v` consists of the *right* (as in right-hand side) eigenvectors of `a`. A vector `y` satisfying ``dot(y.T, a) = z * y.T`` for some number `z` is called a *left* eigenvector of `a`, and, in general, the left and right eigenvectors of a matrix are not necessarily the (perhaps conjugate) transposes of each other. References ---------- G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, FL, Academic Press, Inc., 1980, Various pp. Examples -------- >>> from numpy import linalg as LA (Almost) trivial example with real e-values and e-vectors. >>> w, v = LA.eig(np.diag((1, 2, 3))) >>> w; v array([ 1., 2., 3.]) array([[ 1., 0., 0.], [ 0., 1., 0.], [ 0., 0., 1.]]) Real matrix possessing complex e-values and e-vectors; note that the e-values are complex conjugates of each other. >>> w, v = LA.eig(np.array([[1, -1], [1, 1]])) >>> w; v array([ 1. + 1.j, 1. - 1.j]) array([[ 0.70710678+0.j , 0.70710678+0.j ], [ 0.00000000-0.70710678j, 0.00000000+0.70710678j]]) Complex-valued matrix with real e-values (but complex-valued e-vectors); note that a.conj().T = a, i.e., a is Hermitian. >>> a = np.array([[1, 1j], [-1j, 1]]) >>> w, v = LA.eig(a) >>> w; v array([ 2.00000000e+00+0.j, 5.98651912e-36+0.j]) # i.e., {2, 0} array([[ 0.00000000+0.70710678j, 0.70710678+0.j ], [ 0.70710678+0.j , 0.00000000+0.70710678j]]) Be careful about round-off error! >>> a = np.array([[1 + 1e-9, 0], [0, 1 - 1e-9]]) >>> # Theor. e-values are 1 +/- 1e-9 >>> w, v = LA.eig(a) >>> w; v array([ 1., 1.]) array([[ 1., 0.], [ 0., 1.]]) """ a, wrap = _makearray(a) _assertRankAtLeast2(a) _assertNdSquareness(a) _assertFinite(a) t, result_t = _commonType(a) extobj = get_linalg_error_extobj( _raise_linalgerror_eigenvalues_nonconvergence) signature = 'D->DD' if isComplexType(t) else 'd->DD' w, vt = _umath_linalg.eig(a, signature=signature, extobj=extobj) if not isComplexType(t) and all(w.imag == 0.0): w = w.real vt = vt.real result_t = _realType(result_t) else: result_t = _complexType(result_t) vt = vt.astype(result_t) return w.astype(result_t), wrap(vt) def eigh(a, UPLO='L'): """ Return the eigenvalues and eigenvectors of a Hermitian or symmetric matrix. Returns two objects, a 1-D array containing the eigenvalues of `a`, and a 2-D square array or matrix (depending on the input type) of the corresponding eigenvectors (in columns). Parameters ---------- a : (..., M, M) array Hermitian/Symmetric matrices whose eigenvalues and eigenvectors are to be computed. UPLO : {'L', 'U'}, optional Specifies whether the calculation is done with the lower triangular part of `a` ('L', default) or the upper triangular part ('U'). Returns ------- w : (..., M) ndarray The eigenvalues, not necessarily ordered. v : {(..., M, M) ndarray, (..., M, M) matrix} The column ``v[:, i]`` is the normalized eigenvector corresponding to the eigenvalue ``w[i]``. Will return a matrix object if `a` is a matrix object. Raises ------ LinAlgError If the eigenvalue computation does not converge. See Also -------- eigvalsh : eigenvalues of symmetric or Hermitian arrays. eig : eigenvalues and right eigenvectors for non-symmetric arrays. eigvals : eigenvalues of non-symmetric arrays. Notes ----- .. versionadded:: 1.8.0 Broadcasting rules apply, see the `numpy.linalg` documentation for details. The eigenvalues/eigenvectors are computed using LAPACK routines _ssyevd, _heevd The eigenvalues of real symmetric or complex Hermitian matrices are always real. [1]_ The array `v` of (column) eigenvectors is unitary and `a`, `w`, and `v` satisfy the equations ``dot(a, v[:, i]) = w[i] * v[:, i]``. References ---------- .. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, FL, Academic Press, Inc., 1980, pg. 222. Examples -------- >>> from numpy import linalg as LA >>> a = np.array([[1, -2j], [2j, 5]]) >>> a array([[ 1.+0.j, 0.-2.j], [ 0.+2.j, 5.+0.j]]) >>> w, v = LA.eigh(a) >>> w; v array([ 0.17157288, 5.82842712]) array([[-0.92387953+0.j , -0.38268343+0.j ], [ 0.00000000+0.38268343j, 0.00000000-0.92387953j]]) >>> np.dot(a, v[:, 0]) - w[0] * v[:, 0] # verify 1st e-val/vec pair array([2.77555756e-17 + 0.j, 0. + 1.38777878e-16j]) >>> np.dot(a, v[:, 1]) - w[1] * v[:, 1] # verify 2nd e-val/vec pair array([ 0.+0.j, 0.+0.j]) >>> A = np.matrix(a) # what happens if input is a matrix object >>> A matrix([[ 1.+0.j, 0.-2.j], [ 0.+2.j, 5.+0.j]]) >>> w, v = LA.eigh(A) >>> w; v array([ 0.17157288, 5.82842712]) matrix([[-0.92387953+0.j , -0.38268343+0.j ], [ 0.00000000+0.38268343j, 0.00000000-0.92387953j]]) """ UPLO = UPLO.upper() if UPLO not in ('L', 'U'): raise ValueError("UPLO argument must be 'L' or 'U'") a, wrap = _makearray(a) _assertRankAtLeast2(a) _assertNdSquareness(a) t, result_t = _commonType(a) extobj = get_linalg_error_extobj( _raise_linalgerror_eigenvalues_nonconvergence) if UPLO == 'L': gufunc = _umath_linalg.eigh_lo else: gufunc = _umath_linalg.eigh_up signature = 'D->dD' if isComplexType(t) else 'd->dd' w, vt = gufunc(a, signature=signature, extobj=extobj) w = w.astype(_realType(result_t)) vt = vt.astype(result_t) return w, wrap(vt) # Singular value decomposition def svd(a, full_matrices=1, compute_uv=1): """ Singular Value Decomposition. Factors the matrix `a` as ``u * np.diag(s) * v``, where `u` and `v` are unitary and `s` is a 1-d array of `a`'s singular values. Parameters ---------- a : (..., M, N) array_like A real or complex matrix of shape (`M`, `N`) . full_matrices : bool, optional If True (default), `u` and `v` have the shapes (`M`, `M`) and (`N`, `N`), respectively. Otherwise, the shapes are (`M`, `K`) and (`K`, `N`), respectively, where `K` = min(`M`, `N`). compute_uv : bool, optional Whether or not to compute `u` and `v` in addition to `s`. True by default. Returns ------- u : { (..., M, M), (..., M, K) } array Unitary matrices. The actual shape depends on the value of ``full_matrices``. Only returned when ``compute_uv`` is True. s : (..., K) array The singular values for every matrix, sorted in descending order. v : { (..., N, N), (..., K, N) } array Unitary matrices. The actual shape depends on the value of ``full_matrices``. Only returned when ``compute_uv`` is True. Raises ------ LinAlgError If SVD computation does not converge. Notes ----- .. versionadded:: 1.8.0 Broadcasting rules apply, see the `numpy.linalg` documentation for details. The decomposition is performed using LAPACK routine _gesdd The SVD is commonly written as ``a = U S V.H``. The `v` returned by this function is ``V.H`` and ``u = U``. If ``U`` is a unitary matrix, it means that it satisfies ``U.H = inv(U)``. The rows of `v` are the eigenvectors of ``a.H a``. The columns of `u` are the eigenvectors of ``a a.H``. For row ``i`` in `v` and column ``i`` in `u`, the corresponding eigenvalue is ``s[i]**2``. If `a` is a `matrix` object (as opposed to an `ndarray`), then so are all the return values. Examples -------- >>> a = np.random.randn(9, 6) + 1j*np.random.randn(9, 6) Reconstruction based on full SVD: >>> U, s, V = np.linalg.svd(a, full_matrices=True) >>> U.shape, V.shape, s.shape ((9, 9), (6, 6), (6,)) >>> S = np.zeros((9, 6), dtype=complex) >>> S[:6, :6] = np.diag(s) >>> np.allclose(a, np.dot(U, np.dot(S, V))) True Reconstruction based on reduced SVD: >>> U, s, V = np.linalg.svd(a, full_matrices=False) >>> U.shape, V.shape, s.shape ((9, 6), (6, 6), (6,)) >>> S = np.diag(s) >>> np.allclose(a, np.dot(U, np.dot(S, V))) True """ a, wrap = _makearray(a) _assertNoEmpty2d(a) _assertRankAtLeast2(a) t, result_t = _commonType(a) extobj = get_linalg_error_extobj(_raise_linalgerror_svd_nonconvergence) m = a.shape[-2] n = a.shape[-1] if compute_uv: if full_matrices: if m < n: gufunc = _umath_linalg.svd_m_f else: gufunc = _umath_linalg.svd_n_f else: if m < n: gufunc = _umath_linalg.svd_m_s else: gufunc = _umath_linalg.svd_n_s signature = 'D->DdD' if isComplexType(t) else 'd->ddd' u, s, vt = gufunc(a, signature=signature, extobj=extobj) u = u.astype(result_t) s = s.astype(_realType(result_t)) vt = vt.astype(result_t) return wrap(u), s, wrap(vt) else: if m < n: gufunc = _umath_linalg.svd_m else: gufunc = _umath_linalg.svd_n signature = 'D->d' if isComplexType(t) else 'd->d' s = gufunc(a, signature=signature, extobj=extobj) s = s.astype(_realType(result_t)) return s def cond(x, p=None): """ Compute the condition number of a matrix. This function is capable of returning the condition number using one of seven different norms, depending on the value of `p` (see Parameters below). Parameters ---------- x : (M, N) array_like The matrix whose condition number is sought. p : {None, 1, -1, 2, -2, inf, -inf, 'fro'}, optional Order of the norm: ===== ============================ p norm for matrices ===== ============================ None 2-norm, computed directly using the ``SVD`` 'fro' Frobenius norm inf max(sum(abs(x), axis=1)) -inf min(sum(abs(x), axis=1)) 1 max(sum(abs(x), axis=0)) -1 min(sum(abs(x), axis=0)) 2 2-norm (largest sing. value) -2 smallest singular value ===== ============================ inf means the numpy.inf object, and the Frobenius norm is the root-of-sum-of-squares norm. Returns ------- c : {float, inf} The condition number of the matrix. May be infinite. See Also -------- numpy.linalg.norm Notes ----- The condition number of `x` is defined as the norm of `x` times the norm of the inverse of `x` [1]_; the norm can be the usual L2-norm (root-of-sum-of-squares) or one of a number of other matrix norms. References ---------- .. [1] G. Strang, *Linear Algebra and Its Applications*, Orlando, FL, Academic Press, Inc., 1980, pg. 285. Examples -------- >>> from numpy import linalg as LA >>> a = np.array([[1, 0, -1], [0, 1, 0], [1, 0, 1]]) >>> a array([[ 1, 0, -1], [ 0, 1, 0], [ 1, 0, 1]]) >>> LA.cond(a) 1.4142135623730951 >>> LA.cond(a, 'fro') 3.1622776601683795 >>> LA.cond(a, np.inf) 2.0 >>> LA.cond(a, -np.inf) 1.0 >>> LA.cond(a, 1) 2.0 >>> LA.cond(a, -1) 1.0 >>> LA.cond(a, 2) 1.4142135623730951 >>> LA.cond(a, -2) 0.70710678118654746 >>> min(LA.svd(a, compute_uv=0))*min(LA.svd(LA.inv(a), compute_uv=0)) 0.70710678118654746 """ x = asarray(x) # in case we have a matrix if p is None: s = svd(x, compute_uv=False) return s[0]/s[-1] else: return norm(x, p)*norm(inv(x), p) def matrix_rank(M, tol=None): """ Return matrix rank of array using SVD method Rank of the array is the number of SVD singular values of the array that are greater than `tol`. Parameters ---------- M : {(M,), (M, N)} array_like array of <=2 dimensions tol : {None, float}, optional threshold below which SVD values are considered zero. If `tol` is None, and ``S`` is an array with singular values for `M`, and ``eps`` is the epsilon value for datatype of ``S``, then `tol` is set to ``S.max() * max(M.shape) * eps``. Notes ----- The default threshold to detect rank deficiency is a test on the magnitude of the singular values of `M`. By default, we identify singular values less than ``S.max() * max(M.shape) * eps`` as indicating rank deficiency (with the symbols defined above). This is the algorithm MATLAB uses [1]. It also appears in *Numerical recipes* in the discussion of SVD solutions for linear least squares [2]. This default threshold is designed to detect rank deficiency accounting for the numerical errors of the SVD computation. Imagine that there is a column in `M` that is an exact (in floating point) linear combination of other columns in `M`. Computing the SVD on `M` will not produce a singular value exactly equal to 0 in general: any difference of the smallest SVD value from 0 will be caused by numerical imprecision in the calculation of the SVD. Our threshold for small SVD values takes this numerical imprecision into account, and the default threshold will detect such numerical rank deficiency. The threshold may declare a matrix `M` rank deficient even if the linear combination of some columns of `M` is not exactly equal to another column of `M` but only numerically very close to another column of `M`. We chose our default threshold because it is in wide use. Other thresholds are possible. For example, elsewhere in the 2007 edition of *Numerical recipes* there is an alternative threshold of ``S.max() * np.finfo(M.dtype).eps / 2. * np.sqrt(m + n + 1.)``. The authors describe this threshold as being based on "expected roundoff error" (p 71). The thresholds above deal with floating point roundoff error in the calculation of the SVD. However, you may have more information about the sources of error in `M` that would make you consider other tolerance values to detect *effective* rank deficiency. The most useful measure of the tolerance depends on the operations you intend to use on your matrix. For example, if your data come from uncertain measurements with uncertainties greater than floating point epsilon, choosing a tolerance near that uncertainty may be preferable. The tolerance may be absolute if the uncertainties are absolute rather than relative. References ---------- .. [1] MATLAB reference documention, "Rank" http://www.mathworks.com/help/techdoc/ref/rank.html .. [2] W. H. Press, S. A. Teukolsky, W. T. Vetterling and B. P. Flannery, "Numerical Recipes (3rd edition)", Cambridge University Press, 2007, page 795. Examples -------- >>> from numpy.linalg import matrix_rank >>> matrix_rank(np.eye(4)) # Full rank matrix 4 >>> I=np.eye(4); I[-1,-1] = 0. # rank deficient matrix >>> matrix_rank(I) 3 >>> matrix_rank(np.ones((4,))) # 1 dimension - rank 1 unless all 0 1 >>> matrix_rank(np.zeros((4,))) 0 """ M = asarray(M) if M.ndim > 2: raise TypeError('array should have 2 or fewer dimensions') if M.ndim < 2: return int(not all(M==0)) S = svd(M, compute_uv=False) if tol is None: tol = S.max() * max(M.shape) * finfo(S.dtype).eps return sum(S > tol) # Generalized inverse def pinv(a, rcond=1e-15 ): """ Compute the (Moore-Penrose) pseudo-inverse of a matrix. Calculate the generalized inverse of a matrix using its singular-value decomposition (SVD) and including all *large* singular values. Parameters ---------- a : (M, N) array_like Matrix to be pseudo-inverted. rcond : float Cutoff for small singular values. Singular values smaller (in modulus) than `rcond` * largest_singular_value (again, in modulus) are set to zero. Returns ------- B : (N, M) ndarray The pseudo-inverse of `a`. If `a` is a `matrix` instance, then so is `B`. Raises ------ LinAlgError If the SVD computation does not converge. Notes ----- The pseudo-inverse of a matrix A, denoted :math:`A^+`, is defined as: "the matrix that 'solves' [the least-squares problem] :math:`Ax = b`," i.e., if :math:`\\bar{x}` is said solution, then :math:`A^+` is that matrix such that :math:`\\bar{x} = A^+b`. It can be shown that if :math:`Q_1 \\Sigma Q_2^T = A` is the singular value decomposition of A, then :math:`A^+ = Q_2 \\Sigma^+ Q_1^T`, where :math:`Q_{1,2}` are orthogonal matrices, :math:`\\Sigma` is a diagonal matrix consisting of A's so-called singular values, (followed, typically, by zeros), and then :math:`\\Sigma^+` is simply the diagonal matrix consisting of the reciprocals of A's singular values (again, followed by zeros). [1]_ References ---------- .. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, FL, Academic Press, Inc., 1980, pp. 139-142. Examples -------- The following example checks that ``a * a+ * a == a`` and ``a+ * a * a+ == a+``: >>> a = np.random.randn(9, 6) >>> B = np.linalg.pinv(a) >>> np.allclose(a, np.dot(a, np.dot(B, a))) True >>> np.allclose(B, np.dot(B, np.dot(a, B))) True """ a, wrap = _makearray(a) _assertNoEmpty2d(a) a = a.conjugate() u, s, vt = svd(a, 0) m = u.shape[0] n = vt.shape[1] cutoff = rcond*maximum.reduce(s) for i in range(min(n, m)): if s[i] > cutoff: s[i] = 1./s[i] else: s[i] = 0.; res = dot(transpose(vt), multiply(s[:, newaxis], transpose(u))) return wrap(res) # Determinant def slogdet(a): """ Compute the sign and (natural) logarithm of the determinant of an array. If an array has a very small or very large determinant, then a call to `det` may overflow or underflow. This routine is more robust against such issues, because it computes the logarithm of the determinant rather than the determinant itself. Parameters ---------- a : (..., M, M) array_like Input array, has to be a square 2-D array. Returns ------- sign : (...) array_like A number representing the sign of the determinant. For a real matrix, this is 1, 0, or -1. For a complex matrix, this is a complex number with absolute value 1 (i.e., it is on the unit circle), or else 0. logdet : (...) array_like The natural log of the absolute value of the determinant. If the determinant is zero, then `sign` will be 0 and `logdet` will be -Inf. In all cases, the determinant is equal to ``sign * np.exp(logdet)``. See Also -------- det Notes ----- .. versionadded:: 1.8.0 Broadcasting rules apply, see the `numpy.linalg` documentation for details. .. versionadded:: 1.6.0. The determinant is computed via LU factorization using the LAPACK routine z/dgetrf. Examples -------- The determinant of a 2-D array ``[[a, b], [c, d]]`` is ``ad - bc``: >>> a = np.array([[1, 2], [3, 4]]) >>> (sign, logdet) = np.linalg.slogdet(a) >>> (sign, logdet) (-1, 0.69314718055994529) >>> sign * np.exp(logdet) -2.0 Computing log-determinants for a stack of matrices: >>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ]) >>> a.shape (3, 2, 2) >>> sign, logdet = np.linalg.slogdet(a) >>> (sign, logdet) (array([-1., -1., -1.]), array([ 0.69314718, 1.09861229, 2.07944154])) >>> sign * np.exp(logdet) array([-2., -3., -8.]) This routine succeeds where ordinary `det` does not: >>> np.linalg.det(np.eye(500) * 0.1) 0.0 >>> np.linalg.slogdet(np.eye(500) * 0.1) (1, -1151.2925464970228) """ a = asarray(a) _assertNoEmpty2d(a) _assertRankAtLeast2(a) _assertNdSquareness(a) t, result_t = _commonType(a) real_t = _realType(result_t) signature = 'D->Dd' if isComplexType(t) else 'd->dd' sign, logdet = _umath_linalg.slogdet(a, signature=signature) return sign.astype(result_t), logdet.astype(real_t) def det(a): """ Compute the determinant of an array. Parameters ---------- a : (..., M, M) array_like Input array to compute determinants for. Returns ------- det : (...) array_like Determinant of `a`. See Also -------- slogdet : Another way to representing the determinant, more suitable for large matrices where underflow/overflow may occur. Notes ----- .. versionadded:: 1.8.0 Broadcasting rules apply, see the `numpy.linalg` documentation for details. The determinant is computed via LU factorization using the LAPACK routine z/dgetrf. Examples -------- The determinant of a 2-D array [[a, b], [c, d]] is ad - bc: >>> a = np.array([[1, 2], [3, 4]]) >>> np.linalg.det(a) -2.0 Computing determinants for a stack of matrices: >>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ]) >>> a.shape (2, 2, 2 >>> np.linalg.det(a) array([-2., -3., -8.]) """ a = asarray(a) _assertNoEmpty2d(a) _assertRankAtLeast2(a) _assertNdSquareness(a) t, result_t = _commonType(a) signature = 'D->D' if isComplexType(t) else 'd->d' return _umath_linalg.det(a, signature=signature).astype(result_t) # Linear Least Squares def lstsq(a, b, rcond=-1): """ Return the least-squares solution to a linear matrix equation. Solves the equation `a x = b` by computing a vector `x` that minimizes the Euclidean 2-norm `|| b - a x ||^2`. The equation may be under-, well-, or over- determined (i.e., the number of linearly independent rows of `a` can be less than, equal to, or greater than its number of linearly independent columns). If `a` is square and of full rank, then `x` (but for round-off error) is the "exact" solution of the equation. Parameters ---------- a : (M, N) array_like "Coefficient" matrix. b : {(M,), (M, K)} array_like Ordinate or "dependent variable" values. If `b` is two-dimensional, the least-squares solution is calculated for each of the `K` columns of `b`. rcond : float, optional Cut-off ratio for small singular values of `a`. Singular values are set to zero if they are smaller than `rcond` times the largest singular value of `a`. Returns ------- x : {(N,), (N, K)} ndarray Least-squares solution. If `b` is two-dimensional, the solutions are in the `K` columns of `x`. residuals : {(), (1,), (K,)} ndarray Sums of residuals; squared Euclidean 2-norm for each column in ``b - a*x``. If the rank of `a` is < N or M <= N, this is an empty array. If `b` is 1-dimensional, this is a (1,) shape array. Otherwise the shape is (K,). rank : int Rank of matrix `a`. s : (min(M, N),) ndarray Singular values of `a`. Raises ------ LinAlgError If computation does not converge. Notes ----- If `b` is a matrix, then all array results are returned as matrices. Examples -------- Fit a line, ``y = mx + c``, through some noisy data-points: >>> x = np.array([0, 1, 2, 3]) >>> y = np.array([-1, 0.2, 0.9, 2.1]) By examining the coefficients, we see that the line should have a gradient of roughly 1 and cut the y-axis at, more or less, -1. We can rewrite the line equation as ``y = Ap``, where ``A = [[x 1]]`` and ``p = [[m], [c]]``. Now use `lstsq` to solve for `p`: >>> A = np.vstack([x, np.ones(len(x))]).T >>> A array([[ 0., 1.], [ 1., 1.], [ 2., 1.], [ 3., 1.]]) >>> m, c = np.linalg.lstsq(A, y)[0] >>> print m, c 1.0 -0.95 Plot the data along with the fitted line: >>> import matplotlib.pyplot as plt >>> plt.plot(x, y, 'o', label='Original data', markersize=10) >>> plt.plot(x, m*x + c, 'r', label='Fitted line') >>> plt.legend() >>> plt.show() """ import math a, _ = _makearray(a) b, wrap = _makearray(b) is_1d = len(b.shape) == 1 if is_1d: b = b[:, newaxis] _assertRank2(a, b) m = a.shape[0] n = a.shape[1] n_rhs = b.shape[1] ldb = max(n, m) if m != b.shape[0]: raise LinAlgError('Incompatible dimensions') t, result_t = _commonType(a, b) result_real_t = _realType(result_t) real_t = _linalgRealType(t) bstar = zeros((ldb, n_rhs), t) bstar[:b.shape[0], :n_rhs] = b.copy() a, bstar = _fastCopyAndTranspose(t, a, bstar) a, bstar = _to_native_byte_order(a, bstar) s = zeros((min(m, n),), real_t) nlvl = max( 0, int( math.log( float(min(m, n))/2. ) ) + 1 ) iwork = zeros((3*min(m, n)*nlvl+11*min(m, n),), fortran_int) if isComplexType(t): lapack_routine = lapack_lite.zgelsd lwork = 1 rwork = zeros((lwork,), real_t) work = zeros((lwork,), t) results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond, 0, work, -1, rwork, iwork, 0) lwork = int(abs(work[0])) rwork = zeros((lwork,), real_t) a_real = zeros((m, n), real_t) bstar_real = zeros((ldb, n_rhs,), real_t) results = lapack_lite.dgelsd(m, n, n_rhs, a_real, m, bstar_real, ldb, s, rcond, 0, rwork, -1, iwork, 0) lrwork = int(rwork[0]) work = zeros((lwork,), t) rwork = zeros((lrwork,), real_t) results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond, 0, work, lwork, rwork, iwork, 0) else: lapack_routine = lapack_lite.dgelsd lwork = 1 work = zeros((lwork,), t) results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond, 0, work, -1, iwork, 0) lwork = int(work[0]) work = zeros((lwork,), t) results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond, 0, work, lwork, iwork, 0) if results['info'] > 0: raise LinAlgError('SVD did not converge in Linear Least Squares') resids = array([], result_real_t) if is_1d: x = array(ravel(bstar)[:n], dtype=result_t, copy=True) if results['rank'] == n and m > n: if isComplexType(t): resids = array([sum(abs(ravel(bstar)[n:])**2)], dtype=result_real_t) else: resids = array([sum((ravel(bstar)[n:])**2)], dtype=result_real_t) else: x = array(transpose(bstar)[:n,:], dtype=result_t, copy=True) if results['rank'] == n and m > n: if isComplexType(t): resids = sum(abs(transpose(bstar)[n:,:])**2, axis=0).astype( result_real_t) else: resids = sum((transpose(bstar)[n:,:])**2, axis=0).astype( result_real_t) st = s[:min(n, m)].copy().astype(result_real_t) return wrap(x), wrap(resids), results['rank'], st def _multi_svd_norm(x, row_axis, col_axis, op): """Compute a function of the singular values of the 2-D matrices in `x`. This is a private utility function used by numpy.linalg.norm(). Parameters ---------- x : ndarray row_axis, col_axis : int The axes of `x` that hold the 2-D matrices. op : callable This should be either numpy.amin or numpy.amax or numpy.sum. Returns ------- result : float or ndarray If `x` is 2-D, the return values is a float. Otherwise, it is an array with ``x.ndim - 2`` dimensions. The return values are either the minimum or maximum or sum of the singular values of the matrices, depending on whether `op` is `numpy.amin` or `numpy.amax` or `numpy.sum`. """ if row_axis > col_axis: row_axis -= 1 y = rollaxis(rollaxis(x, col_axis, x.ndim), row_axis, -1) result = op(svd(y, compute_uv=0), axis=-1) return result def norm(x, ord=None, axis=None, keepdims=False): """ Matrix or vector norm. This function is able to return one of eight different matrix norms, or one of an infinite number of vector norms (described below), depending on the value of the ``ord`` parameter. Parameters ---------- x : array_like Input array. If `axis` is None, `x` must be 1-D or 2-D. ord : {non-zero int, inf, -inf, 'fro', 'nuc'}, optional Order of the norm (see table under ``Notes``). inf means numpy's `inf` object. axis : {int, 2-tuple of ints, None}, optional If `axis` is an integer, it specifies the axis of `x` along which to compute the vector norms. If `axis` is a 2-tuple, it specifies the axes that hold 2-D matrices, and the matrix norms of these matrices are computed. If `axis` is None then either a vector norm (when `x` is 1-D) or a matrix norm (when `x` is 2-D) is returned. keepdims : bool, optional .. versionadded:: 1.10.0 If this is set to True, the axes which are normed over are left in the result as dimensions with size one. With this option the result will broadcast correctly against the original `x`. Returns ------- n : float or ndarray Norm of the matrix or vector(s). Notes ----- For values of ``ord <= 0``, the result is, strictly speaking, not a mathematical 'norm', but it may still be useful for various numerical purposes. The following norms can be calculated: ===== ============================ ========================== ord norm for matrices norm for vectors ===== ============================ ========================== None Frobenius norm 2-norm 'fro' Frobenius norm -- 'nuc' nuclear norm -- inf max(sum(abs(x), axis=1)) max(abs(x)) -inf min(sum(abs(x), axis=1)) min(abs(x)) 0 -- sum(x != 0) 1 max(sum(abs(x), axis=0)) as below -1 min(sum(abs(x), axis=0)) as below 2 2-norm (largest sing. value) as below -2 smallest singular value as below other -- sum(abs(x)**ord)**(1./ord) ===== ============================ ========================== The Frobenius norm is given by [1]_: :math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}` The nuclear norm is the sum of the singular values. References ---------- .. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*, Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15 Examples -------- >>> from numpy import linalg as LA >>> a = np.arange(9) - 4 >>> a array([-4, -3, -2, -1, 0, 1, 2, 3, 4]) >>> b = a.reshape((3, 3)) >>> b array([[-4, -3, -2], [-1, 0, 1], [ 2, 3, 4]]) >>> LA.norm(a) 7.745966692414834 >>> LA.norm(b) 7.745966692414834 >>> LA.norm(b, 'fro') 7.745966692414834 >>> LA.norm(a, np.inf) 4 >>> LA.norm(b, np.inf) 9 >>> LA.norm(a, -np.inf) 0 >>> LA.norm(b, -np.inf) 2 >>> LA.norm(a, 1) 20 >>> LA.norm(b, 1) 7 >>> LA.norm(a, -1) -4.6566128774142013e-010 >>> LA.norm(b, -1) 6 >>> LA.norm(a, 2) 7.745966692414834 >>> LA.norm(b, 2) 7.3484692283495345 >>> LA.norm(a, -2) nan >>> LA.norm(b, -2) 1.8570331885190563e-016 >>> LA.norm(a, 3) 5.8480354764257312 >>> LA.norm(a, -3) nan Using the `axis` argument to compute vector norms: >>> c = np.array([[ 1, 2, 3], ... [-1, 1, 4]]) >>> LA.norm(c, axis=0) array([ 1.41421356, 2.23606798, 5. ]) >>> LA.norm(c, axis=1) array([ 3.74165739, 4.24264069]) >>> LA.norm(c, ord=1, axis=1) array([6, 6]) Using the `axis` argument to compute matrix norms: >>> m = np.arange(8).reshape(2,2,2) >>> LA.norm(m, axis=(1,2)) array([ 3.74165739, 11.22497216]) >>> LA.norm(m[0, :, :]), LA.norm(m[1, :, :]) (3.7416573867739413, 11.224972160321824) """ x = asarray(x) # Immediately handle some default, simple, fast, and common cases. if axis is None: ndim = x.ndim if ((ord is None) or (ord in ('f', 'fro') and ndim == 2) or (ord == 2 and ndim == 1)): x = x.ravel(order='K') if isComplexType(x.dtype.type): sqnorm = dot(x.real, x.real) + dot(x.imag, x.imag) else: sqnorm = dot(x, x) ret = sqrt(sqnorm) if keepdims: ret = ret.reshape(ndim*[1]) return ret # Normalize the `axis` argument to a tuple. nd = x.ndim if axis is None: axis = tuple(range(nd)) elif not isinstance(axis, tuple): try: axis = int(axis) except: raise TypeError("'axis' must be None, an integer or a tuple of integers") axis = (axis,) if len(axis) == 1: if ord == Inf: return abs(x).max(axis=axis, keepdims=keepdims) elif ord == -Inf: return abs(x).min(axis=axis, keepdims=keepdims) elif ord == 0: # Zero norm return (x != 0).sum(axis=axis, keepdims=keepdims) elif ord == 1: # special case for speedup return add.reduce(abs(x), axis=axis, keepdims=keepdims) elif ord is None or ord == 2: # special case for speedup s = (x.conj() * x).real return sqrt(add.reduce(s, axis=axis, keepdims=keepdims)) else: try: ord + 1 except TypeError: raise ValueError("Invalid norm order for vectors.") if x.dtype.type is longdouble: # Convert to a float type, so integer arrays give # float results. Don't apply asfarray to longdouble arrays, # because it will downcast to float64. absx = abs(x) else: absx = x if isComplexType(x.dtype.type) else asfarray(x) if absx.dtype is x.dtype: absx = abs(absx) else: # if the type changed, we can safely overwrite absx abs(absx, out=absx) absx **= ord return add.reduce(absx, axis=axis, keepdims=keepdims) ** (1.0 / ord) elif len(axis) == 2: row_axis, col_axis = axis if row_axis < 0: row_axis += nd if col_axis < 0: col_axis += nd if not (0 <= row_axis < nd and 0 <= col_axis < nd): raise ValueError('Invalid axis %r for an array with shape %r' % (axis, x.shape)) if row_axis == col_axis: raise ValueError('Duplicate axes given.') if ord == 2: ret = _multi_svd_norm(x, row_axis, col_axis, amax) elif ord == -2: ret = _multi_svd_norm(x, row_axis, col_axis, amin) elif ord == 1: if col_axis > row_axis: col_axis -= 1 ret = add.reduce(abs(x), axis=row_axis).max(axis=col_axis) elif ord == Inf: if row_axis > col_axis: row_axis -= 1 ret = add.reduce(abs(x), axis=col_axis).max(axis=row_axis) elif ord == -1: if col_axis > row_axis: col_axis -= 1 ret = add.reduce(abs(x), axis=row_axis).min(axis=col_axis) elif ord == -Inf: if row_axis > col_axis: row_axis -= 1 ret = add.reduce(abs(x), axis=col_axis).min(axis=row_axis) elif ord in [None, 'fro', 'f']: ret = sqrt(add.reduce((x.conj() * x).real, axis=axis)) elif ord == 'nuc': ret = _multi_svd_norm(x, row_axis, col_axis, sum) else: raise ValueError("Invalid norm order for matrices.") if keepdims: ret_shape = list(x.shape) ret_shape[axis[0]] = 1 ret_shape[axis[1]] = 1 ret = ret.reshape(ret_shape) return ret else: raise ValueError("Improper number of dimensions to norm.") # multi_dot def multi_dot(arrays): """ Compute the dot product of two or more arrays in a single function call, while automatically selecting the fastest evaluation order. `multi_dot` chains `numpy.dot` and uses optimal parenthesization of the matrices [1]_ [2]_. Depending on the shapes of the matrices, this can speed up the multiplication a lot. If the first argument is 1-D it is treated as a row vector. If the last argument is 1-D it is treated as a column vector. The other arguments must be 2-D. Think of `multi_dot` as:: def multi_dot(arrays): return functools.reduce(np.dot, arrays) Parameters ---------- arrays : sequence of array_like If the first argument is 1-D it is treated as row vector. If the last argument is 1-D it is treated as column vector. The other arguments must be 2-D. Returns ------- output : ndarray Returns the dot product of the supplied arrays. See Also -------- dot : dot multiplication with two arguments. References ---------- .. [1] Cormen, "Introduction to Algorithms", Chapter 15.2, p. 370-378 .. [2] http://en.wikipedia.org/wiki/Matrix_chain_multiplication Examples -------- `multi_dot` allows you to write:: >>> from numpy.linalg import multi_dot >>> # Prepare some data >>> A = np.random.random(10000, 100) >>> B = np.random.random(100, 1000) >>> C = np.random.random(1000, 5) >>> D = np.random.random(5, 333) >>> # the actual dot multiplication >>> multi_dot([A, B, C, D]) instead of:: >>> np.dot(np.dot(np.dot(A, B), C), D) >>> # or >>> A.dot(B).dot(C).dot(D) Example: multiplication costs of different parenthesizations ------------------------------------------------------------ The cost for a matrix multiplication can be calculated with the following function:: def cost(A, B): return A.shape[0] * A.shape[1] * B.shape[1] Let's assume we have three matrices :math:`A_{10x100}, B_{100x5}, C_{5x50}$`. The costs for the two different parenthesizations are as follows:: cost((AB)C) = 10*100*5 + 10*5*50 = 5000 + 2500 = 7500 cost(A(BC)) = 10*100*50 + 100*5*50 = 50000 + 25000 = 75000 """ n = len(arrays) # optimization only makes sense for len(arrays) > 2 if n < 2: raise ValueError("Expecting at least two arrays.") elif n == 2: return dot(arrays[0], arrays[1]) arrays = [asanyarray(a) for a in arrays] # save original ndim to reshape the result array into the proper form later ndim_first, ndim_last = arrays[0].ndim, arrays[-1].ndim # Explicitly convert vectors to 2D arrays to keep the logic of the internal # _multi_dot_* functions as simple as possible. if arrays[0].ndim == 1: arrays[0] = atleast_2d(arrays[0]) if arrays[-1].ndim == 1: arrays[-1] = atleast_2d(arrays[-1]).T _assertRank2(*arrays) # _multi_dot_three is much faster than _multi_dot_matrix_chain_order if n == 3: result = _multi_dot_three(arrays[0], arrays[1], arrays[2]) else: order = _multi_dot_matrix_chain_order(arrays) result = _multi_dot(arrays, order, 0, n - 1) # return proper shape if ndim_first == 1 and ndim_last == 1: return result[0, 0] # scalar elif ndim_first == 1 or ndim_last == 1: return result.ravel() # 1-D else: return result def _multi_dot_three(A, B, C): """ Find the best order for three arrays and do the multiplication. For three arguments `_multi_dot_three` is approximately 15 times faster than `_multi_dot_matrix_chain_order` """ # cost1 = cost((AB)C) cost1 = (A.shape[0] * A.shape[1] * B.shape[1] + # (AB) A.shape[0] * B.shape[1] * C.shape[1]) # (--)C # cost2 = cost((AB)C) cost2 = (B.shape[0] * B.shape[1] * C.shape[1] + # (BC) A.shape[0] * A.shape[1] * C.shape[1]) # A(--) if cost1 < cost2: return dot(dot(A, B), C) else: return dot(A, dot(B, C)) def _multi_dot_matrix_chain_order(arrays, return_costs=False): """ Return a np.array that encodes the optimal order of mutiplications. The optimal order array is then used by `_multi_dot()` to do the multiplication. Also return the cost matrix if `return_costs` is `True` The implementation CLOSELY follows Cormen, "Introduction to Algorithms", Chapter 15.2, p. 370-378. Note that Cormen uses 1-based indices. cost[i, j] = min([ cost[prefix] + cost[suffix] + cost_mult(prefix, suffix) for k in range(i, j)]) """ n = len(arrays) # p stores the dimensions of the matrices # Example for p: A_{10x100}, B_{100x5}, C_{5x50} --> p = [10, 100, 5, 50] p = [a.shape[0] for a in arrays] + [arrays[-1].shape[1]] # m is a matrix of costs of the subproblems # m[i,j]: min number of scalar multiplications needed to compute A_{i..j} m = zeros((n, n), dtype=double) # s is the actual ordering # s[i, j] is the value of k at which we split the product A_i..A_j s = empty((n, n), dtype=intp) for l in range(1, n): for i in range(n - l): j = i + l m[i, j] = Inf for k in range(i, j): q = m[i, k] + m[k+1, j] + p[i]*p[k+1]*p[j+1] if q < m[i, j]: m[i, j] = q s[i, j] = k # Note that Cormen uses 1-based index return (s, m) if return_costs else s def _multi_dot(arrays, order, i, j): """Actually do the multiplication with the given order.""" if i == j: return arrays[i] else: return dot(_multi_dot(arrays, order, i, order[i, j]), _multi_dot(arrays, order, order[i, j] + 1, j))
{ "content_hash": "b9dfe44b9a8cf13f50c9b6aaec1c25c7", "timestamp": "", "source": "github", "line_count": 2376, "max_line_length": 85, "avg_line_length": 31.553030303030305, "alnum_prop": 0.5668534080298786, "repo_name": "yiakwy/numpy", "id": "30180f24ab239b9362f1f9f1b1f7e5aa89666e5c", "size": "74970", "binary": false, "copies": "5", "ref": "refs/heads/master", "path": "numpy/linalg/linalg.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "C", "bytes": "7447231" }, { "name": "C++", "bytes": "111187" }, { "name": "FORTRAN", "bytes": "6310" }, { "name": "Makefile", "bytes": "2574" }, { "name": "Python", "bytes": "6126399" }, { "name": "Shell", "bytes": "2241" } ], "symlink_target": "" }
"""Web Crawler/Spider This module implements a web crawler. This is very _basic_ only and needs to be extended to do anything usefull with the traversed pages. """ import re import sys import time import math import urllib2 import urlparse import optparse from cgi import escape from traceback import format_exc from Queue import Queue, Empty as QueueEmpty from BeautifulSoup import BeautifulSoup __version__ = "0.2" __copyright__ = "CopyRight (C) 2008-2011 by James Mills" __license__ = "MIT" __author__ = "James Mills" __author_email__ = "James Mills, James dot Mills st dotred dot com dot au" USAGE = "%prog [options] <url>" VERSION = "%prog v" + __version__ AGENT = "%s/%s" % (__name__, __version__) class Crawler(object): def __init__(self, root, depth, locked=True): self.root = root self.depth = depth self.locked = locked self.host = urlparse.urlparse(root)[1] self.urls = [] self.links = 0 self.followed = 0 def crawl(self): page = Fetcher(self.root) page.fetch() q = Queue() for url in page.urls: q.put(url) followed = [self.root] n = 0 while True: try: url = q.get() except QueueEmpty: break n += 1 if url not in followed: try: host = urlparse.urlparse(url)[1] if self.locked and re.match(".*%s" % self.host, host): followed.append(url) self.followed += 1 page = Fetcher(url) page.fetch() for i, url in enumerate(page): if url not in self.urls: self.links += 1 q.put(url) self.urls.append(url) if n > self.depth > 0: break except Exception, e: print "ERROR: Can't process url '%s' (%s)" % (url, e) print format_exc() class Fetcher(object): def __init__(self, url): self.url = url self.urls = [] def __getitem__(self, x): return self.urls[x] def _addHeaders(self, request): request.add_header("User-Agent", AGENT) def open(self): url = self.url try: request = urllib2.Request(url) handle = urllib2.build_opener() except IOError: return None return request, handle def fetch(self): request, handle = self.open() self._addHeaders(request) if handle: try: content = unicode(handle.open(request).read(), "utf-8", errors="replace") soup = BeautifulSoup(content) tags = soup('a') except urllib2.HTTPError, error: if error.code == 404: print >> sys.stderr, "ERROR: %s -> %s" % (error, error.url) else: print >> sys.stderr, "ERROR: %s" % error tags = [] except urllib2.URLError, error: print >> sys.stderr, "ERROR: %s" % error tags = [] for tag in tags: href = tag.get("href") if href is not None: url = urlparse.urljoin(self.url, escape(href)) if url not in self: self.urls.append(url) def getLinks(url): page = Fetcher(url) page.fetch() for i, url in enumerate(page): print "%d. %s" % (i, url) def parse_options(): """parse_options() -> opts, args Parse any command-line options given returning both the parsed options and arguments. """ parser = optparse.OptionParser(usage=USAGE, version=VERSION) parser.add_option("-q", "--quiet", action="store_true", default=False, dest="quiet", help="Enable quiet mode") parser.add_option("-l", "--links", action="store_true", default=False, dest="links", help="Get links for specified url only") parser.add_option("-d", "--depth", action="store", type="int", default=30, dest="depth", help="Maximum depth to traverse") opts, args = parser.parse_args() if len(args) < 1: parser.print_help() raise SystemExit, 1 return opts, args def main(): opts, args = parse_options() url = args[0] if opts.links: getLinks(url) raise SystemExit, 0 depth = opts.depth sTime = time.time() print "Crawling %s (Max Depth: %d)" % (url, depth) crawler = Crawler(url, depth) crawler.crawl() print "\n".join(crawler.urls) eTime = time.time() tTime = eTime - sTime print "Found: %d" % crawler.links print "Followed: %d" % crawler.followed print "Stats: (%d/s after %0.2fs)" % ( int(math.ceil(float(crawler.links) / tTime)), tTime) if __name__ == "__main__": main()
{ "content_hash": "f4045d8b07cea5025c96543ec4b5c903", "timestamp": "", "source": "github", "line_count": 188, "max_line_length": 79, "avg_line_length": 27.48936170212766, "alnum_prop": 0.5089009287925697, "repo_name": "cwoebker/relo", "id": "82702414e631b075deaf1c34a6599a63e936542f", "size": "5191", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "relo/net/crawl.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Python", "bytes": "168912" }, { "name": "Shell", "bytes": "4507" } ], "symlink_target": "" }
import urllib from bs4 import BeautifulSoup url = "http://192.168.16.128/WackoPicko/" page = urllib.request.urlopen(url) s_p = page.read() s = BeautifulSoup(s_p,"html.parser") a=[] for form in s.findAll("title"): a = form print (a)
{ "content_hash": "61a25cdbd3c004879f2179b2db060b0f", "timestamp": "", "source": "github", "line_count": 14, "max_line_length": 41, "avg_line_length": 17, "alnum_prop": 0.6848739495798319, "repo_name": "nwiizo/workspace_2017", "id": "553f99091995404ac0b7caf3c2a4a2e39643543d", "size": "286", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "owasp7/get_tag_test.py", "mode": "33188", "license": "mit", "language": [ { "name": "C", "bytes": "173" }, { "name": "C++", "bytes": "7105" }, { "name": "CSS", "bytes": "50021" }, { "name": "Go", "bytes": "112005" }, { "name": "HTML", "bytes": "66435" }, { "name": "JavaScript", "bytes": "73266" }, { "name": "Makefile", "bytes": "1227" }, { "name": "PHP", "bytes": "3916" }, { "name": "PowerShell", "bytes": "277598" }, { "name": "Python", "bytes": "11925958" }, { "name": "Ruby", "bytes": "3779" }, { "name": "Rust", "bytes": "1484076" }, { "name": "Shell", "bytes": "86558" } ], "symlink_target": "" }
import sys import os from os import listdir from os.path import isfile, join, isdir from os import walk from collections import defaultdict import operator import re from math import log import random import pickle # This list contains attributes which have already been used in the decision tree and cannot be used again for splitting on. cantuselist = [] # This list represents the bins used to split the dataset on binslist = [0.01,0.02,0.03,0.04,0.05,0.06,0.07,0.08,0.09,0.1,0.2,0.3,0.4,0.8,1.0] # This is the data structure for the decision tree nodes # data contains the word or will contain boolean True/False for leaf nodes # True indicates a spam Document and False indicates a non-spam document class Tree(object): def __init__(self): self.left = None self.right = None self.data = None self.bin = None self.isLeaf = False # This method returns the appropriate bin for a word given its frequency. def binFinder(freq): for abin in binslist: if freq<=abin: return abin # This method is used to print the decision tree given root node. def print_tree(rootnode): thislevel = [rootnode] while thislevel: nextlevel = list() for n in thislevel: print n.data, if n.left: nextlevel.append(n.left) if n.right: nextlevel.append(n.right) print thislevel = nextlevel # This method is used to print the decision tree given root node. def print_tree2(rootnode): if rootnode is None: return print "Current Node is : " + str(rootnode.data) try: print " Left of " + str(rootnode.data) + " is " + str(rootnode.left.data) except AttributeError: print " Left of " + str(rootnode.data) + " is " try: print " Right of " + str(rootnode.data) + " is " + str(rootnode.right.data) except AttributeError: print " Right of " + str(rootnode.data) + " is " print_tree2(rootnode.left) print_tree2(rootnode.right) # This method is used to print the top 4 levels of decision tree given root node. def print_tree3(rootnode): print "\nThe top 4 levels of Decision Tree are as follows:" # Level 1 if rootnode is not None and rootnode.data is not None: print "\t\t\t\t" + str(rootnode.data) + "(" + str(rootnode.bin) + ")" + "\n" # Level 2 if rootnode is not None and rootnode.left is not None and rootnode.left.data is not None: print "\t\t\t" + str(rootnode.left.data)+ "(" + str(rootnode.left.bin) + ")", if rootnode is not None and rootnode.right is not None and rootnode.right.data is not None: print "\t\t\t\t" + str(rootnode.right.data)+ "(" + str(rootnode.right.bin) + ")" + "\n" # Level 3 if rootnode is not None and rootnode.left is not None and rootnode.left.left is not None and rootnode.left.left.data is not None: print "\t" + str(rootnode.left.left.data)+ "(" + str(rootnode.left.left.bin) + ")", if rootnode is not None and rootnode.left is not None and rootnode.left.right is not None and rootnode.left.right.data is not None: print "\t\t\t\t" + str(rootnode.left.right.data)+ "(" + str(rootnode.left.right.bin) + ")", if rootnode is not None and rootnode.right is not None and rootnode.right.left is not None and rootnode.right.left.data is not None: print "\t\t\t\t\t" + str(rootnode.right.left.data)+ "(" + str(rootnode.right.left.bin) + ")", if rootnode is not None and rootnode.right is not None and rootnode.right.right is not None and rootnode.right.right.data is not None: print "\t\t\t" + str(rootnode.right.right.data)+ "(" + str(rootnode.right.right.bin) + ")" + "\n" # Level 4 if rootnode is not None and rootnode.left is not None and rootnode.left.left is not None and rootnode.left.left.left is not None and rootnode.left.left.left.data is not None: print "" + str(rootnode.left.left.left.data)+ "(" + str(rootnode.left.left.left.bin) + ")", if rootnode is not None and rootnode.left is not None and rootnode.left.left is not None and rootnode.left.left.right is not None and rootnode.left.left.right.data is not None: print "\t\t" + str(rootnode.left.left.right.data)+ "(" + str(rootnode.left.left.right.bin) + ")", if rootnode is not None and rootnode.left is not None and rootnode.left.right is not None and rootnode.left.right.left is not None and rootnode.left.right.left.data is not None: print "\t\t" + str(rootnode.left.right.left.data)+ "(" + str(rootnode.left.right.left.bin) + ")", if rootnode is not None and rootnode.left is not None and rootnode.left.right is not None and rootnode.left.right.right is not None and rootnode.left.right.right.data is not None: print "\t\t" + str(rootnode.left.right.right.data)+ "(" + str(rootnode.left.right.right.bin) + ")", if rootnode is not None and rootnode.right is not None and rootnode.right.left is not None and rootnode.right.left.left is not None and rootnode.right.left.left.data is not None: print "\t" + str(rootnode.right.left.left.data)+ "(" + str(rootnode.right.left.left.bin) + ")", if rootnode is not None and rootnode.right is not None and rootnode.right.left is not None and rootnode.right.left.right is not None and rootnode.right.left.right.data is not None: print "\t\t" + str(rootnode.right.left.right.data)+ "(" + str(rootnode.right.left.right.bin) + ")", if rootnode is not None and rootnode.right is not None and rootnode.right.right is not None and rootnode.right.right.left is not None and rootnode.right.right.left.data is not None: print "\t\t\t\t\t\t\t" + str(rootnode.right.right.left.data)+ "(" + str(rootnode.right.right.left.bin) + ")", if rootnode is not None and rootnode.right is not None and rootnode.right.right is not None and rootnode.right.right.right is not None and rootnode.right.right.right.data is not None: print "\t\t\t\t" + str(rootnode.right.right.right.data)+ "(" + str(rootnode.right.right.right.bin) + ")" + "\n" def mylog(x,base): if x == 0: return 0 else: return log(x,base) def mydiv(num,den): if num == 0: return 0 else: return (1.0*num)/(1.0*den) # This method checks if word is present in doc def isPresent(doc,word): return True if doc.get(word)>0 else False # This method reads the documents and stores them in a dictionary def readData(directory): words = {} wordcount = {} docLength = {} subdirectories = [f for f in listdir(join(os.getcwd(), directory)) if isdir(join(os.getcwd(), directory, f))] data = {} for topic in subdirectories: data[topic] = {} docLength[topic] = defaultdict(float) for f in listdir(join(os.getcwd(), directory, topic)): wordvector = defaultdict(float) for line in open(join(os.getcwd(), directory, topic, f)): for word in line.split(): word = word.strip().lower() wordvector[word] += 1 words[word] = 1 docLength[topic][f] += 1 data[topic][f] = wordvector for topic in subdirectories: for doc in data[topic].keys(): for word in data[topic][doc]: data[topic][doc][word] = (data[topic][doc][word]*1.0)/(1.0*docLength[topic][doc]) for topic in subdirectories: wordcount[topic] = {} for abin in binslist: wordcount[topic][abin] = {} for topic in subdirectories: for doc in data[topic]: for word in data[topic][doc]: mybin = binFinder(data[topic][doc][word]) if word not in wordcount[topic][mybin]: wordcount[topic][mybin][word] = 1 else: wordcount[topic][mybin][word] += 1 return (data,words,wordcount) # This method is recursively called to split the dataset and get the word with minimum avg disorder (entropy) def generateDecisionTree(trained_data,words,wordcount): if len(trained_data['notspam'])==0: node = Tree() node.data = True node.isLeaf = True # print str(True) return node if len(trained_data['spam'])==0: node = Tree() node.data = False node.isLeaf = True # print str(False) return node minpred = 9999 minpredbin = 0 minpredword = '' for word in words: for abin in binslist: havingword = wordcount['notspam'][abin].get(word,0) + wordcount['spam'][abin].get(word,0) total = len(trained_data['spam']) + len(trained_data['notspam']) nothavingword = total - havingword spamhavingword = wordcount['spam'][abin].get(word,0) spamnothavingword = len(trained_data['spam']) - spamhavingword notspamhavingword = wordcount['notspam'][abin].get(word,0) notspamnothavingword = len(trained_data['notspam']) - notspamhavingword avgdisorder = \ (mydiv(havingword,total))* \ (-(mydiv(spamhavingword,havingword))*mylog(mydiv(spamhavingword,havingword),2) \ - (mydiv(notspamhavingword,havingword))*mylog(mydiv(notspamhavingword,havingword),2)) \ + \ (mydiv(nothavingword,total))* \ (- (mydiv(spamnothavingword,nothavingword))*mylog(mydiv(spamnothavingword,nothavingword),2) \ - (mydiv(notspamnothavingword,nothavingword))*mylog(mydiv(notspamnothavingword,nothavingword),2)) if minpred > avgdisorder: minpred = avgdisorder minpredword = word minpredbin = abin # print minpredword # print minpred # print minpredbin # print "Total Number of Documents: " + str(total) # print "Number of Documents Containing Word: " + word + " " +str(havingword) # print "Out of those spam are: " + str(spamhavingword) # print "Out of those not spam are: " + str(notspamhavingword) # print "\nNumber of Documents Not Containing Word: " + word + " " +str(nothavingword) # print "Out of those spam are: " + str(spamnothavingword) # print "Out of those not spam are: " + str(notspamnothavingword) # print "Average Disorder is: " + str(avgdisorder) # New Split Up Dataset and Counts new_trained_data_without_word = {} new_trained_data_without_word['spam'] = {} new_trained_data_without_word['notspam'] = {} new_trained_data_with_word = {} new_trained_data_with_word['spam'] = {} new_trained_data_with_word['notspam'] = {} wordlist_with = {} wordlist_without = {} for topic in ['spam','notspam']: for doc in trained_data[topic]: # if trained_data[topic][doc].get(minpredword,0) == 0 or trained_data[topic][doc].get(minpredword,0) > minpredbin or trained_data[topic][doc].get(minpredword,0) < (minpredbin-0.1): # if minpredbin == binslist[0]: # if trained_data[topic][doc].get(minpredword,0) == 0 or trained_data[topic][doc].get(minpredword,0) > minpredbin: # new_trained_data_without_word[topic][doc] = trained_data[topic][doc] # else: # new_trained_data_with_word[topic][doc] = trained_data[topic][doc] # else: if trained_data[topic][doc].get(minpredword,0) == 0 or trained_data[topic][doc].get(minpredword,0) > minpredbin or (minpredbin != binslist[0] and trained_data[topic][doc].get(minpredword,0) < (minpredbin-binslist[binslist.index(minpredbin)-1])): new_trained_data_without_word[topic][doc] = trained_data[topic][doc] else: new_trained_data_with_word[topic][doc] = trained_data[topic][doc] wordcount_with = {} wordcount_without = {} for topic in ['spam','notspam']: wordcount_with[topic]={} wordcount_without[topic]={} for abin in binslist: wordcount_with[topic][abin] = {} wordcount_without[topic][abin] = {} for topic in ['spam','notspam']: for doc in new_trained_data_with_word[topic]: for word in new_trained_data_with_word[topic][doc]: wordlist_with[word] = 1 mybin = binFinder(new_trained_data_with_word[topic][doc][word]) if word not in wordcount_with[topic][mybin]: wordcount_with[topic][mybin][word] = 1 else: wordcount_with[topic][mybin][word] += 1 for topic in ['spam','notspam']: for doc in new_trained_data_without_word[topic]: for word in new_trained_data_without_word[topic][doc]: wordlist_without[word] = 1 mybin = binFinder(new_trained_data_without_word[topic][doc][word]) if word not in wordcount_without[topic][mybin]: wordcount_without[topic][mybin][word] = 1 else: wordcount_without[topic][mybin][word] += 1 # print minpredword cantuselist.append(minpredword) for word in cantuselist: if word in wordlist_with: del wordlist_with[word] if word in wordlist_without: del wordlist_without[word] root = Tree() root.bin = minpredbin root.data = minpredword root.left = generateDecisionTree(new_trained_data_with_word,wordlist_with,wordcount_with) root.right = generateDecisionTree(new_trained_data_without_word,wordlist_without,wordcount_without) return root # This method is used to walk on the decision tree and classify the documents based on the boolean value found on the leaf nodes. def classifyDocs(trained_data, rootnode): rootofDT = rootnode spamcorrectcount = 0 notspamcorrectcount = 0 for topic in trained_data.keys(): for doc in trained_data[topic].keys(): while rootnode.isLeaf == False: if isPresent(trained_data[topic][doc],rootnode.data): rootnode = rootnode.left else: rootnode = rootnode.right if rootnode.data == True and topic == 'spam': spamcorrectcount += 1 elif rootnode.data == False and topic == 'notspam': notspamcorrectcount += 1 rootnode = rootofDT correctcount = spamcorrectcount + notspamcorrectcount total = len(trained_data['spam']) + len(trained_data['notspam']) print "Finished Classification" print "Percentage Accuracy: " + str(100.0*correctcount/total) + " %" print "Confusion Matrix:" print "_______________________________________________________" print "| | Predicted Spam | Predicted Not-Spam|" print "|Actual Spam | " + str(spamcorrectcount) + " | " + \ str(len(trained_data['spam'])-spamcorrectcount) + " |" print "|Actual Not-Spam | " + str(len(trained_data['notspam'])-notspamcorrectcount) + \ " | " + str(notspamcorrectcount) + " |"
{ "content_hash": "6e405af4c948712a0b19d90f0426504b", "timestamp": "", "source": "github", "line_count": 306, "max_line_length": 257, "avg_line_length": 49.51960784313726, "alnum_prop": 0.6209991420840758, "repo_name": "yashketkar/B551-Elements-Of-Artificial-Intelligence", "id": "ecb1cebd512e8cf797d5916fa12ba6fab3494b4a", "size": "15153", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "pssapre-sdarekar-yketkar-a4/modules/spam_dt_cont.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "217044" }, { "name": "Shell", "bytes": "578" } ], "symlink_target": "" }
from tkinter import * """class yang berkaitan dengan proses penulisan file riwayat permainan""" class GameHistory: def __init__(self): self.window = Tk() self.window.title("Riwayat Permainan") self.historyText = Text(self.window) historyFile = open("gamehistory.txt", "r+") self.historyText.insert("1.0", historyFile.read()) historyFile.close() self.historyText.pack() self.window.mainloop()
{ "content_hash": "4cdc21c1669e6a3f9c3c1fb440aec0cd", "timestamp": "", "source": "github", "line_count": 16, "max_line_length": 73, "avg_line_length": 28.9375, "alnum_prop": 0.6393088552915767, "repo_name": "argaghulamahmad/MNK-Game", "id": "ff25bc8f37b91a479b8a9f669f2a6ba1f33f2d72", "size": "463", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "GameHistory.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "16303" } ], "symlink_target": "" }
import re from webtest import response class TestResponse(response.TestResponse): '''Same as WebTest's TestResponse but adds basic HTTP authentication to ``click`` and ``clickbutton``. ''' def click(self, description=None, linkid=None, href=None, index=None, verbose=False, extra_environ=None, auth=None, auth_type=None): """ Click the link as described. Each of ``description``, ``linkid``, and ``url`` are *patterns*, meaning that they are either strings (regular expressions), compiled regular expressions (objects with a ``search`` method), or callables returning true or false. All the given patterns are ANDed together: * ``description`` is a pattern that matches the contents of the anchor (HTML and all -- everything between ``<a...>`` and ``</a>``) * ``linkid`` is a pattern that matches the ``id`` attribute of the anchor. It will receive the empty string if no id is given. * ``href`` is a pattern that matches the ``href`` of the anchor; the literal content of that attribute, not the fully qualified attribute. If more than one link matches, then the ``index`` link is followed. If ``index`` is not given and more than one link matches, or if no link matches, then ``IndexError`` will be raised. If you give ``verbose`` then messages will be printed about each link, and why it does or doesn't match. If you use ``app.click(verbose=True)`` you'll see a list of all the links. You can use multiple criteria to essentially assert multiple aspects about the link, e.g., where the link's destination is. """ found_html, found_desc, found_attrs = self._find_element( tag='a', href_attr='href', href_extract=None, content=description, id=linkid, href_pattern=href, index=index, verbose=verbose) auth = auth or self.test_app.auth auth_type = auth_type or self.test_app.auth_type return self.goto(str(found_attrs['uri']), extra_environ=extra_environ, auth=auth, auth_type=auth_type) def clickbutton(self, description=None, buttonid=None, href=None, index=None, verbose=False, auth=None, auth_type=None): """ Like :meth:`~webtest.response.TestResponse.click`, except looks for link-like buttons. This kind of button should look like ``<button onclick="...location.href='url'...">``. """ found_html, found_desc, found_attrs = self._find_element( tag='button', href_attr='onclick', href_extract=re.compile(r"location\.href='(.*?)'"), content=description, id=buttonid, href_pattern=href, index=index, verbose=verbose) auth = auth or self.test_app.auth auth_type = auth_type or self.test_app.auth_type return self.goto(str(found_attrs['uri']), auth=auth, auth_type=auth_type)
{ "content_hash": "768cdc40a42613428d7b35672aae6626", "timestamp": "", "source": "github", "line_count": 77, "max_line_length": 81, "avg_line_length": 41.142857142857146, "alnum_prop": 0.6022727272727273, "repo_name": "sloria/webtest-plus", "id": "bd69d56d98bd4f35ac405247d9cf4bf8f1b2970f", "size": "3192", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "webtest_plus/response.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "24009" } ], "symlink_target": "" }
""" A driver for XenServer or Xen Cloud Platform. **Related Flags** :xenapi_connection_url: URL for connection to XenServer/Xen Cloud Platform. :xenapi_connection_username: Username for connection to XenServer/Xen Cloud Platform (default: root). :xenapi_connection_password: Password for connection to XenServer/Xen Cloud Platform. :target_host: the iSCSI Target Host IP address, i.e. the IP address for the nova-volume host :target_port: iSCSI Target Port, 3260 Default :iqn_prefix: IQN Prefix, e.g. 'iqn.2010-10.org.openstack' **Variable Naming Scheme** - suffix "_ref" for opaque references - suffix "_uuid" for UUIDs - suffix "_rec" for record objects """ import contextlib import cPickle as pickle import time import urlparse import xmlrpclib from eventlet import queue from eventlet import timeout from nova import context from nova import db from nova import exception from nova import flags from nova.openstack.common import cfg from nova.openstack.common import log as logging from nova.virt import driver from nova.virt.xenapi import host from nova.virt.xenapi import pool from nova.virt.xenapi import pool_states from nova.virt.xenapi import vm_utils from nova.virt.xenapi import vmops from nova.virt.xenapi import volumeops LOG = logging.getLogger(__name__) xenapi_opts = [ cfg.StrOpt('xenapi_connection_url', default=None, help='URL for connection to XenServer/Xen Cloud Platform. ' 'Required if compute_driver=xenapi.XenAPIDriver'), cfg.StrOpt('xenapi_connection_username', default='root', help='Username for connection to XenServer/Xen Cloud Platform. ' 'Used only if compute_driver=xenapi.XenAPIDriver'), cfg.StrOpt('xenapi_connection_password', default=None, help='Password for connection to XenServer/Xen Cloud Platform. ' 'Used only if compute_driver=xenapi.XenAPIDriver'), cfg.IntOpt('xenapi_connection_concurrent', default=5, help='Maximum number of concurrent XenAPI connections. ' 'Used only if compute_driver=xenapi.XenAPIDriver'), cfg.FloatOpt('xenapi_vhd_coalesce_poll_interval', default=5.0, help='The interval used for polling of coalescing vhds. ' 'Used only if compute_driver=xenapi.XenAPIDriver'), cfg.BoolOpt('xenapi_check_host', default=True, help='Ensure compute service is running on host XenAPI ' 'connects to.'), cfg.IntOpt('xenapi_vhd_coalesce_max_attempts', default=5, help='Max number of times to poll for VHD to coalesce. ' 'Used only if compute_driver=xenapi.XenAPIDriver'), cfg.StrOpt('xenapi_agent_path', default='usr/sbin/xe-update-networking', help='Specifies the path in which the xenapi guest agent ' 'should be located. If the agent is present, network ' 'configuration is not injected into the image. ' 'Used if compute_driver=xenapi.XenAPIDriver and ' ' flat_injected=True'), cfg.StrOpt('xenapi_sr_base_path', default='/var/run/sr-mount', help='Base path to the storage repository'), cfg.StrOpt('target_host', default=None, help='iSCSI Target Host'), cfg.StrOpt('target_port', default='3260', help='iSCSI Target Port, 3260 Default'), cfg.StrOpt('iqn_prefix', default='iqn.2010-10.org.openstack', help='IQN Prefix'), # NOTE(sirp): This is a work-around for a bug in Ubuntu Maverick, # when we pull support for it, we should remove this cfg.BoolOpt('xenapi_remap_vbd_dev', default=False, help='Used to enable the remapping of VBD dev ' '(Works around an issue in Ubuntu Maverick)'), cfg.StrOpt('xenapi_remap_vbd_dev_prefix', default='sd', help='Specify prefix to remap VBD dev to ' '(ex. /dev/xvdb -> /dev/sdb)'), cfg.IntOpt('xenapi_login_timeout', default=10, help='Timeout in seconds for XenAPI login.'), ] FLAGS = flags.FLAGS FLAGS.register_opts(xenapi_opts) class XenAPIDriver(driver.ComputeDriver): """A connection to XenServer or Xen Cloud Platform""" def __init__(self, read_only=False): super(XenAPIDriver, self).__init__() url = FLAGS.xenapi_connection_url username = FLAGS.xenapi_connection_username password = FLAGS.xenapi_connection_password if not url or password is None: raise Exception(_('Must specify xenapi_connection_url, ' 'xenapi_connection_username (optionally), and ' 'xenapi_connection_password to use ' 'compute_driver=xenapi.XenAPIDriver')) self._session = XenAPISession(url, username, password) self._volumeops = volumeops.VolumeOps(self._session) self._host_state = None self._host = host.Host(self._session) self._vmops = vmops.VMOps(self._session) self._initiator = None self._hypervisor_hostname = None self._pool = pool.ResourcePool(self._session) @property def host_state(self): if not self._host_state: self._host_state = host.HostState(self._session) return self._host_state def init_host(self, host): if FLAGS.xenapi_check_host: vm_utils.ensure_correct_host(self._session) try: vm_utils.cleanup_attached_vdis(self._session) except Exception: LOG.exception(_('Failure while cleaning up attached VDIs')) def list_instances(self): """List VM instances""" return self._vmops.list_instances() def spawn(self, context, instance, image_meta, injected_files, admin_password, network_info=None, block_device_info=None): """Create VM instance""" self._vmops.spawn(context, instance, image_meta, injected_files, admin_password, network_info, block_device_info) def confirm_migration(self, migration, instance, network_info): """Confirms a resize, destroying the source VM""" # TODO(Vek): Need to pass context in for access to auth_token self._vmops.confirm_migration(migration, instance, network_info) def finish_revert_migration(self, instance, network_info, block_device_info=None): """Finish reverting a resize, powering back on the instance""" # NOTE(vish): Xen currently does not use network info. self._vmops.finish_revert_migration(instance) block_device_mapping = driver.block_device_info_get_mapping( block_device_info) for vol in block_device_mapping: connection_info = vol['connection_info'] mount_device = vol['mount_device'].rpartition("/")[2] self._volumeops.attach_volume(connection_info, instance['name'], mount_device) def finish_migration(self, context, migration, instance, disk_info, network_info, image_meta, resize_instance=False, block_device_info=None): """Completes a resize, turning on the migrated instance""" self._vmops.finish_migration(context, migration, instance, disk_info, network_info, image_meta, resize_instance) block_device_mapping = driver.block_device_info_get_mapping( block_device_info) for vol in block_device_mapping: connection_info = vol['connection_info'] mount_device = vol['mount_device'].rpartition("/")[2] self._volumeops.attach_volume(connection_info, instance['name'], mount_device) def snapshot(self, context, instance, image_id): """ Create snapshot from a running VM instance """ self._vmops.snapshot(context, instance, image_id) def reboot(self, instance, network_info, reboot_type, block_device_info=None): """Reboot VM instance""" self._vmops.reboot(instance, reboot_type) def set_admin_password(self, instance, new_pass): """Set the root/admin password on the VM instance""" self._vmops.set_admin_password(instance, new_pass) def inject_file(self, instance, b64_path, b64_contents): """Create a file on the VM instance. The file path and contents should be base64-encoded. """ self._vmops.inject_file(instance, b64_path, b64_contents) def change_instance_metadata(self, context, instance, diff): """Apply a diff to the instance metadata.""" self._vmops.change_instance_metadata(instance, diff) def destroy(self, instance, network_info, block_device_info=None): """Destroy VM instance""" self._vmops.destroy(instance, network_info, block_device_info) def pause(self, instance): """Pause VM instance""" self._vmops.pause(instance) def unpause(self, instance): """Unpause paused VM instance""" self._vmops.unpause(instance) def migrate_disk_and_power_off(self, context, instance, dest, instance_type, network_info, block_device_info=None): """Transfers the VHD of a running instance to another host, then shuts off the instance copies over the COW disk""" # NOTE(vish): Xen currently does not use network info. rv = self._vmops.migrate_disk_and_power_off(context, instance, dest, instance_type) block_device_mapping = driver.block_device_info_get_mapping( block_device_info) name_label = self._vmops._get_orig_vm_name_label(instance) for vol in block_device_mapping: connection_info = vol['connection_info'] mount_device = vol['mount_device'].rpartition("/")[2] self._volumeops.detach_volume(connection_info, name_label, mount_device) return rv def suspend(self, instance): """suspend the specified instance""" self._vmops.suspend(instance) def resume(self, instance): """resume the specified instance""" self._vmops.resume(instance) def rescue(self, context, instance, network_info, image_meta, rescue_password): """Rescue the specified instance""" self._vmops.rescue(context, instance, network_info, image_meta, rescue_password) def unrescue(self, instance, network_info): """Unrescue the specified instance""" self._vmops.unrescue(instance) def power_off(self, instance): """Power off the specified instance""" self._vmops.power_off(instance) def power_on(self, instance): """Power on the specified instance""" self._vmops.power_on(instance) def poll_rebooting_instances(self, timeout): """Poll for rebooting instances""" self._vmops.poll_rebooting_instances(timeout) def poll_rescued_instances(self, timeout): """Poll for rescued instances""" self._vmops.poll_rescued_instances(timeout) def reset_network(self, instance): """reset networking for specified instance""" self._vmops.reset_network(instance) def inject_network_info(self, instance, network_info): """inject network info for specified instance""" self._vmops.inject_network_info(instance, network_info) def plug_vifs(self, instance_ref, network_info): """Plug VIFs into networks.""" self._vmops.plug_vifs(instance_ref, network_info) def unplug_vifs(self, instance_ref, network_info): """Unplug VIFs from networks.""" self._vmops.unplug_vifs(instance_ref, network_info) def get_info(self, instance): """Return data about VM instance""" return self._vmops.get_info(instance) def get_diagnostics(self, instance): """Return data about VM diagnostics""" return self._vmops.get_diagnostics(instance) def get_all_bw_usage(self, instances, start_time, stop_time=None): """Return bandwidth usage info for each interface on each running VM""" # we only care about VMs that correspond to a nova-managed # instance: imap = dict([(inst.name, inst.uuid) for inst in instances]) bwusage = [] start_time = time.mktime(start_time.timetuple()) if stop_time: stop_time = time.mktime(stop_time.timetuple()) # get a dictionary of instance names. values are dictionaries # of mac addresses with values that are the bw stats: # e.g. {'instance-001' : { 12:34:56:78:90:12 : {'bw_in': 0, ....}} iusages = self._vmops.get_all_bw_usage(start_time, stop_time) for instance_name in iusages: if instance_name in imap: # yes these are stats for a nova-managed vm # correlate the stats with the nova instance uuid: iusage = iusages[instance_name] for macaddr, usage in iusage.iteritems(): bwusage.append(dict(mac_address=macaddr, uuid=imap[instance_name], bw_in=usage['bw_in'], bw_out=usage['bw_out'])) return bwusage def get_console_output(self, instance): """Return snapshot of console""" return self._vmops.get_console_output(instance) def get_vnc_console(self, instance): """Return link to instance's VNC console""" return self._vmops.get_vnc_console(instance) def get_volume_connector(self, instance): """Return volume connector information""" if not self._initiator or not self._hypervisor_hostname: stats = self.get_host_stats(refresh=True) try: self._initiator = stats['host_other-config']['iscsi_iqn'] self._hypervisor_hostname = stats['host_hostname'] except (TypeError, KeyError) as err: LOG.warn(_('Could not determine key: %s') % err, instance=instance) self._initiator = None return { 'ip': self.get_host_ip_addr(), 'initiator': self._initiator, 'host': self._hypervisor_hostname } @staticmethod def get_host_ip_addr(): xs_url = urlparse.urlparse(FLAGS.xenapi_connection_url) return xs_url.netloc def attach_volume(self, connection_info, instance_name, mountpoint): """Attach volume storage to VM instance""" return self._volumeops.attach_volume(connection_info, instance_name, mountpoint) def detach_volume(self, connection_info, instance_name, mountpoint): """Detach volume storage to VM instance""" return self._volumeops.detach_volume(connection_info, instance_name, mountpoint) def get_console_pool_info(self, console_type): xs_url = urlparse.urlparse(FLAGS.xenapi_connection_url) return {'address': xs_url.netloc, 'username': FLAGS.xenapi_connection_username, 'password': FLAGS.xenapi_connection_password} def get_available_resource(self): """Retrieve resource info. This method is called when nova-compute launches, and as part of a periodic task. :returns: dictionary describing resources """ host_stats = self.get_host_stats(refresh=True) # Updating host information total_ram_mb = host_stats['host_memory_total'] / (1024 * 1024) # NOTE(belliott) memory-free-computed is a value provided by XenServer # for gauging free memory more conservatively than memory-free. free_ram_mb = host_stats['host_memory_free_computed'] / (1024 * 1024) total_disk_gb = host_stats['disk_total'] / (1024 * 1024 * 1024) used_disk_gb = host_stats['disk_used'] / (1024 * 1024 * 1024) dic = {'vcpus': 0, 'memory_mb': total_ram_mb, 'local_gb': total_disk_gb, 'vcpus_used': 0, 'memory_mb_used': total_ram_mb - free_ram_mb, 'local_gb_used': used_disk_gb, 'hypervisor_type': 'xen', 'hypervisor_version': 0, 'hypervisor_hostname': host_stats['host_hostname'], 'cpu_info': host_stats['host_cpu_info']['cpu_count']} return dic def ensure_filtering_rules_for_instance(self, instance_ref, network_info): # NOTE(salvatore-orlando): it enforces security groups on # host initialization and live migration. # In XenAPI we do not assume instances running upon host initialization return def check_can_live_migrate_destination(self, ctxt, instance_ref, block_migration=False, disk_over_commit=False): """Check if it is possible to execute live migration. :param context: security context :param instance_ref: nova.db.sqlalchemy.models.Instance object :param block_migration: if true, prepare for block migration :param disk_over_commit: if true, allow disk over commit """ return self._vmops.check_can_live_migrate_destination(ctxt, instance_ref, block_migration, disk_over_commit) def check_can_live_migrate_destination_cleanup(self, ctxt, dest_check_data): """Do required cleanup on dest host after check_can_live_migrate calls :param ctxt: security context :param disk_over_commit: if true, allow disk over commit """ pass def check_can_live_migrate_source(self, ctxt, instance_ref, dest_check_data): """Check if it is possible to execute live migration. This checks if the live migration can succeed, based on the results from check_can_live_migrate_destination. :param context: security context :param instance_ref: nova.db.sqlalchemy.models.Instance :param dest_check_data: result of check_can_live_migrate_destination includes the block_migration flag """ self._vmops.check_can_live_migrate_source(ctxt, instance_ref, dest_check_data) def get_instance_disk_info(self, instance_name): """Used by libvirt for live migration. We rely on xenapi checks to do this for us.""" pass def pre_block_migration(self, ctxt, instance_ref, disk_info_json): """Used by libvirt for live migration. We rely on xenapi checks to do this for us. May be used in the future to populate the vdi/vif maps""" pass def live_migration(self, ctxt, instance_ref, dest, post_method, recover_method, block_migration=False, migrate_data=None): """Performs the live migration of the specified instance. :params ctxt: security context :params instance_ref: nova.db.sqlalchemy.models.Instance object instance object that is migrated. :params dest: destination host :params post_method: post operation method. expected nova.compute.manager.post_live_migration. :params recover_method: recovery method when any exception occurs. expected nova.compute.manager.recover_live_migration. :params block_migration: if true, migrate VM disk. :params migrate_data: implementation specific params """ self._vmops.live_migrate(ctxt, instance_ref, dest, post_method, recover_method, block_migration, migrate_data) def pre_live_migration(self, context, instance_ref, block_device_info, network_info): """Preparation live migration. :params block_device_info: It must be the result of _get_instance_volume_bdms() at compute manager. """ # TODO(JohnGarbutt) look again when boot-from-volume hits trunk pass def post_live_migration_at_destination(self, ctxt, instance_ref, network_info, block_migration): """Post operation of live migration at destination host. :params ctxt: security context :params instance_ref: nova.db.sqlalchemy.models.Instance object instance object that is migrated. :params network_info: instance network infomation :params : block_migration: if true, post operation of block_migraiton. """ # TODO(JohnGarbutt) look at moving/downloading ramdisk and kernel pass def unfilter_instance(self, instance_ref, network_info): """Removes security groups configured for an instance.""" return self._vmops.unfilter_instance(instance_ref, network_info) def refresh_security_group_rules(self, security_group_id): """ Updates security group rules for all instances associated with a given security group Invoked when security group rules are updated """ return self._vmops.refresh_security_group_rules(security_group_id) def refresh_security_group_members(self, security_group_id): """ Updates security group rules for all instances associated with a given security group Invoked when instances are added/removed to a security group """ return self._vmops.refresh_security_group_members(security_group_id) def refresh_instance_security_rules(self, instance): """ Updates security group rules for specified instance Invoked when instances are added/removed to a security group or when a rule is added/removed to a security group """ return self._vmops.refresh_instance_security_rules(instance) def refresh_provider_fw_rules(self): return self._vmops.refresh_provider_fw_rules() def update_host_status(self): """Update the status info of the host, and return those values to the calling program.""" return self.host_state.update_status() def get_host_stats(self, refresh=False): """Return the current state of the host. If 'refresh' is True, run the update first.""" return self.host_state.get_host_stats(refresh=refresh) def host_power_action(self, host, action): """The only valid values for 'action' on XenServer are 'reboot' or 'shutdown', even though the API also accepts 'startup'. As this is not technically possible on XenServer, since the host is the same physical machine as the hypervisor, if this is requested, we need to raise an exception. """ if action in ("reboot", "shutdown"): return self._host.host_power_action(host, action) else: msg = _("Host startup on XenServer is not supported.") raise NotImplementedError(msg) def set_host_enabled(self, host, enabled): """Sets the specified host's ability to accept new instances.""" return self._host.set_host_enabled(host, enabled) def get_host_uptime(self, host): """Returns the result of calling "uptime" on the target host.""" return self._host.get_host_uptime(host) def host_maintenance_mode(self, host, mode): """Start/Stop host maintenance window. On start, it triggers guest VMs evacuation.""" return self._host.host_maintenance_mode(host, mode) def add_to_aggregate(self, context, aggregate, host, **kwargs): """Add a compute host to an aggregate.""" return self._pool.add_to_aggregate(context, aggregate, host, **kwargs) def remove_from_aggregate(self, context, aggregate, host, **kwargs): """Remove a compute host from an aggregate.""" return self._pool.remove_from_aggregate(context, aggregate, host, **kwargs) def undo_aggregate_operation(self, context, op, aggregate_id, host, set_error=True): """Undo aggregate operation when pool error raised""" return self._pool.undo_aggregate_operation(context, op, aggregate_id, host, set_error) def legacy_nwinfo(self): """ Indicate if the driver requires the legacy network_info format. """ # TODO(tr3buchet): remove this function once all virts return false return False class XenAPISession(object): """The session to invoke XenAPI SDK calls""" def __init__(self, url, user, pw): import XenAPI self.XenAPI = XenAPI self._sessions = queue.Queue() self.is_slave = False exception = self.XenAPI.Failure(_("Unable to log in to XenAPI " "(is the Dom0 disk full?)")) url = self._create_first_session(url, user, pw, exception) self._populate_session_pool(url, user, pw, exception) self.host_uuid = self._get_host_uuid() self.product_version, self.product_brand = \ self._get_product_version_and_brand() def _create_first_session(self, url, user, pw, exception): try: session = self._create_session(url) with timeout.Timeout(FLAGS.xenapi_login_timeout, exception): session.login_with_password(user, pw) except self.XenAPI.Failure, e: # if user and pw of the master are different, we're doomed! if e.details[0] == 'HOST_IS_SLAVE': master = e.details[1] url = pool.swap_xapi_host(url, master) session = self.XenAPI.Session(url) session.login_with_password(user, pw) self.is_slave = True else: raise self._sessions.put(session) return url def _populate_session_pool(self, url, user, pw, exception): for i in xrange(FLAGS.xenapi_connection_concurrent - 1): session = self._create_session(url) with timeout.Timeout(FLAGS.xenapi_login_timeout, exception): session.login_with_password(user, pw) self._sessions.put(session) def _get_host_uuid(self): if self.is_slave: aggr = db.aggregate_get_by_host(context.get_admin_context(), FLAGS.host, key=pool_states.POOL_FLAG)[0] if not aggr: LOG.error(_('Host is member of a pool, but DB ' 'says otherwise')) raise exception.AggregateHostNotFound() return aggr.metadetails[FLAGS.host] else: with self._get_session() as session: host_ref = session.xenapi.session.get_this_host(session.handle) return session.xenapi.host.get_uuid(host_ref) def _get_product_version_and_brand(self): """Return a tuple of (major, minor, rev) for the host version and a string of the product brand""" software_version = self._get_software_version() product_version_str = software_version.get('product_version') product_brand = software_version.get('product_brand') if None in (product_version_str, product_brand): return (None, None) product_version = tuple(int(part) for part in product_version_str.split('.')) return product_version, product_brand def _get_software_version(self): host = self.get_xenapi_host() return self.call_xenapi('host.get_software_version', host) def get_session_id(self): """Return a string session_id. Used for vnc consoles.""" with self._get_session() as session: return str(session._session) @contextlib.contextmanager def _get_session(self): """Return exclusive session for scope of with statement""" session = self._sessions.get() try: yield session finally: self._sessions.put(session) def get_xenapi_host(self): """Return the xenapi host on which nova-compute runs on.""" with self._get_session() as session: return session.xenapi.host.get_by_uuid(self.host_uuid) def call_xenapi(self, method, *args): """Call the specified XenAPI method on a background thread.""" with self._get_session() as session: return session.xenapi_request(method, args) def call_plugin(self, plugin, fn, args): """Call host.call_plugin on a background thread.""" # NOTE(johannes): Fetch host before we acquire a session. Since # get_xenapi_host() acquires a session too, it can result in a # deadlock if multiple greenthreads race with each other. See # bug 924918 host = self.get_xenapi_host() # NOTE(armando): pass the host uuid along with the args so that # the plugin gets executed on the right host when using XS pools args['host_uuid'] = self.host_uuid with self._get_session() as session: return self._unwrap_plugin_exceptions( session.xenapi.host.call_plugin, host, plugin, fn, args) def call_plugin_serialized(self, plugin, fn, *args, **kwargs): params = {'params': pickle.dumps(dict(args=args, kwargs=kwargs))} rv = self.call_plugin(plugin, fn, params) return pickle.loads(rv) def _create_session(self, url): """Stubout point. This can be replaced with a mock session.""" return self.XenAPI.Session(url) def _unwrap_plugin_exceptions(self, func, *args, **kwargs): """Parse exception details""" try: return func(*args, **kwargs) except self.XenAPI.Failure, exc: LOG.debug(_("Got exception: %s"), exc) if (len(exc.details) == 4 and exc.details[0] == 'XENAPI_PLUGIN_EXCEPTION' and exc.details[2] == 'Failure'): params = None try: # FIXME(comstud): eval is evil. params = eval(exc.details[3]) except Exception: raise exc raise self.XenAPI.Failure(params) else: raise except xmlrpclib.ProtocolError, exc: LOG.debug(_("Got exception: %s"), exc) raise def get_rec(self, record_type, ref): try: return self.call_xenapi('%s.get_record' % record_type, ref) except self.XenAPI.Failure, e: if e.details[0] != 'HANDLE_INVALID': raise return None def get_all_refs_and_recs(self, record_type): """Retrieve all refs and recs for a Xen record type. Handles race-conditions where the record may be deleted between the `get_all` call and the `get_record` call. """ for ref in self.call_xenapi('%s.get_all' % record_type): rec = self.get_rec(record_type, ref) # Check to make sure the record still exists. It may have # been deleted between the get_all call and get_record call if rec: yield ref, rec
{ "content_hash": "739a2b239bbc55f2a938da166c3b8d7f", "timestamp": "", "source": "github", "line_count": 775, "max_line_length": 79, "avg_line_length": 41.87741935483871, "alnum_prop": 0.597134493914651, "repo_name": "paulmathews/nova", "id": "79ac2d44aa81a6bf334ac803329d1e6e62ee1133", "size": "33149", "binary": false, "copies": "2", "ref": "refs/heads/stable/folsom", "path": "nova/virt/xenapi/driver.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "16002" }, { "name": "JavaScript", "bytes": "7403" }, { "name": "Python", "bytes": "7293434" }, { "name": "Shell", "bytes": "16910" } ], "symlink_target": "" }
from __future__ import with_statement from cms.test_utils.compat import skipIf from cms.test_utils.util.context_managers import SettingsOverride, StdoutOverride, TemporaryDirectory import django from django.core import management from django.test import TestCase from distutils.version import LooseVersion class StaticFilesTest(TestCase): @skipIf(LooseVersion(django.get_version()) < LooseVersion('1.4'), "CachedStaticFilesStorage doesn't exist in Django < 1.4") def test_collectstatic_with_cached_static_files_storage(self): # CachedStaticFilesStorage requires that the CSS files # don't contain any broken links. with TemporaryDirectory() as tmpdir: with SettingsOverride(STATIC_ROOT=tmpdir, STATICFILES_STORAGE='django.contrib.staticfiles.storage.CachedStaticFilesStorage'): with StdoutOverride() as output: management.call_command('collectstatic', interactive=False)
{ "content_hash": "8d4c4cb419b21089b9c01ea31461f0f5", "timestamp": "", "source": "github", "line_count": 22, "max_line_length": 101, "avg_line_length": 44.68181818181818, "alnum_prop": 0.7324516785350966, "repo_name": "adaptivelogic/django-cms", "id": "524ff4dd6fbfce9310ef63affaaf0a2048935fed", "size": "983", "binary": false, "copies": "1", "ref": "refs/heads/refactor-viewperms", "path": "cms/tests/staticfiles.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "JavaScript", "bytes": "743058" }, { "name": "PHP", "bytes": "2156" }, { "name": "Python", "bytes": "2174742" }, { "name": "XSLT", "bytes": "5122" } ], "symlink_target": "" }
"""This file contains code for interacting with terminals. All the terminal interaction code is consolidated so the complexity can be in one place, away from code that is commonly looked at. """ from __future__ import print_function, unicode_literals import logging import sys class LoggingHandler(logging.Handler): """Custom logging handler that works with terminal window dressing. This is alternative terminal logging handler which contains smarts for emitting terminal control characters properly. Currently, it has generic support for "footer" elements at the bottom of the screen. Functionality can be added when needed. """ def __init__(self): logging.Handler.__init__(self) self.fh = sys.stdout self.footer = None def flush(self): self.acquire() try: self.fh.flush() finally: self.release() def emit(self, record): msg = self.format(record) if self.footer: self.footer.clear() self.fh.write(msg) self.fh.write('\n') if self.footer: self.footer.draw() # If we don't flush, the footer may not get drawn. self.flush() class TerminalFooter(object): """Represents something drawn on the bottom of a terminal.""" def __init__(self, terminal): self.t = terminal self.fh = sys.stdout def _clear_lines(self, n): for i in xrange(n): self.fh.write(self.t.move_x(0)) self.fh.write(self.t.clear_eol()) self.fh.write(self.t.move_up()) self.fh.write(self.t.move_down()) self.fh.write(self.t.move_x(0)) def clear(self): raise Exception('clear() must be implemented.') def draw(self): raise Exception('draw() must be implemented.')
{ "content_hash": "b095059458627038cb0c3ea6dda6de56", "timestamp": "", "source": "github", "line_count": 71, "max_line_length": 77, "avg_line_length": 25.971830985915492, "alnum_prop": 0.6203904555314533, "repo_name": "wilebeast/FireFox-OS", "id": "cdc3966575e62d7540600ef5743dc1c03d2baba7", "size": "2044", "binary": false, "copies": "33", "ref": "refs/heads/master", "path": "B2G/gecko/python/mach/mach/terminal.py", "mode": "33188", "license": "apache-2.0", "language": [], "symlink_target": "" }
from django.template import Template from django.test.utils import override_settings from cms.api import create_page from cms.models import Page from cms.templatetags.cms_admin import preview_link from cms.test_utils.testcases import CMSTestCase from cms.utils.i18n import force_language from menus.base import NavigationNode @override_settings(ROOT_URLCONF='cms.test_utils.project.nonroot_urls') class NonRootCase(CMSTestCase): def setUp(self): u = self._create_user("test", True, True) with self.login_user_context(u): self.create_some_pages() def create_some_pages(self): """ Creates the following structure: + P1 | + P2 | + P3 + P4 """ self.page1 = self.create_homepage( title="page1", template="nav_playground.html", language="en", published=True, in_navigation=True, ) self.page2 = create_page("page2", "nav_playground.html", "en", parent=self.page1, published=True, in_navigation=True) self.page3 = create_page("page3", "nav_playground.html", "en", parent=self.page2, published=True, in_navigation=True) self.page4 = create_page("page4", "nav_playground.html", "en", published=True, in_navigation=True) self.all_pages = [self.page1, self.page2, self.page3, self.page4] self.top_level_pages = [self.page1, self.page4] self.level1_pages = [self.page2] self.level2_pages = [self.page3] def test_get_page_root(self): self.assertEqual(self.get_pages_root(), '/en/content/') def test_basic_cms_menu(self): response = self.client.get(self.get_pages_root()) self.assertEqual(response.status_code, 200) self.assertEqual(self.get_pages_root(), "/en/content/") def test_show_menu(self): context = self.get_context() tpl = Template("{% load menu_tags %}{% show_menu %}") tpl.render(context) nodes = context['children'] self.assertEqual(nodes[0].get_absolute_url(), self.get_pages_root()) self.assertEqual(nodes[0].get_absolute_url(), "/en/content/") def test_show_breadcrumb(self): page2 = Page.objects.get(pk=self.page2.pk) context = self.get_context(path=self.page2.get_absolute_url(), page=self.page2.publisher_public) tpl = Template("{% load menu_tags %}{% show_breadcrumb %}") tpl.render(context) nodes = context['ancestors'] self.assertEqual(nodes[0].get_absolute_url(), self.get_pages_root()) self.assertEqual(nodes[0].get_absolute_url(), "/en/content/") self.assertEqual(isinstance(nodes[0], NavigationNode), True) self.assertEqual(nodes[1].get_absolute_url(), page2.get_absolute_url()) def test_form_multilingual_admin(self): """ Tests for correct form URL mangling in preview_link templatetag """ language = 'en' with force_language("en"): pages_root = self.get_pages_root() link = preview_link(self.page2,language=language) self.assertEqual(link,'%s%s/' % (pages_root,self.page2.get_slug())) self.assertEqual(link,'/en/content/page2/')
{ "content_hash": "f49ac6c90b219b5f8e57b96c67b03bfe", "timestamp": "", "source": "github", "line_count": 84, "max_line_length": 104, "avg_line_length": 39.54761904761905, "alnum_prop": 0.614990969295605, "repo_name": "rsalmaso/django-cms", "id": "06e926f3757970f72cddddbbfb61288b10a86b23", "size": "3322", "binary": false, "copies": "2", "ref": "refs/heads/develop", "path": "cms/tests/test_nonroot.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "HTML", "bytes": "204223" }, { "name": "JavaScript", "bytes": "1250281" }, { "name": "Python", "bytes": "2386268" }, { "name": "SCSS", "bytes": "137693" }, { "name": "Shell", "bytes": "22511" } ], "symlink_target": "" }
from federatedml.feature.feature_selection.filter_base import BaseFilterMethod from federatedml.param.feature_selection_param import UniqueValueParam from federatedml.protobuf.generated import feature_selection_meta_pb2 from federatedml.statistic.statics import MultivariateStatisticalSummary import math class UniqueValueFilter(BaseFilterMethod): """ filter the columns if all values in this feature is the same """ def __init__(self, filter_param: UniqueValueParam): super().__init__(filter_param) self.statics_obj = None def _parse_filter_param(self, filter_param): self.eps = filter_param.eps def set_statics_obj(self, statics_obj): self.statics_obj = statics_obj def fit(self, data_instances, suffix): if self.statics_obj is None: self.statics_obj = MultivariateStatisticalSummary(data_instances) max_values = self.statics_obj.get_max() min_values = self.statics_obj.get_min() for col_name in self.selection_properties.select_col_names: min_max_diff = math.fabs(max_values[col_name] - min_values[col_name]) if min_max_diff >= self.eps: self.selection_properties.add_left_col_name(col_name) self.selection_properties.add_feature_value(col_name, min_max_diff) self._keep_one_feature(pick_high=True) return self def get_meta_obj(self): result = feature_selection_meta_pb2.FilterMeta() return result
{ "content_hash": "f75fb1bf53108c3a371276702d20d5b7", "timestamp": "", "source": "github", "line_count": 41, "max_line_length": 81, "avg_line_length": 36.65853658536585, "alnum_prop": 0.6879574184963406, "repo_name": "FederatedAI/FATE", "id": "380f16f03756f518932b2887dd308cd57bc509c0", "size": "2165", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "python/federatedml/feature/feature_selection/unique_value_filter.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Lua", "bytes": "19716" }, { "name": "Python", "bytes": "5121767" }, { "name": "Rust", "bytes": "3971" }, { "name": "Shell", "bytes": "19676" } ], "symlink_target": "" }
# -*- coding: utf-8 -*- # ProjectEuler/src/python/problem055.py # # Lychrel numbers # =============== # Published on Friday, 24th October 2003, 06:00 pm # # If we take 47, reverse and add, 47 + 74 = 121, which is palindromic. Not all # numbers produce palindromes so quickly. For example, 349 + 943 = 1292, 1292 # + 2921 = 4213 4213 + 3124 = 7337 That is, 349 took three iterations to # arrive at a palindrome. Although no one has proved it yet, it is thought that # some numbers, like 196, never produce a palindrome. A number that never forms # a palindrome through the reverse and add process is called a Lychrel number. # Due to the theoretical nature of these numbers, and for the purpose of this # problem, we shall assume that a number is Lychrel until proven otherwise. In # addition you are given that for every number below ten-thousand, it will # either (i) become a palindrome in less than fifty iterations, or, (ii) no # one, with all the computing power that exists, has managed so far to map it # to a palindrome. In fact, 10677 is the first number to be shown to require # over fifty iterations before producing a palindrome: # 4668731596684224866951378664 (53 iterations, 28-digits). Surprisingly, there # are palindromic numbers that are themselves Lychrel numbers; the first # example is 4994. How many Lychrel numbers are there below ten-thousand? NOTE: # Wording was modified slightly on 24 April 2007 to emphasise the theoretical # nature of Lychrel numbers. import projecteuler as pe def main(): pass if __name__ == "__main__": main()
{ "content_hash": "43adcd09e2d01c96eabab8ecde5ac929", "timestamp": "", "source": "github", "line_count": 33, "max_line_length": 79, "avg_line_length": 47.63636363636363, "alnum_prop": 0.7391857506361323, "repo_name": "olduvaihand/ProjectEuler", "id": "7c659382c6c8e491d36dba2cab8c27d170724531", "size": "1574", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "src/python/problem055.py", "mode": "33188", "license": "mit", "language": [ { "name": "Go", "bytes": "0" }, { "name": "Python", "bytes": "422751" } ], "symlink_target": "" }
from __future__ import unicode_literals # no division here import unittest from pyoxy import ObjectProxy as OP try: # pragma: no cover unicode _PY3 = False except NameError: # pragma: no cover _PY3 = True class ObjectProxyNoFutureDivisionTest(unittest.TestCase): def check_result(self, expected_result, result, assert_result_is_proxy=False): self.assertEqual(expected_result, result) self.assertEqual(assert_result_is_proxy, isinstance(result, OP)) def test_div(self): exp = 2 / 3 self.check_result(exp, OP(2) / 3) self.check_result(exp, OP(2) / OP(3)) self.check_result(exp, OP(2) / OP(OP(3))) self.check_result(exp, OP(OP(2)) / OP(OP(3))) self.check_result(exp, OP(OP(OP(2))) / OP(OP(3))) def test_div_error(self): # This test fails in Python 2, passes in Python 3. # There is no reverse operator magic method for old division # and naturally int's div implementation doesn't support the proxy. # The workaround is to wrap the 1st operand in ObjectProxy, # like in the example above in test_div. self.check_result(2 / 3, 2 / OP(3)) if not _PY3: # pragma: no cover test_div_error = unittest.expectedFailure(test_div_error) def test_floordiv(self): exp = 8 // 3 self.check_result(exp, OP(8) // OP(3)) self.check_result(exp, 8 // OP(3)) self.check_result(exp, OP(8) // 3) self.check_result(exp, OP(OP(8)) // OP(OP(3)))
{ "content_hash": "a1f2162fad33c0239b71e5ff2f5632cc", "timestamp": "", "source": "github", "line_count": 45, "max_line_length": 75, "avg_line_length": 34.333333333333336, "alnum_prop": 0.6148867313915858, "repo_name": "jacekmitrega/pyoxy", "id": "3637432df182cdafe5961924d8c678fe869c066d", "size": "2146", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/test_objectproxy_nofuturedivision.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "28197" }, { "name": "Shell", "bytes": "154" } ], "symlink_target": "" }
from __future__ import print_function # SFTP storage backend for Django. # Author: Brent Tubbs <brent.tubbs@gmail.com> # License: MIT # # Modeled on the FTP storage by Rafal Jonca <jonca.rafal@gmail.com> import getpass import os import paramiko import posixpath import stat from datetime import datetime from django.core.files.base import File from django.core.files.storage import Storage from django.utils.deconstruct import deconstructible from django.utils.six import BytesIO from django.utils.six.moves.urllib import parse as urlparse from storages.utils import setting @deconstructible class SFTPStorage(Storage): def __init__(self, host=None, params=None, interactive=None, file_mode=None, dir_mode=None, uid=None, gid=None, known_host_file=None, root_path=None, base_url=None): self._host = host or setting('SFTP_STORAGE_HOST') self._params = params or setting('SFTP_STORAGE_PARAMS', {}) self._interactive = setting('SFTP_STORAGE_INTERACTIVE', False) \ if interactive is None else interactive self._file_mode = setting('SFTP_STORAGE_FILE_MODE') \ if file_mode is None else file_mode self._dir_mode = setting('SFTP_STORAGE_DIR_MODE') if \ dir_mode is None else dir_mode self._uid = setting('SFTP_STORAGE_UID') if uid is None else uid self._gid = setting('SFTP_STORAGE_GID') if gid is None else gid self._known_host_file = setting('SFTP_KNOWN_HOST_FILE') \ if known_host_file is None else known_host_file self._root_path = setting('SFTP_STORAGE_ROOT', '') \ if root_path is None else root_path self._base_url = setting('MEDIA_URL') if base_url is None else base_url # for now it's all posix paths. Maybe someday we'll support figuring # out if the remote host is windows. self._pathmod = posixpath def _connect(self): self._ssh = paramiko.SSHClient() if self._known_host_file is not None: self._ssh.load_host_keys(self._known_host_file) else: # automatically add host keys from current user. self._ssh.load_host_keys(os.path.expanduser(os.path.join("~", ".ssh", "known_hosts"))) # and automatically add new host keys for hosts we haven't seen before. self._ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) try: self._ssh.connect(self._host, **self._params) except paramiko.AuthenticationException as e: if self._interactive and 'password' not in self._params: # If authentication has failed, and we haven't already tried # username/password, and configuration allows it, then try # again with username/password. if 'username' not in self._params: self._params['username'] = getpass.getuser() self._params['password'] = getpass.getpass() self._connect() else: raise paramiko.AuthenticationException(e) except Exception as e: print(e) if not hasattr(self, '_sftp'): self._sftp = self._ssh.open_sftp() @property def sftp(self): """Lazy SFTP connection""" if not hasattr(self, '_sftp'): self._connect() return self._sftp def _join(self, *args): # Use the path module for the remote host type to join a path together return self._pathmod.join(*args) def _remote_path(self, name): return self._join(self._root_path, name) def _open(self, name, mode='rb'): return SFTPStorageFile(name, self, mode) def _read(self, name): remote_path = self._remote_path(name) return self.sftp.open(remote_path, 'rb') def _chown(self, path, uid=None, gid=None): """Set uid and/or gid for file at path.""" # Paramiko's chown requires both uid and gid, so look them up first if # we're only supposed to set one. if uid is None or gid is None: attr = self.sftp.stat(path) uid = uid or attr.st_uid gid = gid or attr.st_gid self.sftp.chown(path, uid, gid) def _mkdir(self, path): """Create directory, recursing up to create parent dirs if necessary.""" parent = self._pathmod.dirname(path) if not self.exists(parent): self._mkdir(parent) self.sftp.mkdir(path) if self._dir_mode is not None: self.sftp.chmod(path, self._dir_mode) if self._uid or self._gid: self._chown(path, uid=self._uid, gid=self._gid) def _save(self, name, content): """Save file via SFTP.""" content.open() path = self._remote_path(name) dirname = self._pathmod.dirname(path) if not self.exists(dirname): self._mkdir(dirname) f = self.sftp.open(path, 'wb') f.write(content.file.read()) f.close() # set file permissions if configured if self._file_mode is not None: self.sftp.chmod(path, self._file_mode) if self._uid or self._gid: self._chown(path, uid=self._uid, gid=self._gid) return name def delete(self, name): remote_path = self._remote_path(name) self.sftp.remove(remote_path) def exists(self, name): # Try to retrieve file info. Return true on success, false on failure. remote_path = self._remote_path(name) try: self.sftp.stat(remote_path) return True except IOError: return False def _isdir_attr(self, item): # Return whether an item in sftp.listdir_attr results is a directory if item.st_mode is not None: return stat.S_IFMT(item.st_mode) == stat.S_IFDIR else: return False def listdir(self, path): remote_path = self._remote_path(path) dirs, files = [], [] for item in self.sftp.listdir_attr(remote_path): if self._isdir_attr(item): dirs.append(item.filename) else: files.append(item.filename) return dirs, files def size(self, name): remote_path = self._remote_path(name) return self.sftp.stat(remote_path).st_size def accessed_time(self, name): remote_path = self._remote_path(name) utime = self.sftp.stat(remote_path).st_atime return datetime.fromtimestamp(utime) def modified_time(self, name): remote_path = self._remote_path(name) utime = self.sftp.stat(remote_path).st_mtime return datetime.fromtimestamp(utime) def url(self, name): if self._base_url is None: raise ValueError("This file is not accessible via a URL.") return urlparse.urljoin(self._base_url, name).replace('\\', '/') class SFTPStorageFile(File): def __init__(self, name, storage, mode): self._name = name self._storage = storage self._mode = mode self._is_dirty = False self.file = BytesIO() self._is_read = False @property def size(self): if not hasattr(self, '_size'): self._size = self._storage.size(self._name) return self._size def read(self, num_bytes=None): if not self._is_read: self.file = self._storage._read(self._name) self._is_read = True return self.file.read(num_bytes) def write(self, content): if 'w' not in self._mode: raise AttributeError("File was opened for read-only access.") self.file = BytesIO(content) self._is_dirty = True self._is_read = True def close(self): if self._is_dirty: self._storage._save(self._name, self) self.file.close()
{ "content_hash": "9636404bb3ce30562541adfc3c3e15c5", "timestamp": "", "source": "github", "line_count": 230, "max_line_length": 98, "avg_line_length": 34.47391304347826, "alnum_prop": 0.5974271660991298, "repo_name": "faxioman/django-storages", "id": "6efdf1234adcd1bf08d241572ac4bc92be7e3846", "size": "7929", "binary": false, "copies": "5", "ref": "refs/heads/master", "path": "storages/backends/sftpstorage.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Python", "bytes": "155467" } ], "symlink_target": "" }
import argparse import os import json import collections from subprocess import call parser = argparse.ArgumentParser(description='CUE!') options = parser.add_argument_group('Options') options.add_argument('-p', '--project', help='Specify the target project for the task', required=False) options.add_argument('-s', '--section', help='Specify the section for the task', required=False) options.add_argument('task', help='Specify the task') options.add_argument('project_or_argument', nargs='?', help='Specify the project') global_config_dir_path = os.path.join(os.getenv('HOME'), '.cue') extension = '.cueconf' args = None def get_global_conf(): cues = get_settings_from_directory(global_config_dir_path) if 'projects' not in cues: print 'cueconf is missing projects section' exit() if 'defaultSection' not in cues: print 'cueconf is missing defaultSection definition' exit() return cues def get_settings_from_directory(directory_path): if not os.path.exists(directory_path): print 'directory does not exist (%s)' % directory_path exit() cueconf_file_paths = \ [os.path.join(directory_path, file) for file in os.listdir(directory_path) if file.lower().endswith(extension)] cues = {} for cueconf_path in cueconf_file_paths: if not os.path.isfile(cueconf_path): print "cueconf doesn't exist " + cueconf_path exit() try: cueconf_contents = json.load(open(cueconf_path)) except: print '%s is not valid json' % cueconf_path exit() cues = recursive_update(cues, cueconf_contents) return cues def get_project_conf(global_conf, project_name=None): cues = None if project_name: if project_name not in global_conf['projects']: print 'Project %s not found' % project_name exit() cues = get_settings_from_directory(global_conf['projects'][project_name]) else: for slug in global_conf['projects']: project_conf_path = global_conf['projects'][slug] if os.path.dirname(project_conf_path) in os.getcwd(): cues = get_settings_from_directory(project_conf_path) if not cues: print 'Project found but no cueconf files appear to not exist' exit() break if cues: if 'root_path' not in cues: print "project_conf missing 'root_path'" exit() if 'slug' not in cues: print "project_conf missing 'slug'" exit() if 'name' not in cues: print "project_conf missing 'name'" exit() if 'defaultSection' in cues and cues['defaultSection'] not in cues: print "project_conf missing '%s'" % cues['defaultSection'] exit() elif 'defaultSection' not in cues and global_conf['defaultSection'] not in cues: print "project_conf missing '%s'" % global_config['defaultSection'] exit() else: print 'project_conf not found' exit() return cues def recursive_update(d, u): "Recursively updates a dictionary like object." for k, v in u.iteritems(): if isinstance(v, collections.Mapping): r = recursive_update(d.get(k, {}), v) d[k] = r elif k in d and isinstance(d[k], collections.Iterable) \ and isinstance(v, collections.Iterable): d[k] += v else: d[k] = v return d def register(global_conf, section): project_conf_dict = get_settings_from_directory(os.getcwd()) if project_conf_dict: name = project_conf_dict['name'] slug = project_conf_dict['slug'] else: name = raw_input('Name of Project: ') slug = raw_input('Project Slug: ') default_section = {} project_conf_dict = {'name': name, 'slug': slug, 'root_path': os.getcwd(), section: default_section} proj_cue_path = os.path.join(os.getcwd(), extension) f = open(proj_cue_path, 'w+') json.dump(project_conf_dict, f, indent=4) f.close() if slug in global_conf['projects']: print 'project slug already exists on the system' exit() global_conf['projects'][slug] = os.getcwd() f = open(os.path.join(global_config_dir_path, slug + extension), 'w+') json.dump({'projects': {slug: os.getcwd()}}, f, indent=4) f.close() return project_conf_dict def deregister(global_conf, project_conf): if project_conf['slug'] not in global_conf['projects']: print 'Project %s is not registered' % project_conf['slug'] exit() del global_conf['projects'][project_conf['slug']] os.remove(os.path.join(global_config_dir_path, \ project_conf['slug'] + extension)) def run_task(section, task_name, global_conf, project_conf): def exec_task(task, default_flow='next'): exec_string = None if isinstance(task, collections.Mapping): if 'exec' in task: exec_string = task['exec'] elif isinstance(task, basestring): if task.startswith(':'): return run_task(task[1:]) else: exec_string = task exit_code = 0 if exec_string: exit_code = call(exec_string, shell=True) flow = default_flow if exit_code != 0: # error - default flow is stop flow = 'stop' if isinstance(task, collections.Mapping) and'onError' in task: flow = exec_task(task['onError'], default_flow='stop') else: # success if 'flow' in task: flow = task['flow'] return flow tasks = None #Local if task_name in project_conf[section]: tasks = project_conf[section][task_name] #Global if not tasks: if 'global' in global_conf[section] and \ task_name in global_conf[section]['global']: tasks = global_conf[section]['global'][task_name] #By Group if not tasks: for group in global_conf[section]: if group in project_conf: tasks = \ global_conf[section][group][project_conf[group]][task_name] else: print 'Project missing setting. ' + \ 'Please define an entry for %s[%s] = (%s)' % \ (section, group, str(global_conf[section][group].keys())) if tasks: break if not tasks: print '(%s) tasks not found' % task_name exit() if not isinstance(tasks, collections.Iterable) or \ isinstance(tasks, basestring): tasks = [tasks] previous_index = 0 current_index = 0 next_index = 1 stop_index = len(tasks) while True: flow = exec_task(tasks[current_index]) if flow == 'next': previous_index = current_index current_index = next_index next_index = min(stop_index, current_index + 1) elif flow == 'previous': current_index = previous_index previous_index = max(0, current_index - 1) next_index = min(stop_index, current_index + 1) elif flow == 'stop': current_index = stop_index next_index = stop_index previous_index = max(0, current_index - 1) elif flow.startswith('#'): current_index = int(flow[1:]) previous_index = max(0, current_index - 1) next_index = min(stop_index, current_index + 1) if current_index >= stop_index: break if __name__ == '__main__': args = vars(parser.parse_args()) global_conf = get_global_conf() if not args['project'] and args['project_or_argument']: # todo - check to see if this is the name of a project otherwise assume # it is a parameter being passed to the commands args['project'] = args['project_or_argument'] is_section_default = False if not args['section']: args['section'] = global_conf['defaultSection'] is_section_default = True if args['task'] == 'register': project_conf = register(global_conf, args['section']) else: project_conf = get_project_conf(global_conf, args['project']) if is_section_default and 'defaultSection' in project_conf: args['section'] = project_conf['defaultSection'] if args['task'] == 'deregister': deregister(global_conf, project_conf) else: run_task(args['section'], args['task'], global_conf, project_conf)
{ "content_hash": "7ada7fa2d45b92e7ff10620ad1db9a77", "timestamp": "", "source": "github", "line_count": 287, "max_line_length": 88, "avg_line_length": 31.261324041811847, "alnum_prop": 0.5646455639768168, "repo_name": "josephdwyer/cue", "id": "66f13738d4a2b8cebf6783db4e982b35307e77af", "size": "8972", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "cue.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "8972" } ], "symlink_target": "" }
""" @author Stephan Reith @date 14.09.2016 This is a simple example to demonstrate how the ROS Spinnaker Interface can be used to send only. You will also need a ROS Talker to send and data. Make sure they communicate over the same ROS topics and std_msgs.Int64 ROS Messages used in here. """ import spynnaker.pyNN as pynn from ros_spinnaker_interface import ROS_Spinnaker_Interface # import transfer_functions as tf from ros_spinnaker_interface import SpikeSourcePoisson ts = 0.1 n_neurons = 1 simulation_time = 10000 # ms pynn.setup(timestep=ts, min_delay=ts, max_delay=2.0*ts) pop = pynn.Population(size=n_neurons, cellclass=pynn.IF_curr_exp, cellparams={}, label='pop') # The ROS_Spinnaker_Interface just needs to be initialised with these two Spike Source Parameters. ros_interface = ROS_Spinnaker_Interface( n_neurons_source=n_neurons, # number of neurons of the injector population Spike_Source_Class=SpikeSourcePoisson) # the transfer function ROS Input -> Spikes you want to use. # Build your network, run the simulation and optionally record the spikes and voltages. pynn.Projection(ros_interface, pop, pynn.OneToOneConnector(weights=5, delays=1)) pop.record() pop.record_v() pynn.run(simulation_time) spikes = pop.getSpikes() pynn.end() # Plot import pylab spike_times = [spike[1] for spike in spikes] spike_ids = [spike[0] for spike in spikes] pylab.plot(spike_times, spike_ids, ".") pylab.xlabel('Time (ms)') pylab.ylabel('Neuron ID') pylab.title('Spike Plot') pylab.xlim(xmin=0) pylab.show()
{ "content_hash": "009c71c27e643cffc789948c75fd2172", "timestamp": "", "source": "github", "line_count": 58, "max_line_length": 109, "avg_line_length": 26.948275862068964, "alnum_prop": 0.7415227127319258, "repo_name": "reiths/ros_spinnaker_interface", "id": "db96bebb3bedf364e08caeaa6449b42c35f2dce8", "size": "1610", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "examples/example_sender.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "33436" } ], "symlink_target": "" }
from __future__ import unicode_literals import os import shutil import subprocess from tempfile import NamedTemporaryFile from django.contrib.staticfiles import finders from django.contrib.staticfiles.storage import staticfiles_storage from django.core.files.base import ContentFile from django.utils.encoding import smart_bytes from django.utils.six import string_types, text_type from pipeline.conf import settings from pipeline.exceptions import CompilerError from pipeline.utils import to_class, set_std_streams_blocking class Compiler(object): def __init__(self, storage=None, verbose=False): if storage is None: storage = staticfiles_storage self.storage = storage self.verbose = verbose @property def compilers(self): return [to_class(compiler) for compiler in settings.COMPILERS] def compile(self, paths, compiler_options={}, force=False): def _compile(input_path): for compiler in self.compilers: compiler = compiler(verbose=self.verbose, storage=self.storage) if compiler.match_file(input_path): try: infile = self.storage.path(input_path) except NotImplementedError: infile = finders.find(input_path) outfile = compiler.output_path(infile, compiler.output_extension) outdated = compiler.is_outdated(infile, outfile) compiler.compile_file(infile, outfile, outdated=outdated, force=force, **compiler_options) return compiler.output_path(input_path, compiler.output_extension) else: return input_path try: import multiprocessing from concurrent import futures except ImportError: return list(map(_compile, paths)) else: with futures.ThreadPoolExecutor(max_workers=multiprocessing.cpu_count()) as executor: return list(executor.map(_compile, paths)) class CompilerBase(object): def __init__(self, verbose, storage): self.verbose = verbose self.storage = storage def match_file(self, filename): raise NotImplementedError def compile_file(self, infile, outfile, outdated=False, force=False): raise NotImplementedError def save_file(self, path, content): return self.storage.save(path, ContentFile(smart_bytes(content))) def read_file(self, path): file = self.storage.open(path, 'rb') content = file.read() file.close() return content def output_path(self, path, extension): path = os.path.splitext(path) return '.'.join((path[0], extension)) def is_outdated(self, infile, outfile): if not os.path.exists(outfile): return True try: return os.path.getmtime(infile) > os.path.getmtime(outfile) except OSError: return True class SubProcessCompiler(CompilerBase): def execute_command(self, command, cwd=None, stdout_captured=None): """Execute a command at cwd, saving its normal output at stdout_captured. Errors, defined as nonzero return code or a failure to start execution, will raise a CompilerError exception with a description of the cause. They do not write output. This is file-system safe (any valid file names are allowed, even with spaces or crazy characters) and OS agnostic (existing and future OSes that Python supports should already work). The only thing weird here is that any incoming command arg item may itself be a tuple. This allows compiler implementations to look clean while supporting historical string config settings and maintaining backwards compatibility. Thus, we flatten one layer deep. ((env, foocomp), infile, (-arg,)) -> (env, foocomp, infile, -arg) """ argument_list = [] for flattening_arg in command: if isinstance(flattening_arg, string_types): argument_list.append(flattening_arg) else: argument_list.extend(flattening_arg) # The first element in argument_list is the program that will be executed; if it is '', then # a PermissionError will be raised. Thus empty arguments are filtered out from argument_list argument_list = filter(None, argument_list) stdout = None try: # We always catch stdout in a file, but we may not have a use for it. temp_file_container = cwd or os.path.dirname(stdout_captured or "") or os.getcwd() with NamedTemporaryFile(delete=False, dir=temp_file_container) as stdout: compiling = subprocess.Popen(argument_list, cwd=cwd, stdout=stdout, stderr=subprocess.PIPE) _, stderr = compiling.communicate() set_std_streams_blocking() if compiling.returncode != 0: stdout_captured = None # Don't save erroneous result. raise CompilerError( "{0!r} exit code {1}\n{2}".format(argument_list, compiling.returncode, stderr), command=argument_list, error_output=stderr) # User wants to see everything that happened. if self.verbose: with open(stdout.name) as out: print(out.read()) print(stderr) except OSError as e: stdout_captured = None # Don't save erroneous result. raise CompilerError(e, command=argument_list, error_output=text_type(e)) finally: # Decide what to do with captured stdout. if stdout: if stdout_captured: shutil.move(stdout.name, os.path.join(cwd or os.curdir, stdout_captured)) else: os.remove(stdout.name)
{ "content_hash": "b7ec44107dbbb925ee10e0cc2b5adfca", "timestamp": "", "source": "github", "line_count": 153, "max_line_length": 100, "avg_line_length": 40.588235294117645, "alnum_prop": 0.6061191626409018, "repo_name": "kronion/django-pipeline", "id": "eb43157e560d24f7ff88dbfa21d2edf222764b21", "size": "6210", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "pipeline/compilers/__init__.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "1904" }, { "name": "CoffeeScript", "bytes": "104" }, { "name": "HTML", "bytes": "2525" }, { "name": "JavaScript", "bytes": "1760" }, { "name": "LiveScript", "bytes": "26" }, { "name": "Python", "bytes": "134394" } ], "symlink_target": "" }
from google.cloud import datacatalog_v1 def sample_create_entry_group(): # Create a client client = datacatalog_v1.DataCatalogClient() # Initialize request argument(s) request = datacatalog_v1.CreateEntryGroupRequest( parent="parent_value", entry_group_id="entry_group_id_value", ) # Make the request response = client.create_entry_group(request=request) # Handle the response print(response) # [END datacatalog_v1_generated_DataCatalog_CreateEntryGroup_sync]
{ "content_hash": "9fdb35b21ce883614e3acb715b837d89", "timestamp": "", "source": "github", "line_count": 20, "max_line_length": 66, "avg_line_length": 25.9, "alnum_prop": 0.7084942084942085, "repo_name": "googleapis/python-datacatalog", "id": "1d3631b779be80b851c7b20fb76724e3802dd65d", "size": "1915", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "samples/generated_samples/datacatalog_v1_generated_data_catalog_create_entry_group_sync.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Dockerfile", "bytes": "2050" }, { "name": "Python", "bytes": "3073442" }, { "name": "Shell", "bytes": "30675" } ], "symlink_target": "" }
from distutils.core import setup setup(name = 'filltex', version = '1.5.1', description = 'Automatic queries to ADS and InSPIRE databases to fill LATEX bibliography', long_description="See: `github.com/dgerosa/filltex <https://github.com/dgerosa/filltex>`_." , author = 'Davide Gerosa and Michele Vallisneri', author_email = 'dgerosa@star.sr.bham.ac.uk', url = 'https://github.com/dgerosa/filltex', license='MIT', py_modules = ['fillbib'], scripts = ['bin/fillbib','bin/filltex'], include_package_data=True, zip_safe=False )
{ "content_hash": "07a9e72e58f46eae349b13bcad4bc08c", "timestamp": "", "source": "github", "line_count": 15, "max_line_length": 99, "avg_line_length": 39.86666666666667, "alnum_prop": 0.6438127090301003, "repo_name": "dgerosa/filltex", "id": "16f3e996fe552fa920bc8a7067132c91c86235ac", "size": "621", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "setup.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "6783" }, { "name": "Shell", "bytes": "2092" }, { "name": "TeX", "bytes": "9111" } ], "symlink_target": "" }
""" This class encapsulates the interactions with the student advisers. """ from restclients_core.exceptions import DataFailureException from uw_sws.adviser import get_advisers_by_regid from myuw.dao.pws import get_regid_of_current_user from myuw.dao import is_using_file_dao, get_netid_of_current_user def get_academic_advisers(request): """ returns a list of uw_sws.models.StudentAdviser for the current user """ if is_using_file_dao(): if get_netid_of_current_user(request) == 'javg002': raise DataFailureException( "/student/v5/person/advisers.json", 500, "mock 500 error") return get_advisers_by_regid(get_regid_of_current_user(request))
{ "content_hash": "8b7d3833e228db9ff161466224d985a8", "timestamp": "", "source": "github", "line_count": 21, "max_line_length": 68, "avg_line_length": 34.333333333333336, "alnum_prop": 0.6976421636615812, "repo_name": "uw-it-aca/myuw", "id": "f96d3aa125f21dbbad70dc08f613c97765caf943", "size": "809", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "myuw/dao/adviser.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "1353" }, { "name": "Dockerfile", "bytes": "1182" }, { "name": "HTML", "bytes": "87842" }, { "name": "JavaScript", "bytes": "362025" }, { "name": "Python", "bytes": "1057335" }, { "name": "SCSS", "bytes": "5763" }, { "name": "Shell", "bytes": "838" }, { "name": "Vue", "bytes": "522119" } ], "symlink_target": "" }
from django.contrib import admin from quick_reports_demo.blog.models import Article class ArticleAdmin(admin.ModelAdmin): def __init__(self, *args, **kwargs): super(ArticleAdmin, self).__init__(*args, **kwargs) list_display = ('pk', 'title', 'created_at', 'status') list_filter = ('status', ) admin.site.register(Article, ArticleAdmin)
{ "content_hash": "eb930a722351a6e3c707880f0348e6f1", "timestamp": "", "source": "github", "line_count": 13, "max_line_length": 59, "avg_line_length": 27.76923076923077, "alnum_prop": 0.6759002770083102, "repo_name": "brsbilgic/django-quick-reports", "id": "6e02eb52a634454ff8b56f7be0850a39f13d713c", "size": "361", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "quick_reports_demo/blog/admin.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "3475" }, { "name": "HTML", "bytes": "6176" }, { "name": "JavaScript", "bytes": "5783" }, { "name": "Python", "bytes": "19735" } ], "symlink_target": "" }
""" Support for playing AudioSegments. Pyaudio will be used if it's installed, otherwise will fallback to ffplay. Pyaudio is a *much* nicer solution, but is tricky to install. See my notes on installing pyaudio in a virtualenv (on OSX 10.10): https://gist.github.com/jiaaro/9767512210a1d80a8a0d """ import subprocess from tempfile import NamedTemporaryFile from .utils import get_player_name, make_chunks PLAYER = get_player_name() def _play_with_ffplay(seg): with NamedTemporaryFile("w+b", suffix=".wav") as f: seg.export(f.name, "wav") subprocess.call([PLAYER, "-nodisp", "-autoexit", "-hide_banner", f.name]) def _play_with_pyaudio(seg): import pyaudio p = pyaudio.PyAudio() stream = p.open(format=p.get_format_from_width(seg.sample_width), channels=seg.channels, rate=seg.frame_rate, output=True) # break audio into half-second chunks (to allows keyboard interrupts) for chunk in make_chunks(seg, 500): stream.write(chunk._data) stream.stop_stream() stream.close() p.terminate() def play(audio_segment): try: import pyaudio _play_with_pyaudio(audio_segment) except ImportError: _play_with_ffplay(audio_segment)
{ "content_hash": "d44edddfb37ba7db0a901cad0c45bd6e", "timestamp": "", "source": "github", "line_count": 46, "max_line_length": 81, "avg_line_length": 27.82608695652174, "alnum_prop": 0.66484375, "repo_name": "achang97/YouTunes", "id": "a162152aaadc18382ea7561052b9664aa06997d8", "size": "1280", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "lib/python2.7/site-packages/pydub/playback.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "9366" } ], "symlink_target": "" }
""" En una empresa hay 15 trabajadores. Se quiere hacer un torneo de ajedrez. Hay que escribir una función a la que se le pasa una lista de todos los empleados de la empresa y la función devuelve otra lista con todas las partidas posibles entre empleados. Nota: Un empleado no juega contra sí mismo. a. Solución Original b. Generar el orden en el que se van a ejecutar las partidas. Generamos un número al azar del tamaño de la lista, sacamos la pareja elegida y la guardamos en una nueva lista, borrando la pareja de la original. """ from random import randint trabajadores = ['T1', 'T2', 'T3', 'T4', 'T5', 'T6', 'T7', 'T8', 'T9', 'T10', 'T11', 'T12', 'T13', 'T14', 'T15'] def posibles_partidas(participantes): """ Devuelve una lista con las posibles parejas de participantes :param participantes: :return: """ return [(p1, p2) for p1 in trabajadores for p2 in trabajadores if p1 != p2] def partidas(participantes): """ Devuelve el orden en el cual las posibles_partidas se van a ejecutar :param participantes: :return: """ pospartidas = posibles_partidas(participantes) ret = [] while pospartidas: idx = randint(0, len(pospartidas) - 1) ret.append(pospartidas[idx]) del (pospartidas[idx]) return ret print(len(posibles_partidas(trabajadores)), posibles_partidas(trabajadores)) print(len(partidas(trabajadores)), partidas(trabajadores))
{ "content_hash": "86d4f7c0f8dd18c8edbc31c2bd99196d", "timestamp": "", "source": "github", "line_count": 39, "max_line_length": 155, "avg_line_length": 37.35897435897436, "alnum_prop": 0.6877144818119424, "repo_name": "IhToN/DAW1-PRG", "id": "f96d685074f6ceaae285bb83b8b6ba2be8a740d9", "size": "1463", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "Ejercicios/PrimTrim/Ejercicio31.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "276667" } ], "symlink_target": "" }
from enigma import eConsoleAppContainer, iServiceInformation, fbClass, eRCInput, eDBoxLCD, getDesktop from Screens.Screen import Screen from Plugins.Plugin import PluginDescriptor from os import symlink, mkdir, remove, rmdir, path class ShellStarter(Screen): skin = """ <screen position="1,1" size="1,1" title="TuxTXT" > </screen>""" faked_lcd = False def __init__(self, session, args = None): self.skin = ShellStarter.skin Screen.__init__(self, session) self.container=eConsoleAppContainer() self.container.appClosed.append(self.finished) self.runapp() def runapp(self): service = self.session.nav.getCurrentService() info = service and service.info() txtpid = info and "%d" %(info.getInfo(iServiceInformation.sTXTPID)) or "" stream = service and service.stream() demux = stream and stream.getStreamingData() demux = demux and demux.get("demux", -1) demux = demux > -1 and "%d" %(demux) or "" eDBoxLCD.getInstance().lock() eRCInput.getInstance().lock() fbClass.getInstance().lock() self.faked_lcd = not path.exists("/dev/dbox") if self.faked_lcd: mkdir("/dev/dbox") symlink("/dev/null", "/dev/dbox/lcd0") if self.container.execute("/usr/bin/tuxtxt " + demux + " " + txtpid): self.finished(-1) def finished(self,retval): fbClass.getInstance().unlock() eRCInput.getInstance().unlock() eDBoxLCD.getInstance().unlock() if self.faked_lcd: remove("/dev/dbox/lcd0") rmdir("/dev/dbox") #force redraw dsk = getDesktop(0) dsk.resize(dsk.size()) self.close() def main(session, **kwargs): session.open(ShellStarter) def Plugins(**kwargs): return PluginDescriptor(name="TuxTXT", description="Videotext", where = PluginDescriptor.WHERE_TELETEXT, fnc=main)
{ "content_hash": "fd9ff9eca1e4c86996e811b95f8eb0e4", "timestamp": "", "source": "github", "line_count": 60, "max_line_length": 115, "avg_line_length": 29.133333333333333, "alnum_prop": 0.6979405034324943, "repo_name": "JrCs/opendreambox", "id": "dad428bd375e8689ed6829040256f808f6a6919b", "size": "1748", "binary": false, "copies": "3", "ref": "refs/heads/opendreambox-1.6_JrCs", "path": "recipes/tuxbox/tuxbox-tuxtxt-32bpp/plugin.py", "mode": "33188", "license": "mit", "language": [ { "name": "ApacheConf", "bytes": "34990" }, { "name": "Assembly", "bytes": "29567" }, { "name": "Awk", "bytes": "736" }, { "name": "BitBake", "bytes": "5140828" }, { "name": "BlitzBasic", "bytes": "58093" }, { "name": "C", "bytes": "3428659" }, { "name": "C++", "bytes": "3715386" }, { "name": "CSS", "bytes": "984" }, { "name": "Groff", "bytes": "1252383" }, { "name": "HTML", "bytes": "9397" }, { "name": "JavaScript", "bytes": "13632" }, { "name": "Lua", "bytes": "17456" }, { "name": "Makefile", "bytes": "278216" }, { "name": "Objective-C", "bytes": "50381" }, { "name": "PHP", "bytes": "9144" }, { "name": "Perl", "bytes": "12121" }, { "name": "Prolog", "bytes": "2471" }, { "name": "Python", "bytes": "428818" }, { "name": "QMake", "bytes": "16238" }, { "name": "R", "bytes": "12383" }, { "name": "Shell", "bytes": "74782340" }, { "name": "VimL", "bytes": "2942" } ], "symlink_target": "" }
import json import unittest import mock from dashboard.pinpoint.models.quest import read_value @mock.patch('dashboard.services.isolate_service.Retrieve') class ReadValueTest(unittest.TestCase): def testReadValue(self, retrieve): retrieve.side_effect = ( {'files': {'chartjson-output.json': {'h': 'chartjson hash'}}}, json.dumps({'charts': {'metric': {'test': { 'type': 'list_of_scalar_values', 'values': [0, 1, 2], }}}}), ) execution = read_value.ReadValue('metric', 'test').Start('output hash') execution.Poll() self.assertTrue(execution.completed) self.assertFalse(execution.failed) self.assertEqual(execution.result_values, (0, 1, 2)) self.assertEqual(execution.result_arguments, {}) expected_calls = [mock.call('output hash'), mock.call('chartjson hash')] self.assertEqual(retrieve.mock_calls, expected_calls) def testReadValueWithNoTest(self, retrieve): retrieve.side_effect = ( {'files': {'chartjson-output.json': {'h': 'chartjson hash'}}}, json.dumps({'charts': {'metric': {'summary': { 'type': 'list_of_scalar_values', 'values': [0, 1, 2], }}}}), ) execution = read_value.ReadValue('metric', None).Start('output hash') execution.Poll() self.assertTrue(execution.completed) self.assertFalse(execution.failed) self.assertEqual(execution.result_values, (0, 1, 2)) self.assertEqual(execution.result_arguments, {}) expected_calls = [mock.call('output hash'), mock.call('chartjson hash')] self.assertEqual(retrieve.mock_calls, expected_calls) def testHistogram(self, retrieve): retrieve.side_effect = ( {'files': {'chartjson-output.json': {'h': 'chartjson hash'}}}, json.dumps({'charts': {'metric': {'test': { 'type': 'histogram', 'buckets': [ {'low': 0, 'count': 2}, {'low': 0, 'high': 2, 'count': 3}, ], }}}}), ) execution = read_value.ReadValue('metric', 'test').Start('output hash') execution.Poll() self.assertEqual(execution.result_values, (0, 0, 1, 1, 1)) def testHistogramWithLargeSample(self, retrieve): retrieve.side_effect = ( {'files': {'chartjson-output.json': {'h': 'chartjson hash'}}}, json.dumps({'charts': {'metric': {'test': { 'type': 'histogram', 'buckets': [ {'low': 0, 'count': 20000}, {'low': 0, 'high': 2, 'count': 30000}, ], }}}}), ) execution = read_value.ReadValue('metric', 'test').Start('output hash') execution.Poll() self.assertEqual(execution.result_values, tuple([0] * 4000 + [1] * 6000)) def testScalar(self, retrieve): retrieve.side_effect = ( {'files': {'chartjson-output.json': {'h': 'chartjson hash'}}}, json.dumps({'charts': {'metric': {'test': { 'type': 'scalar', 'value': 2.5, }}}}), ) execution = read_value.ReadValue('metric', 'test').Start('output hash') execution.Poll() self.assertEqual(execution.result_values, (2.5,))
{ "content_hash": "8b24d8dcb1a96c691baa058128b1f9d1", "timestamp": "", "source": "github", "line_count": 98, "max_line_length": 77, "avg_line_length": 32.30612244897959, "alnum_prop": 0.5808591282375237, "repo_name": "benschmaus/catapult", "id": "e2a69d6db231f2abaebeaf32c8c4d4494f27778d", "size": "3329", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "dashboard/dashboard/pinpoint/models/quest/read_value_test.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "C", "bytes": "4902" }, { "name": "C++", "bytes": "43486" }, { "name": "CSS", "bytes": "24873" }, { "name": "Go", "bytes": "58279" }, { "name": "HTML", "bytes": "11801772" }, { "name": "JavaScript", "bytes": "518002" }, { "name": "Makefile", "bytes": "1588" }, { "name": "Python", "bytes": "6141932" }, { "name": "Shell", "bytes": "2288" } ], "symlink_target": "" }
""" A family of high-level user commands acting on the entire simulation. Any new commands added to this directory will automatically become available for any program. Commands here should be 'bullet-proof' and work 'from scratch'. That is, they should print warnings if required but should not raise errors that would interrupt e.g. a long batch run of simulation work, no matter what the context from which they are called. """ import cPickle as pickle import sys import os import re import string import time import platform import tarfile, zipfile import __main__ # gzip module might not have been built (if zlib could not be found when building) try: import gzip except ImportError: pass import param from param.parameterized import ParameterizedFunction, ParamOverrides from param import normalize_path import imagen, numbergen from collections import OrderedDict import topo from topo.base.sheet import Sheet from topo.base.projection import ProjectionSheet from topo.sheet import GeneratorSheet from topo.misc.util import MultiFile from topo.misc.picklemain import PickleMain from topo.misc.snapshots import PicklableClassAttributes from topo.misc.genexamples import generate as _generate from featuremapper import PatternDrivenAnalysis def generate_example(target): """ Generate the saved network target, as defined in topo.misc.genexamples. """ _generate(targets=[target]) # Not sure where to put CommandMetaclass, since it doesn't need to be # seen or used by anyone. Probably also the import error classes # shouldn't be so visible. from param.parameterized import ParameterizedMetaclass class CommandMetaclass(ParameterizedMetaclass): """ A class having this as a metaclass will have its __call__() method automatically wrapped so that any exception occurring inside __call__() will be passed to the class's _except() method. """ def __new__(mcs,classname,bases,classdict): if '__call__' in classdict: classdict['__call__'] = mcs._safecall(classdict['__call__']) #assert '_except' in classdic # else it is probably abstract, or something return ParameterizedMetaclass.__new__(mcs,classname,bases,classdict) @classmethod def _safecall(mcs,fn): """ Wrap fn with caller, which catches any exception raised inside fn() and passes it to _except(). """ def caller(self,*args,**kw): try: return fn(self,*args,**kw) except Exception, e: # Mis-invoked call should raise error as normal. if isinstance(e,TypeError): import inspect # Is this a hack to detect mis-calling? if len(inspect.getargspec(fn)[0])!=len(args): raise try: return self._except(e) except: # the _except() method raises an error (or doesn't # exist). what should happen? programming error, # so I guess just re-raise raise return caller class Command(ParameterizedFunction): """ Parameterized command: any error when the command is run (called) will not raise an exception, but will instead generate a warning. """ __metaclass__ = CommandMetaclass __abstract = True def _except(self,e): # import traceback # print traceback.print_exc() self.warning("%s failed: %s"%(self,e)) # or traceback.format_exc())) def __call__(self,*args,**params): return super(Command,self).__call__(*args,**params) class ImportErrorObject(object): """ Raises an ImportError on any attempt to access an attribute, call, or get an item. Useful to delay an ImportError until the point of use, thus allowing e.g. a class attribute to contain something from a non-core external module (e.g. pylab). Delaying an ImportError until the point of use allows users to be informed of the possibility of having various extra functions on installation of a missing package. """ __dict__ = {} def __init__(self,module_name): self.__dict__['_ImportErrorObject__module_name'] = module_name def _raise(self): #param.Parameterized().warning("err:%s"%self.module_name) raise ImportError, "No module named %s. Install %s to get this functionality."%(self.__module_name,self.__module_name) return None def __call__(self,*args,**kw): self._raise() # Might be better to override __getattribute__, special casing the # module_name attribute. Then everything is guaranteed to raise an # error (rather than covering call, getitem, getattr, and maybe # other things I've forgotten about). def __getattr__(self,name): if name in self.__dict__: return self.__dict__[name] return self._raise() def __getitem__(self,i): self._raise() class ImportErrorRaisingFakeModule(object): """ Returns an ImportErrorObject for any attribute request. Instances of this class can be used in place of a module to delay an import error until the point of use of an attribute of that module. See ImportErrorObject for more details. """ def __init__(self,module_name): self.__module_name = module_name def __getattr__(self,name): return ImportErrorObject(self.__module_name) # CEBALERT: commands in here should inherit from Command, and make use # of _except() to ensure all necessary state is reverted. def save_input_generators(): """Save a copy of the active_sim's current input_generators for all GeneratorSheets.""" # ensure EPs get started (if save_input_generators is called before the simulation is run()) topo.sim.run(0.0) generator_sheets = topo.sim.objects(GeneratorSheet).values() for sheet in generator_sheets: sheet.push_input_generator() def restore_input_generators(): """Restore previously saved input_generators for all of topo.sim's GeneratorSheets.""" generator_sheets = topo.sim.objects(GeneratorSheet).values() for sheet in generator_sheets: sheet.pop_input_generator() def clear_event_queue(): """Remove pending events from the simulator's event queue.""" topo.sim.event_clear() class runscript(param.ParameterizedFunction): """ Runs a script that has been parameterized with script parameters. For example, runscript('tiny.ty', cortex_density=10) will execute the 'tiny.ty' script in the currently active namespace. """ ns = param.Parameter(default={}, pickle_default_value=False, doc=""" The namespace in which the script is to be executed.""") push = param.Callable(pickle_default_value=False, doc=""" Hook to push the updated namespace for handling more complicated namespaces, such as IPython Notebook.""") load = param.Boolean(default=True, doc=""" Whether to automatically load class based models when called. Useful for compatibility with older ty script definition files.""") def __call__(self, source_file, ns={}, **kwargs): from topo.misc.commandline import global_params ns = ns if ns else self.ns for (key, val) in kwargs.items(): global_params.exec_in_context('%s=%s' % (key,val)) source_path = param.resolve_path(source_file) code = compile(open(source_path, 'r').read(), "<execution>", "exec") exec code in ns #globals and locals self.push(ns) if self.load: topo.sim(verbose=kwargs.get('verbose', False)) # This class is left around to support older snapshots: All snapshots # since 0.9.7 up until r11545 (addition of UnpickleEnvironmentCreator) # have a pickled instance of this class. We maintain the same behavior # as before for them: install all legacy support. class _VersionPrinter(object): def __setstate__(self,state): import topo.misc.legacy as L L.SnapshotSupport.install("0.9.7") class UnpickleEnvironmentCreator(object): """When unpickled, installs any necessary legacy support.""" def __init__(self,release,version): self.release = release self.version = version def __getstate__(self): return {'release':self.release, 'version':self.version} def __setstate__(self,state): self.release = state['release'] self.version = state['version'] import topo.misc.legacy as L L.SnapshotSupport.install(self.release,self.version) def save_snapshot(snapshot_name=None): """ Save a snapshot of the network's current state. The snapshot is saved as a gzip-compressed Python binary pickle. As this function uses Python's 'pickle' module, it is subject to the same limitations (see the pickle module's documentation) - with the notable exception of class attributes. Python does not pickle class attributes, but this function stores class attributes of any Parameterized class that is declared within the topo package. See the param.parameterized.PicklableClassAttributes class for more information. """ if not snapshot_name: snapshot_name = topo.sim.basename() + ".typ" # For now we just search topo, but could do same for other packages. # CEBALERT: shouldn't it be topo and param? I guess we already get # many classes defined in param because they are imported into # topo at some point anyway. topoPOclassattrs = PicklableClassAttributes(topo,exclusions=('plotting','tests','tkgui'), startup_commands=topo.sim.startup_commands) paramPOclassattrs = PicklableClassAttributes(param) imagenPOclassattrs = PicklableClassAttributes(imagen) numbergenPOclassattrs = PicklableClassAttributes(numbergen) from topo.misc.commandline import global_params topo.sim.RELEASE=topo.release topo.sim.VERSION=topo.version to_save = (UnpickleEnvironmentCreator(topo.release,topo.version), PickleMain(), global_params, topoPOclassattrs, paramPOclassattrs, imagenPOclassattrs, numbergenPOclassattrs, topo.sim) try: snapshot_file=gzip.open(normalize_path(snapshot_name),'wb',compresslevel=5) except NameError: snapshot_file=open(normalize_path(snapshot_name),'wb') pickle.dump(to_save,snapshot_file,2) snapshot_file.close() def load_snapshot(snapshot_name): """ Load the simulation stored in snapshot_name. """ # unpickling the PicklableClassAttributes() executes startup_commands and # sets PO class parameters. snapshot_name = param.resolve_path(snapshot_name) # If it's not gzipped, open as a normal file. try: snapshot = gzip.open(snapshot_name,'r') snapshot.read(1) snapshot.seek(0) except (IOError,NameError): snapshot = open(snapshot_name,'r') try: pickle.load(snapshot) except ImportError: # CEBALERT: Support snapshots where the unpickling support # (UnpickleEnvironmentCreator) cannot be found because the # support itself was moved from topo.command.basic to # topo.command.__init__! Was it a bad idea to have the support # code loaded via an object? sys.modules['topo.command.basic'] = topo.command # Could instead set find_global on cPickle.Unpickler (could # support all import changes that way, as alternative to what # we currently do), but I'm reluctant to mess with cPickle's # default way of finding things. (Also it would be specific to # cPickle; would be different for pickle.) snapshot.seek(0) try: pickle.load(snapshot) except: import traceback m = """ Snapshot could not be loaded. Please file a support request via topographica.org. Loading error: %s """%traceback.format_exc() param.Parameterized(name="load_snapshot").warning(m) snapshot.close() # Restore subplotting prefs without worrying if there is a # problem (e.g. if topo/analysis/ is not present) try: from topo.plotting.plotgroup import Subplotting Subplotting.restore_subplots() except: p = param.Parameterized(name="load_snapshot") p.message("Unable to restore Subplotting settings") # Temporary -- broadcast topo.sim.time to all subpackages param.Dynamic.time_fn = topo.sim.time numbergen.TimeAware.time_fn = topo.sim.time imagen.Sweeper.time_fn = topo.sim.time def save_script_repr(script_name=None): """ Save the current simulation as a Topographica script. Generates a script that, if run, would generate a simulation with the same architecture as the one currently in memory. This can be useful when defining networks in place, so that the same general configuration can be recreated later. It also helps when comparing two similar networks generated with different scripts, so that the corresponding items can be matched rigorously. Note that the result of this operation is usually just a starting point for further editing, because it will not usually be runnable as-is (for instance, some parameters may not have runnable representations). Even so, this is usually a good start. """ if not script_name: script_name = topo.sim.basename() + "_script_repr.ty" header = ("# Generated by Topographica %s on %s\n\n" % (topo.release,time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime()))) script = header+topo.sim.script_repr() script_file = open(normalize_path(script_name),'w') script_file.write(script) # Location of the version-controlled topographica directory (i.e. path # of topo/ but up a level). Could be None. Nothing should assume that # there is a version control system available. try: vc_topographica_dir = os.path.split(os.path.split(topo.__file__)[0])[0] except: vc_topographica_dir = None # decorator that changes to vc_topographica_dir for duration of fn, # if there is such a directory. Otherwise, doesn't change directory. def in_vc_topographica_dir(fn): import os def temporarily_change_to_vc_topographica_dir(*args,**kw): orig_path = os.getcwd() if vc_topographica_dir is not None: os.chdir(vc_topographica_dir) try: result = fn(*args,**kw) finally: # ensure dir put back even if there's an error calling fn if os.getcwd()!=orig_path: os.chdir(orig_path) return result return temporarily_change_to_vc_topographica_dir @in_vc_topographica_dir def _get_vc_commands(): # return name of version control system (None if no vc could be # detected) import os.path vc_types = {'git':["status","diff",["log","-n1"],["svn","log","--limit=1"]], 'svn':["info","status","diff"], 'bzr':['info','status','diff']} for vc_type,commands in vc_types.items(): if os.path.exists(".%s"%vc_type): return vc_type,commands @in_vc_topographica_dir def _print_vc_info(filename): """Save the version control status of the current code to the specified file.""" try: import subprocess f = open(normalize_path(filename),'w') f.write("Information about working copy used for batch run\n\n") f.write("topo.version=%s\n"% topo.version) f.flush() vctype,commands = _get_vc_commands() for cmd in commands: fullcmd = [vctype,cmd] if isinstance(cmd,str) else [vctype]+cmd # Note that we do not wait for the process below to finish # (by calling e.g. wait() on the Popen object). Although # this was probably done unintentionally, for a slow svn # connection, it's an advantage. But it does mean the # output of each command can appear in the file at any # time (i.e. the command outputs appear in the order of # finishing, rather than in the order of starting, making # it impossible to label the commands). subprocess.Popen(fullcmd,stdout=f,stderr=subprocess.STDOUT) except: print "Unable to retrieve version control information." finally: f.close() def _save_parameters(p,filename): from topo.misc.commandline import global_params g = {'global_params_specified':p, 'global_params_all':dict(global_params.get_param_values())} for d in g.values(): if 'name' in d: del d['name'] if 'print_level' in d: del d['print_level'] pickle.dump(g,open(normalize_path(filename),'w')) # I'd expect your personal name_replacements to be set in some file # you use to create batch runs, but it can alsp be set on the # commandline. Before calling run_batch(), include something like the # following: # run_batch.dirname_params_filter.map=OrderedDict(("cortex_density","cd")) class param_formatter(ParameterizedFunction): # CEBALERT: should I have made this a parameter at the run_batch # level? And I don't know what to call it. map = param.Dict(default=OrderedDict(),doc=""" Optional ordered dictionary of alternative names to use for parameters, parameter_name:alternative_name Use to shorten common parameter names (directory names are limited in length on most file systems), and to specify an order. Names not specified here will be sorted alphabetically.""") def __call__(self,params): result = "" # present in params but not in map unspecified_in_map = sorted(set(params).difference(set(self.map))) # present in params and in map, preserving order of map specified_in_map = [n for n in self.map.keys() if n in params] for pname in specified_in_map+unspecified_in_map: val = params[pname] # Special case to give reasonable filenames for lists valstr= ("_".join([str(i) for i in val]) if isinstance(val,list) else str(val)) result += "," + self.map.get(pname,pname) + "=" + valstr return result # Used only by default_analysis_function # Should be in order they are needed; e.g. Activity after map measurement, # in case Activity plot includes map subplots default_analysis_plotgroups=["Orientation Preference","Activity"] def default_analysis_function(): """ Basic example of an analysis command for run_batch; users are likely to need something similar but highly customized. """ # CEBALERT: why are these imports here rather than at the top? import topo from topo.plotting.plotgroup import save_plotgroup # Save all plotgroups listed in default_analysis_plotgroups for pg in default_analysis_plotgroups: save_plotgroup(pg,use_cached_results=True) # Plot projections from each measured map measured_sheets = [s for s in topo.sim.objects(ProjectionSheet).values() if hasattr(s,'measure_maps') and s.measure_maps] for s in measured_sheets: for p in s.in_connections: save_plotgroup("Projection",projection=p) # Test response to a standardized pattern from imagen import Gaussian from analysis import pattern_present from math import pi pattern_present(inputs=Gaussian(orientation=pi/4,aspect_ratio=4.7)) save_plotgroup("Activity",saver_params={"filename_suffix":"_45d"}) def load_kwargs(fname, glob, loc, fail_exception=False): """ Helper function to allow keyword arguments (dictionary format) to be loaded from a file 'fname'. The intended use is to allow a callable (specifically run_batch) to obtain its settings and parameters from file. This is useful when dispatching jobs on a cluster as you can then queue run_batch jobs (eg. using qsub) before all the settings are known. This type of scenario is typical in parameter search (eg hillclimbing) where the settings file for future run_batch instances are conditional on data from previous simulations. Variable glob should be provided as globals() and loc should be provided as locals(). Either a dictionary is returned or an exception is raised (conditioned on fail_exception). If fail_exception=False and eval does not evaluateas expected, an empty dictionary is returned. Eval is used as it allows classes, objects and other complex datastructures to load. """ with open(fname,'r') as f: lines = f.readlines() expression = "".join([l.strip() for l in lines]) kwargs = eval(expression, glob, loc) if not isinstance(kwargs,dict): if fail_exception: raise Exception('Invalid settings file.') else: return {} else: return kwargs # ALERT: Need to move docs into params. class run_batch(ParameterizedFunction): """ Run a Topographica simulation in batch mode. Features: - Generates a unique, well-defined name for each 'experiment' (i.e. simulation run) based on the date, script file, and parameter settings. Note that very long names may be truncated (see the max_name_length parameter). - Allows parameters to be varied on the command-line, to allow comparing various settings - Saves a script capturing the simulation state periodically, to preserve parameter values from old experiments and to allow them to be reproduced exactly later - Can perform user-specified analysis routines periodically, to monitor the simulation as it progresses. - Stores commandline output (stdout) in the output directory A typical use of this function is for remote execution of a large number of simulations with different parameters, often on remote machines (such as clusters). The script_file parameter defines the .ty script we want to run in batch mode. The output_directory defines the root directory in which a unique individual directory will be created for this particular run. The optional analysis_fn can be any python function to be called at each of the simulation iterations defined in the analysis times list. The analysis_fn should perform whatever analysis of the simulation you want to perform, such as plotting or calculating some statistics. The analysis_fn should avoid using any GUI functions (i.e., should not import anything from topo.tkgui), and it should save all of its results into files. As a special case, a number can be passed for the times list, in which case it is used to scale a default list of times up to 10000; e.g. times=2 will select a default list of times up to 20000. Alternatively, an explicit list of times can be supplied. Any other optional parameters supplied will be set in the main namespace before any scripts are run. They will also be used to construct a unique topo.sim.name for the file, and they will be encoded into the simulation directory name, to make it clear how each simulation differs from the others. If requested by setting snapshot=True, saves a snapshot at the end of the simulation. If available and requested by setting vc_info=True, prints the revision number and any outstanding diffs from the version control system. Note that this function alters param.normalize_path.prefix so that all output goes into the same location. The original value of param.normalize_path.prefix is deliberately not restored at the end of the function so that the output of any subsequent commands will go into the same place. """ output_directory=param.String("Output") analysis_fn = param.Callable(default_analysis_function) times = param.Parameter(1.0) snapshot=param.Boolean(True) vc_info=param.Boolean(True) dirname_prefix = param.String(default="",doc=""" Optional prefix for the directory name (allowing e.g. easy grouping).""") tag = param.String(default="",doc=""" Optional tag to embed in directory prefix to allow unique directory naming across multiple independent batches that share a common timestamp.""") # CB: do any platforms also have a maximum total path length? max_name_length = param.Number(default=200,doc=""" The experiment's directory name will be truncated at this number of characters (since most filesystems have a limit).""") name_time_format = param.String(default="%Y%m%d%H%M",doc=""" String format for the time included in the output directory and file names. See the Python time module library documentation for codes. E.g. Adding '%S' to the default would include seconds.""") timestamp = param.NumericTuple(default=(0,0), doc=""" Optional override of timestamp in Python struct_time 8-tuple format. Useful when running many run_batch commands as part of a group with a shared timestamp. By default, the timestamp used is the time when run_batch is started.""") save_global_params = param.Boolean(default=True,doc=""" Whether to save the script's global_parameters to a pickle in the output_directory after the script has been loaded (for e.g. future inspection of the experiment).""") dirname_params_filter = param.Callable(param_formatter.instance(),doc=""" Function to control how the parameter names will appear in the output_directory's name.""") metadata_dir = param.String(doc="""Specifies the name of a subdirectory used to output metadata from run_batch (if set).""") compress_metadata = param.ObjectSelector(default=None, objects=[None, 'tar.gz', 'zip'], doc=""" If not None and a metadata directory is specified, the metadata directory will be replaced by either a tar.gz file or a .zip file.""") save_script_repr = param.ObjectSelector(default='first', objects=[None, 'first', 'last', 'all'], doc=""" Whether to save a script_repr and if so, how often. If set to 'first', the script_repr is saved on the first time value, if set to 'last' then it will be saved on the last time value. If set to 'all' then a script repr is saved for all time values. Saving is disabled entirely if set to None.""") progress_bar = param.String(default='stdout', doc=""" The display mode for the progress bar. By default, the progress of run_batch is displayed using standard output but may also be set to 'disabled' as necessary.""") progress_interval = param.Number(default=100, doc=""" Interval between updates of the progress bar (if enabled) in units of topo.sim.time.""") def _truncate(self,p,s): """ If s is greater than the max_name_length parameter, truncate it (and indicate that it has been truncated). """ # '___' at the end is supposed to represent '...' return s if len(s)<=p.max_name_length else s[0:p.max_name_length-3]+'___' def __call__(self,script_file,**params_to_override): p=ParamOverrides(self,params_to_override,allow_extra_keywords=True) import os import shutil # Construct simulation name, etc. scriptbase= re.sub('.ty$','',os.path.basename(script_file)) prefix = "" if p.timestamp==(0,0): prefix += time.strftime(p.name_time_format) else: prefix += time.strftime(p.name_time_format, p.timestamp) prefix += "_" + scriptbase + "_" + p.tag simname = prefix # Construct parameter-value portion of filename; should do more filtering # CBENHANCEMENT: should provide chance for user to specify a # function (i.e. make this a function, and have a parameter to # allow the function to be overridden). # And sort by name by default? Skip ones that aren't different # from default, or at least put them at the end? prefix += p.dirname_params_filter(p.extra_keywords()) # Set provided parameter values in main namespace from topo.misc.commandline import global_params global_params.set_in_context(**p.extra_keywords()) # Create output directories if not os.path.isdir(normalize_path(p.output_directory)): try: os.mkdir(normalize_path(p.output_directory)) except OSError: pass # Catches potential race condition (simultaneous run_batch runs) dirname = self._truncate(p,p.dirname_prefix+prefix) dirpath = normalize_path(os.path.join(p.output_directory,dirname)) normalize_path.prefix = dirpath metadata_dir = os.path.join(normalize_path.prefix, p.metadata_dir) simpath = os.path.join(metadata_dir, simname) if os.path.isdir(normalize_path.prefix): print "Batch run: Warning -- directory already exists!" print "Run aborted; wait one minute before trying again, or else rename existing directory: \n" + \ normalize_path.prefix sys.exit(-1) else: os.makedirs(metadata_dir) print "Batch run output will be in " + normalize_path.prefix if p.vc_info: _print_vc_info(simpath + ".diffs") hostinfo = "Host: " + " ".join(platform.uname()) topographicalocation = "Topographica: " + os.path.abspath(sys.argv[0]) topolocation = "topo package: " + os.path.abspath(topo.__file__) scriptlocation = "script: " + os.path.abspath(script_file) starttime=time.time() startnote = "Batch run started at %s." % time.strftime("%a %d %b %Y %H:%M:%S +0000", time.gmtime()) # store a re-runnable copy of the command used to start this batch run try: # pipes.quote is undocumented, so I'm not sure which # versions of python include it (I checked python 2.6 and # 2.7 on linux; they both have it). import pipes quotefn = pipes.quote except (ImportError,AttributeError): # command will need a human to insert quotes before it can be re-used quotefn = lambda x: x command_used_to_start = string.join([quotefn(arg) for arg in sys.argv]) # CBENHANCEMENT: would be nice to separately write out a # runnable script that does everything necessary to # re-generate results (applies diffs etc). # Shadow stdout to a .out file in the output directory, so that # print statements will go to both the file and to stdout. batch_output = open(normalize_path(simpath+".out"),'w') batch_output.write(command_used_to_start+"\n") sys.stdout = MultiFile(batch_output,sys.stdout) print print hostinfo print topographicalocation print topolocation print scriptlocation print print startnote from topo.misc.commandline import auto_import_commands auto_import_commands() # Ensure that saved state includes all parameter values from topo.command import save_script_repr param.parameterized.script_repr_suppress_defaults=False # Save a copy of the script file for reference shutil.copy2(script_file, normalize_path.prefix) shutil.move(normalize_path(scriptbase+".ty"), normalize_path(simpath+".ty")) # Default case: times is just a number that scales a standard list of times times=p.times if not isinstance(times,list): times=[t*times for t in [0,50,100,500,1000,2000,3000,4000,5000,10000]] # Run script in main error_count = 0 initial_warning_count = param.parameterized.warning_count try: execfile(script_file,__main__.__dict__) #global_params.context global_params.check_for_unused_names() if p.save_global_params: _save_parameters(p.extra_keywords(), simpath+".global_params.pickle") print_sizes() topo.sim.name=simname from holoviews.ipython.widgets import ProgressBar, RunProgress import numpy as np ProgressBar.display = p.progress_bar progress_bar = RunProgress(run_hook = topo.sim.run, display = p.progress_bar, interval = p.progress_interval) if len(set(times)) == 1: completion = [0, 100] else: times = np.array(times) completion = 100 * (times - times.min()) / (times.max() - times.min()) completion = np.array([0] + list(completion)) # Run each segment, doing the analysis and saving the script state each time for i, run_to in enumerate(times): progress_bar.percent_range = (completion[i], completion[i+1]) progress_bar(run_to - topo.sim.time()) p.analysis_fn() normalize_path.prefix = metadata_dir if p.save_script_repr == 'first' and run_to == times[0]: save_script_repr() elif p.save_script_repr == 'last' and (run_to == times[-1]): save_script_repr() elif p.save_script_repr == 'all': save_script_repr() normalize_path.prefix = dirpath elapsedtime=time.time()-starttime param.Parameterized(name="run_batch").message( "Elapsed real time %02d:%02d." % (int(elapsedtime/60),int(elapsedtime%60))) if p.snapshot: save_snapshot() except: error_count+=1 import traceback traceback.print_exc(file=sys.stdout) sys.stderr.write("Warning -- Error detected: execution halted.\n") if p.metadata_dir != '' and p.compress_metadata == 'tar.gz': _, name = os.path.split(metadata_dir) tar = tarfile.open(normalize_path("%s.tar.gz" % name), "w:gz") tar.add(metadata_dir, arcname=name) tar.close() shutil.rmtree(metadata_dir) elif p.metadata_dir != '' and p.compress_metadata == 'zip': _, name = os.path.split(metadata_dir) zipf = zipfile.ZipFile(normalize_path("%s.zip" % name), 'w') zipf.write(metadata_dir, arcname=name) for f in os.listdir(metadata_dir): zipf.write(os.path.join(metadata_dir, f), os.path.join(p.metadata_dir,f)) zipf.close() shutil.rmtree(metadata_dir) print "\nBatch run completed at %s." % time.strftime("%a %d %b %Y %H:%M:%S +0000", time.gmtime()) print "There were %d error(s) and %d warning(s)%s." % \ (error_count,(param.parameterized.warning_count-initial_warning_count), ((" (plus %d warning(s) prior to entering run_batch)"%initial_warning_count if initial_warning_count>0 else ""))) # restore stdout sys.stdout = sys.__stdout__ batch_output.close() def wipe_out_activity(): """ Resets activity of all Sheets and their connections to zero. """ # ALERT: this works for now, but it may need to be implemented # recursively using methods implemented separately on each class, # if there are often new types of objects created that store an # activity value. for s in topo.sim.objects(Sheet).values(): s.activity*=0.0 for c in s.in_connections: if hasattr(c,'activity'): c.activity*=0.0 def n_bytes(): """ Estimate the minimum memory needed for the Sheets in this Simulation, in bytes. This estimate is a lower bound only, based primarily on memory for the matrices used for activity and connections. """ return sum([s.n_bytes() for s in topo.sim.objects(Sheet).values()]) def n_conns(): """ Count the number of connections in all ProjectionSheets in the current Simulation. """ return sum([s.n_conns() for s in topo.sim.objects(ProjectionSheet).values()]) def print_sizes(): """Format the results from n_conns() and n_bytes() for use in batch output.""" print "Defined %d-connection network; %0.0fMB required for weight storage." % \ (n_conns(),max(n_bytes()/1024.0/1024.0,1.0)) # added these two function to the PatternDrivenAnalysis hooks PatternDrivenAnalysis.pre_presentation_hooks.append(topo.sim.state_push) PatternDrivenAnalysis.pre_presentation_hooks.append(wipe_out_activity) PatternDrivenAnalysis.pre_presentation_hooks.append(clear_event_queue) PatternDrivenAnalysis.post_presentation_hooks.append(topo.sim.state_pop) # maybe an explicit list would be better? import types _public = list(set([_k for _k,_v in locals().items() if isinstance(_v,types.FunctionType) or (isinstance(_v,type) and issubclass(_v,ParameterizedFunction)) and not _v.__name__.startswith('_')])) _public += [ "_VersionPrinter", "UnpickleEnvironmentCreator", "ImportErrorRaisingFakeModule", "ImportErrorObject", ] # Automatically discover all .py files in this directory. import os,fnmatch __all__ = _public + [f.split('.py')[0] for f in os.listdir(__path__[0]) if fnmatch.fnmatch(f,'[!._]*.py')] del f,os,fnmatch
{ "content_hash": "64af2c33719294eb37e6c209c2199de1", "timestamp": "", "source": "github", "line_count": 978, "max_line_length": 126, "avg_line_length": 38.97648261758691, "alnum_prop": 0.6501482200477452, "repo_name": "mjabri/topographica", "id": "fbc9ef0bc8736706a9e5e879f20bbd577b6c8d68", "size": "38119", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "topo/command/__init__.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "C", "bytes": "14889" }, { "name": "C++", "bytes": "5714" }, { "name": "Elixir", "bytes": "202" }, { "name": "JavaScript", "bytes": "122" }, { "name": "Makefile", "bytes": "15490" }, { "name": "Python", "bytes": "1878339" }, { "name": "Shell", "bytes": "1577" }, { "name": "TeX", "bytes": "253834" } ], "symlink_target": "" }
from __future__ import unicode_literals try: from docutils.core import publish_parts def render_rest(markup, **docutils_settings): docutils_settings.update({ 'raw_enabled': False, 'file_insertion_enabled': False, }) parts = publish_parts( source=markup, writer_name="html4css1", settings_overrides=docutils_settings, ) return parts["html_body"] except ImportError: pass
{ "content_hash": "d4d4b027f24c079b21191f7d63bfe4ca", "timestamp": "", "source": "github", "line_count": 19, "max_line_length": 49, "avg_line_length": 25.526315789473685, "alnum_prop": 0.5896907216494846, "repo_name": "zsiciarz/django-markitup", "id": "76cf86b27369e71f193d367cea30b4a959a8a72a", "size": "485", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "markitup/renderers.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "10918" }, { "name": "HTML", "bytes": "2087" }, { "name": "JavaScript", "bytes": "35707" }, { "name": "Python", "bytes": "39614" } ], "symlink_target": "" }
"""Statistics helper for sensor.""" from __future__ import annotations from collections import defaultdict from collections.abc import Callable, Iterable import datetime import itertools import logging import math from typing import Any from sqlalchemy.orm.session import Session from homeassistant.components.recorder import ( history, is_entity_recorded, statistics, util as recorder_util, ) from homeassistant.components.recorder.const import DOMAIN as RECORDER_DOMAIN from homeassistant.components.recorder.models import ( StatisticData, StatisticMetaData, StatisticResult, ) from homeassistant.components.sensor import ( ATTR_STATE_CLASS, DEVICE_CLASS_ENERGY, DEVICE_CLASS_GAS, DEVICE_CLASS_MONETARY, DEVICE_CLASS_PRESSURE, DEVICE_CLASS_TEMPERATURE, STATE_CLASS_MEASUREMENT, STATE_CLASS_TOTAL, STATE_CLASS_TOTAL_INCREASING, STATE_CLASSES, ) from homeassistant.const import ( ATTR_DEVICE_CLASS, ATTR_UNIT_OF_MEASUREMENT, DEVICE_CLASS_POWER, ENERGY_KILO_WATT_HOUR, ENERGY_MEGA_WATT_HOUR, ENERGY_WATT_HOUR, POWER_KILO_WATT, POWER_WATT, PRESSURE_BAR, PRESSURE_HPA, PRESSURE_INHG, PRESSURE_KPA, PRESSURE_MBAR, PRESSURE_PA, PRESSURE_PSI, TEMP_CELSIUS, TEMP_FAHRENHEIT, TEMP_KELVIN, VOLUME_CUBIC_FEET, VOLUME_CUBIC_METERS, ) from homeassistant.core import HomeAssistant, State from homeassistant.exceptions import HomeAssistantError from homeassistant.helpers.entity import entity_sources import homeassistant.util.dt as dt_util import homeassistant.util.pressure as pressure_util import homeassistant.util.temperature as temperature_util import homeassistant.util.volume as volume_util from . import ATTR_LAST_RESET, DOMAIN _LOGGER = logging.getLogger(__name__) DEVICE_CLASS_STATISTICS: dict[str, dict[str, set[str]]] = { STATE_CLASS_MEASUREMENT: { # Deprecated, support will be removed in Home Assistant 2021.11 DEVICE_CLASS_ENERGY: {"sum"}, DEVICE_CLASS_GAS: {"sum"}, DEVICE_CLASS_MONETARY: {"sum"}, }, STATE_CLASS_TOTAL: {}, STATE_CLASS_TOTAL_INCREASING: {}, } DEFAULT_STATISTICS = { STATE_CLASS_MEASUREMENT: {"mean", "min", "max"}, STATE_CLASS_TOTAL: {"sum"}, STATE_CLASS_TOTAL_INCREASING: {"sum"}, } # Normalized units which will be stored in the statistics table DEVICE_CLASS_UNITS = { DEVICE_CLASS_ENERGY: ENERGY_KILO_WATT_HOUR, DEVICE_CLASS_POWER: POWER_WATT, DEVICE_CLASS_PRESSURE: PRESSURE_PA, DEVICE_CLASS_TEMPERATURE: TEMP_CELSIUS, DEVICE_CLASS_GAS: VOLUME_CUBIC_METERS, } UNIT_CONVERSIONS: dict[str, dict[str, Callable]] = { # Convert energy to kWh DEVICE_CLASS_ENERGY: { ENERGY_KILO_WATT_HOUR: lambda x: x, ENERGY_MEGA_WATT_HOUR: lambda x: x * 1000, ENERGY_WATT_HOUR: lambda x: x / 1000, }, # Convert power W DEVICE_CLASS_POWER: { POWER_WATT: lambda x: x, POWER_KILO_WATT: lambda x: x * 1000, }, # Convert pressure to Pa # Note: pressure_util.convert is bypassed to avoid redundant error checking DEVICE_CLASS_PRESSURE: { PRESSURE_BAR: lambda x: x / pressure_util.UNIT_CONVERSION[PRESSURE_BAR], PRESSURE_HPA: lambda x: x / pressure_util.UNIT_CONVERSION[PRESSURE_HPA], PRESSURE_INHG: lambda x: x / pressure_util.UNIT_CONVERSION[PRESSURE_INHG], PRESSURE_KPA: lambda x: x / pressure_util.UNIT_CONVERSION[PRESSURE_KPA], PRESSURE_MBAR: lambda x: x / pressure_util.UNIT_CONVERSION[PRESSURE_MBAR], PRESSURE_PA: lambda x: x / pressure_util.UNIT_CONVERSION[PRESSURE_PA], PRESSURE_PSI: lambda x: x / pressure_util.UNIT_CONVERSION[PRESSURE_PSI], }, # Convert temperature to °C # Note: temperature_util.convert is bypassed to avoid redundant error checking DEVICE_CLASS_TEMPERATURE: { TEMP_CELSIUS: lambda x: x, TEMP_FAHRENHEIT: temperature_util.fahrenheit_to_celsius, TEMP_KELVIN: temperature_util.kelvin_to_celsius, }, # Convert volume to cubic meter DEVICE_CLASS_GAS: { VOLUME_CUBIC_METERS: lambda x: x, VOLUME_CUBIC_FEET: volume_util.cubic_feet_to_cubic_meter, }, } # Keep track of entities for which a warning about decreasing value has been logged SEEN_DIP = "sensor_seen_total_increasing_dip" WARN_DIP = "sensor_warn_total_increasing_dip" # Keep track of entities for which a warning about negative value has been logged WARN_NEGATIVE = "sensor_warn_total_increasing_negative" # Keep track of entities for which a warning about unsupported unit has been logged WARN_UNSUPPORTED_UNIT = "sensor_warn_unsupported_unit" WARN_UNSTABLE_UNIT = "sensor_warn_unstable_unit" def _get_sensor_states(hass: HomeAssistant) -> list[State]: """Get the current state of all sensors for which to compile statistics.""" all_sensors = hass.states.all(DOMAIN) statistics_sensors = [] for state in all_sensors: if not is_entity_recorded(hass, state.entity_id): continue if (state.attributes.get(ATTR_STATE_CLASS)) not in STATE_CLASSES: continue statistics_sensors.append(state) return statistics_sensors def _time_weighted_average( fstates: list[tuple[float, State]], start: datetime.datetime, end: datetime.datetime ) -> float: """Calculate a time weighted average. The average is calculated by weighting the states by duration in seconds between state changes. Note: there's no interpolation of values between state changes. """ old_fstate: float | None = None old_start_time: datetime.datetime | None = None accumulated = 0.0 for fstate, state in fstates: # The recorder will give us the last known state, which may be well # before the requested start time for the statistics start_time = start if state.last_updated < start else state.last_updated if old_start_time is None: # Adjust start time, if there was no last known state start = start_time else: duration = start_time - old_start_time # Accumulate the value, weighted by duration until next state change assert old_fstate is not None accumulated += old_fstate * duration.total_seconds() old_fstate = fstate old_start_time = start_time if old_fstate is not None: # Accumulate the value, weighted by duration until end of the period assert old_start_time is not None duration = end - old_start_time accumulated += old_fstate * duration.total_seconds() return accumulated / (end - start).total_seconds() def _get_units(fstates: list[tuple[float, State]]) -> set[str | None]: """Return True if all states have the same unit.""" return {item[1].attributes.get(ATTR_UNIT_OF_MEASUREMENT) for item in fstates} def _parse_float(state: str) -> float: """Parse a float string, throw on inf or nan.""" fstate = float(state) if math.isnan(fstate) or math.isinf(fstate): raise ValueError return fstate def _normalize_states( hass: HomeAssistant, session: Session, old_metadatas: dict[str, tuple[int, StatisticMetaData]], entity_history: Iterable[State], device_class: str | None, entity_id: str, ) -> tuple[str | None, list[tuple[float, State]]]: """Normalize units.""" unit = None if device_class not in UNIT_CONVERSIONS: # We're not normalizing this device class, return the state as they are fstates = [] for state in entity_history: try: fstate = _parse_float(state.state) except (ValueError, TypeError): # TypeError to guard for NULL state in DB continue fstates.append((fstate, state)) if fstates: all_units = _get_units(fstates) if len(all_units) > 1: if WARN_UNSTABLE_UNIT not in hass.data: hass.data[WARN_UNSTABLE_UNIT] = set() if entity_id not in hass.data[WARN_UNSTABLE_UNIT]: hass.data[WARN_UNSTABLE_UNIT].add(entity_id) extra = "" if old_metadata := old_metadatas.get(entity_id): extra = ( " and matches the unit of already compiled statistics " f"({old_metadata[1]['unit_of_measurement']})" ) _LOGGER.warning( "The unit of %s is changing, got multiple %s, generation of long term " "statistics will be suppressed unless the unit is stable%s", entity_id, all_units, extra, ) return None, [] unit = fstates[0][1].attributes.get(ATTR_UNIT_OF_MEASUREMENT) return unit, fstates fstates = [] for state in entity_history: try: fstate = _parse_float(state.state) except ValueError: continue unit = state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) # Exclude unsupported units from statistics if unit not in UNIT_CONVERSIONS[device_class]: if WARN_UNSUPPORTED_UNIT not in hass.data: hass.data[WARN_UNSUPPORTED_UNIT] = set() if entity_id not in hass.data[WARN_UNSUPPORTED_UNIT]: hass.data[WARN_UNSUPPORTED_UNIT].add(entity_id) _LOGGER.warning("%s has unknown unit %s", entity_id, unit) continue fstates.append((UNIT_CONVERSIONS[device_class][unit](fstate), state)) return DEVICE_CLASS_UNITS[device_class], fstates def _suggest_report_issue(hass: HomeAssistant, entity_id: str) -> str: """Suggest to report an issue.""" domain = entity_sources(hass).get(entity_id, {}).get("domain") custom_component = entity_sources(hass).get(entity_id, {}).get("custom_component") report_issue = "" if custom_component: report_issue = "report it to the custom component author." else: report_issue = ( "create a bug report at " "https://github.com/home-assistant/core/issues?q=is%3Aopen+is%3Aissue" ) if domain: report_issue += f"+label%3A%22integration%3A+{domain}%22" return report_issue def warn_dip(hass: HomeAssistant, entity_id: str, state: State) -> None: """Log a warning once if a sensor with state_class_total has a decreasing value. The log will be suppressed until two dips have been seen to prevent warning due to rounding issues with databases storing the state as a single precision float, which was fixed in recorder DB version 20. """ if SEEN_DIP not in hass.data: hass.data[SEEN_DIP] = set() if entity_id not in hass.data[SEEN_DIP]: hass.data[SEEN_DIP].add(entity_id) return if WARN_DIP not in hass.data: hass.data[WARN_DIP] = set() if entity_id not in hass.data[WARN_DIP]: hass.data[WARN_DIP].add(entity_id) domain = entity_sources(hass).get(entity_id, {}).get("domain") if domain in ["energy", "growatt_server", "solaredge"]: return _LOGGER.warning( "Entity %s %shas state class total_increasing, but its state is " "not strictly increasing. Triggered by state %s with last_updated set to %s. " "Please %s", entity_id, f"from integration {domain} " if domain else "", state.state, state.last_updated.isoformat(), _suggest_report_issue(hass, entity_id), ) def warn_negative(hass: HomeAssistant, entity_id: str, state: State) -> None: """Log a warning once if a sensor with state_class_total has a negative value.""" if WARN_NEGATIVE not in hass.data: hass.data[WARN_NEGATIVE] = set() if entity_id not in hass.data[WARN_NEGATIVE]: hass.data[WARN_NEGATIVE].add(entity_id) domain = entity_sources(hass).get(entity_id, {}).get("domain") _LOGGER.warning( "Entity %s %shas state class total_increasing, but its state is " "negative. Triggered by state %s with last_updated set to %s. Please %s", entity_id, f"from integration {domain} " if domain else "", state.state, state.last_updated.isoformat(), _suggest_report_issue(hass, entity_id), ) def reset_detected( hass: HomeAssistant, entity_id: str, fstate: float, previous_fstate: float | None, state: State, ) -> bool: """Test if a total_increasing sensor has been reset.""" if previous_fstate is None: return False if 0.9 * previous_fstate <= fstate < previous_fstate: warn_dip(hass, entity_id, state) if fstate < 0: warn_negative(hass, entity_id, state) raise HomeAssistantError return fstate < 0.9 * previous_fstate def _wanted_statistics(sensor_states: list[State]) -> dict[str, set[str]]: """Prepare a dict with wanted statistics for entities.""" wanted_statistics = {} for state in sensor_states: state_class = state.attributes[ATTR_STATE_CLASS] device_class = state.attributes.get(ATTR_DEVICE_CLASS) if device_class in DEVICE_CLASS_STATISTICS[state_class]: wanted_statistics[state.entity_id] = DEVICE_CLASS_STATISTICS[state_class][ device_class ] else: wanted_statistics[state.entity_id] = DEFAULT_STATISTICS[state_class] return wanted_statistics def _last_reset_as_utc_isoformat(last_reset_s: Any, entity_id: str) -> str | None: """Parse last_reset and convert it to UTC.""" if last_reset_s is None: return None if isinstance(last_reset_s, str): last_reset = dt_util.parse_datetime(last_reset_s) else: last_reset = None if last_reset is None: _LOGGER.warning( "Ignoring invalid last reset '%s' for %s", last_reset_s, entity_id ) return None return dt_util.as_utc(last_reset).isoformat() def compile_statistics( hass: HomeAssistant, start: datetime.datetime, end: datetime.datetime ) -> list[StatisticResult]: """Compile statistics for all entities during start-end. Note: This will query the database and must not be run in the event loop """ with recorder_util.session_scope(hass=hass) as session: result = _compile_statistics(hass, session, start, end) return result def _compile_statistics( # noqa: C901 hass: HomeAssistant, session: Session, start: datetime.datetime, end: datetime.datetime, ) -> list[StatisticResult]: """Compile statistics for all entities during start-end.""" result: list[StatisticResult] = [] sensor_states = _get_sensor_states(hass) wanted_statistics = _wanted_statistics(sensor_states) old_metadatas = statistics.get_metadata_with_session( hass, session, statistic_ids=[i.entity_id for i in sensor_states] ) # Get history between start and end entities_full_history = [ i.entity_id for i in sensor_states if "sum" in wanted_statistics[i.entity_id] ] history_list = {} if entities_full_history: history_list = history.get_significant_states_with_session( # type: ignore hass, session, start - datetime.timedelta.resolution, end, entity_ids=entities_full_history, significant_changes_only=False, ) entities_significant_history = [ i.entity_id for i in sensor_states if "sum" not in wanted_statistics[i.entity_id] ] if entities_significant_history: _history_list = history.get_significant_states_with_session( # type: ignore hass, session, start - datetime.timedelta.resolution, end, entity_ids=entities_significant_history, ) history_list = {**history_list, **_history_list} # If there are no recent state changes, the sensor's state may already be pruned # from the recorder. Get the state from the state machine instead. for _state in sensor_states: if _state.entity_id not in history_list: history_list[_state.entity_id] = (_state,) for _state in sensor_states: # pylint: disable=too-many-nested-blocks entity_id = _state.entity_id if entity_id not in history_list: continue state_class = _state.attributes[ATTR_STATE_CLASS] device_class = _state.attributes.get(ATTR_DEVICE_CLASS) entity_history = history_list[entity_id] unit, fstates = _normalize_states( hass, session, old_metadatas, entity_history, device_class, entity_id ) if not fstates: continue # Check metadata if old_metadata := old_metadatas.get(entity_id): if old_metadata[1]["unit_of_measurement"] != unit: if WARN_UNSTABLE_UNIT not in hass.data: hass.data[WARN_UNSTABLE_UNIT] = set() if entity_id not in hass.data[WARN_UNSTABLE_UNIT]: hass.data[WARN_UNSTABLE_UNIT].add(entity_id) _LOGGER.warning( "The %sunit of %s (%s) does not match the unit of already " "compiled statistics (%s). Generation of long term statistics " "will be suppressed unless the unit changes back to %s", "normalized " if device_class in DEVICE_CLASS_UNITS else "", entity_id, unit, old_metadata[1]["unit_of_measurement"], old_metadata[1]["unit_of_measurement"], ) continue # Set meta data meta: StatisticMetaData = { "statistic_id": entity_id, "unit_of_measurement": unit, "has_mean": "mean" in wanted_statistics[entity_id], "has_sum": "sum" in wanted_statistics[entity_id], } # Make calculations stat: StatisticData = {"start": start} if "max" in wanted_statistics[entity_id]: stat["max"] = max(*itertools.islice(zip(*fstates), 1)) # type: ignore[typeddict-item] if "min" in wanted_statistics[entity_id]: stat["min"] = min(*itertools.islice(zip(*fstates), 1)) # type: ignore[typeddict-item] if "mean" in wanted_statistics[entity_id]: stat["mean"] = _time_weighted_average(fstates, start, end) if "sum" in wanted_statistics[entity_id]: last_reset = old_last_reset = None new_state = old_state = None _sum = 0.0 last_stats = statistics.get_last_statistics(hass, 1, entity_id, False) if entity_id in last_stats: # We have compiled history for this sensor before, use that as a starting point last_reset = old_last_reset = last_stats[entity_id][0]["last_reset"] new_state = old_state = last_stats[entity_id][0]["state"] _sum = last_stats[entity_id][0]["sum"] or 0.0 for fstate, state in fstates: # Deprecated, will be removed in Home Assistant 2021.11 if ( "last_reset" not in state.attributes and state_class == STATE_CLASS_MEASUREMENT ): continue reset = False if ( state_class != STATE_CLASS_TOTAL_INCREASING and ( last_reset := _last_reset_as_utc_isoformat( state.attributes.get("last_reset"), entity_id ) ) != old_last_reset and last_reset is not None ): if old_state is None: _LOGGER.info( "Compiling initial sum statistics for %s, zero point set to %s", entity_id, fstate, ) else: _LOGGER.info( "Detected new cycle for %s, last_reset set to %s (old last_reset %s)", entity_id, last_reset, old_last_reset, ) reset = True elif old_state is None and last_reset is None: reset = True _LOGGER.info( "Compiling initial sum statistics for %s, zero point set to %s", entity_id, fstate, ) elif state_class == STATE_CLASS_TOTAL_INCREASING: try: if old_state is None or reset_detected( hass, entity_id, fstate, new_state, state ): reset = True _LOGGER.info( "Detected new cycle for %s, value dropped from %s to %s, " "triggered by state with last_updated set to %s", entity_id, new_state, state.last_updated.isoformat(), fstate, ) except HomeAssistantError: continue if reset: # The sensor has been reset, update the sum if old_state is not None: _sum += new_state - old_state # ..and update the starting point new_state = fstate old_last_reset = last_reset # Force a new cycle for an existing sensor to start at 0 if old_state is not None: old_state = 0.0 else: old_state = new_state else: new_state = fstate # Deprecated, will be removed in Home Assistant 2021.11 if last_reset is None and state_class == STATE_CLASS_MEASUREMENT: # No valid updates continue if new_state is None or old_state is None: # No valid updates continue # Update the sum with the last state _sum += new_state - old_state if last_reset is not None: stat["last_reset"] = dt_util.parse_datetime(last_reset) stat["sum"] = _sum stat["state"] = new_state result.append({"meta": meta, "stat": (stat,)}) return result def list_statistic_ids(hass: HomeAssistant, statistic_type: str | None = None) -> dict: """Return statistic_ids and meta data.""" entities = _get_sensor_states(hass) statistic_ids = {} for state in entities: state_class = state.attributes[ATTR_STATE_CLASS] device_class = state.attributes.get(ATTR_DEVICE_CLASS) native_unit = state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) if device_class in DEVICE_CLASS_STATISTICS[state_class]: provided_statistics = DEVICE_CLASS_STATISTICS[state_class][device_class] else: provided_statistics = DEFAULT_STATISTICS[state_class] if statistic_type is not None and statistic_type not in provided_statistics: continue if ( "sum" in provided_statistics and ATTR_LAST_RESET not in state.attributes and state.attributes.get(ATTR_STATE_CLASS) == STATE_CLASS_MEASUREMENT ): continue if device_class not in UNIT_CONVERSIONS: statistic_ids[state.entity_id] = native_unit continue if native_unit not in UNIT_CONVERSIONS[device_class]: continue statistics_unit = DEVICE_CLASS_UNITS[device_class] statistic_ids[state.entity_id] = statistics_unit return statistic_ids def validate_statistics( hass: HomeAssistant, ) -> dict[str, list[statistics.ValidationIssue]]: """Validate statistics.""" validation_result = defaultdict(list) sensor_states = hass.states.all(DOMAIN) metadatas = statistics.get_metadata(hass, statistic_source=RECORDER_DOMAIN) sensor_entity_ids = {i.entity_id for i in sensor_states} sensor_statistic_ids = set(metadatas) for state in sensor_states: entity_id = state.entity_id device_class = state.attributes.get(ATTR_DEVICE_CLASS) state_class = state.attributes.get(ATTR_STATE_CLASS) state_unit = state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) if metadata := metadatas.get(entity_id): if not is_entity_recorded(hass, state.entity_id): # Sensor was previously recorded, but no longer is validation_result[entity_id].append( statistics.ValidationIssue( "entity_no_longer_recorded", {"statistic_id": entity_id}, ) ) if state_class not in STATE_CLASSES: # Sensor no longer has a valid state class validation_result[entity_id].append( statistics.ValidationIssue( "unsupported_state_class", {"statistic_id": entity_id, "state_class": state_class}, ) ) metadata_unit = metadata[1]["unit_of_measurement"] if device_class not in UNIT_CONVERSIONS: if state_unit != metadata_unit: # The unit has changed validation_result[entity_id].append( statistics.ValidationIssue( "units_changed", { "statistic_id": entity_id, "state_unit": state_unit, "metadata_unit": metadata_unit, }, ) ) elif metadata_unit != DEVICE_CLASS_UNITS[device_class]: # The unit in metadata is not supported for this device class validation_result[entity_id].append( statistics.ValidationIssue( "unsupported_unit_metadata", { "statistic_id": entity_id, "device_class": device_class, "metadata_unit": metadata_unit, "supported_unit": DEVICE_CLASS_UNITS[device_class], }, ) ) elif state_class in STATE_CLASSES: if not is_entity_recorded(hass, state.entity_id): # Sensor is not recorded validation_result[entity_id].append( statistics.ValidationIssue( "entity_not_recorded", {"statistic_id": entity_id}, ) ) if ( state_class in STATE_CLASSES and device_class in UNIT_CONVERSIONS and state_unit not in UNIT_CONVERSIONS[device_class] ): # The unit in the state is not supported for this device class validation_result[entity_id].append( statistics.ValidationIssue( "unsupported_unit_state", { "statistic_id": entity_id, "device_class": device_class, "state_unit": state_unit, }, ) ) for statistic_id in sensor_statistic_ids - sensor_entity_ids: # There is no sensor matching the statistics_id validation_result[statistic_id].append( statistics.ValidationIssue( "no_state", { "statistic_id": statistic_id, }, ) ) return validation_result
{ "content_hash": "63a4bf4c6ef57f77dfbd46a2d5b34bc3", "timestamp": "", "source": "github", "line_count": 754, "max_line_length": 98, "avg_line_length": 37.97214854111406, "alnum_prop": 0.5747266948412559, "repo_name": "lukas-hetzenecker/home-assistant", "id": "9422e51f5a6adfcc0b245b053f926b203ecca38b", "size": "28632", "binary": false, "copies": "1", "ref": "refs/heads/dev", "path": "homeassistant/components/sensor/recorder.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Dockerfile", "bytes": "2443" }, { "name": "Python", "bytes": "38023745" }, { "name": "Shell", "bytes": "4910" } ], "symlink_target": "" }
from oslo_policy import policy from nova.policies import base BASE_POLICY_NAME = 'os_compute_api:os-attach-interfaces' POLICY_ROOT = 'os_compute_api:os-attach-interfaces:%s' attach_interfaces_policies = [ policy.DocumentedRuleDefault( BASE_POLICY_NAME, base.RULE_ADMIN_OR_OWNER, "List port interfaces or show details of a port interface attached " "to a server", [ { 'method': 'GET', 'path': '/servers/{server_id}/os-interface' }, { 'method': 'GET', 'path': '/servers/{server_id}/os-interface/{port_id}' } ]), policy.DocumentedRuleDefault( POLICY_ROOT % 'create', base.RULE_ADMIN_OR_OWNER, "Attach an interface to a server", [ { 'method': 'POST', 'path': '/servers/{server_id}/os-interface' } ]), policy.DocumentedRuleDefault( POLICY_ROOT % 'delete', base.RULE_ADMIN_OR_OWNER, "Detach an interface from a server", [ { 'method': 'DELETE', 'path': '/servers/{server_id}/os-interface/{port_id}' } ]) ] def list_rules(): return attach_interfaces_policies
{ "content_hash": "f2e659b457d4ab7c00d2e7d12abad921", "timestamp": "", "source": "github", "line_count": 50, "max_line_length": 76, "avg_line_length": 26.52, "alnum_prop": 0.5128205128205128, "repo_name": "phenoxim/nova", "id": "c9b6677d09877a3cab7ca7da8eb80f71acd1d497", "size": "1965", "binary": false, "copies": "6", "ref": "refs/heads/master", "path": "nova/policies/attach_interfaces.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "16289098" }, { "name": "Shell", "bytes": "20716" }, { "name": "Smarty", "bytes": "282020" } ], "symlink_target": "" }
import RPi.GPIO as gpio import time def distance(measure='cm'): try: gpio.setmode(gpio.BOARD) gpio.setup(12, gpio.OUT) gpio.setup(16, gpio.IN) gpio.output(12, False) while gpio.input(16) == 0: nosig = time.time() while gpio.input(16) == 1: sig = time.time() tl = sig - nosig if measure == 'cm': distance = tl / 0.000058 elif measure == 'in': distance = tl / 0.000148 else: print('improper choice of measurement: in or cm') distance = None gpio.cleanup() return distance except: distance = 100 gpio.cleanup() return distance if __name__ == "__main__": print(distance("cm"))
{ "content_hash": "cfc676abe0826f39eddd7524984edc25", "timestamp": "", "source": "github", "line_count": 35, "max_line_length": 61, "avg_line_length": 22.62857142857143, "alnum_prop": 0.5, "repo_name": "PythonProgramming/Robotics-with-Raspberry-Pi", "id": "fd8880b5cb181ec73a70f67b6f0ddb9bda2a0212", "size": "792", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "sensor.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "10875" } ], "symlink_target": "" }
import os import sys from distutils.core import setup from distutils.sysconfig import get_python_lib # Warn if we are installing over top of an existing installation. This can # cause issues where files that were deleted from a more recent Django are # still present in site-packages. See #18115. overlay_warning = False if "install" in sys.argv: # We have to try also with an explicit prefix of /usr/local in order to # catch Debian's custom user site-packages directory. for lib_path in get_python_lib(), get_python_lib(prefix="/usr/local"): existing_path = os.path.abspath(os.path.join(lib_path, "django")) if os.path.exists(existing_path): # We note the need for the warning here, but present it after the # command is run, so it's more likely to be seen. overlay_warning = True break def fullsplit(path, result=None): """ Split a pathname into components (the opposite of os.path.join) in a platform-neutral way. """ if result is None: result = [] head, tail = os.path.split(path) if head == '': return [tail] + result if head == path: return result return fullsplit(head, [tail] + result) EXCLUDE_FROM_PACKAGES = ['django.conf.project_template', 'django.conf.app_template', 'django.bin'] def is_package(package_name): for pkg in EXCLUDE_FROM_PACKAGES: if package_name.startswith(pkg): return False return True # Compile the list of packages available, because distutils doesn't have # an easy way to do this. packages, package_data = [], {} root_dir = os.path.dirname(__file__) if root_dir != '': os.chdir(root_dir) django_dir = 'django' for dirpath, dirnames, filenames in os.walk(django_dir): # Ignore PEP 3147 cache dirs and those whose names start with '.' dirnames[:] = [d for d in dirnames if not d.startswith('.') and d != '__pycache__'] parts = fullsplit(dirpath) package_name = '.'.join(parts) if '__init__.py' in filenames and is_package(package_name): packages.append(package_name) elif filenames: relative_path = [] while '.'.join(parts) not in packages: relative_path.append(parts.pop()) relative_path.reverse() path = os.path.join(*relative_path) package_files = package_data.setdefault('.'.join(parts), []) package_files.extend([os.path.join(path, f) for f in filenames]) # Dynamically calculate the version based on django.VERSION. version = __import__('django').get_version() setup( name='Django', version=version, url='http://www.djangoproject.com/', author='Django Software Foundation', author_email='foundation@djangoproject.com', description=('A high-level Python Web framework that encourages ' 'rapid development and clean, pragmatic design.'), download_url='https://www.djangoproject.com/m/releases/1.5/Django-1.5.5.tar.gz', license='BSD', packages=packages, package_data=package_data, scripts=['django/bin/django-admin.py'], classifiers=[ 'Development Status :: 5 - Production/Stable', 'Environment :: Web Environment', 'Framework :: Django', 'Intended Audience :: Developers', 'License :: OSI Approved :: BSD License', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.6', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.2', 'Programming Language :: Python :: 3.3', 'Topic :: Internet :: WWW/HTTP', 'Topic :: Internet :: WWW/HTTP :: Dynamic Content', 'Topic :: Internet :: WWW/HTTP :: WSGI', 'Topic :: Software Development :: Libraries :: Application Frameworks', 'Topic :: Software Development :: Libraries :: Python Modules', ], ) if overlay_warning: sys.stderr.write(""" ======== WARNING! ======== You have just installed Django over top of an existing installation, without removing it first. Because of this, your install may now include extraneous files from a previous version that have since been removed from Django. This is known to cause a variety of problems. You should manually remove the %(existing_path)s directory and re-install Django. """ % {"existing_path": existing_path})
{ "content_hash": "0950489c286bc2d7564505146b8e4dcc", "timestamp": "", "source": "github", "line_count": 133, "max_line_length": 87, "avg_line_length": 34.150375939849624, "alnum_prop": 0.6424482606781153, "repo_name": "bliti/django-nonrel-1.5", "id": "4682031af9e86c8b863cdf84fa69875e7c3a6ba5", "size": "4542", "binary": false, "copies": "2", "ref": "refs/heads/nonrel-1.5", "path": "setup.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "50704" }, { "name": "JavaScript", "bytes": "94313" }, { "name": "Python", "bytes": "8374970" }, { "name": "Shell", "bytes": "12151" } ], "symlink_target": "" }
from django.test import TestCase from django.core.urlresolvers import reverse from django.utils.text import truncate_words from umessages.models import Message, MessageRecipient, MessageContact from umessages.utils import get_user_model User = get_user_model() class MessageContactTests(TestCase): fixtures = ['users', 'messages'] def test_string_formatting(self): """ Test the human representation of a message """ contact = MessageContact.objects.get(pk=1) correct_format = "john and jane" self.failUnlessEqual(contact.__unicode__(), correct_format) def test_opposite_user(self): """ Test if the opposite user is returned """ contact = MessageContact.objects.get(pk=1) john = User.objects.get(pk=1) jane = User.objects.get(pk=2) # Test the opposites self.failUnlessEqual(contact.opposite_user(john), jane) self.failUnlessEqual(contact.opposite_user(jane), john) class MessageModelTests(TestCase): fixtures = ['users', 'messages'] def test_string_formatting(self): """ Test the human representation of a message """ message = Message.objects.get(pk=1) truncated_body = truncate_words(message.body, 10) self.failUnlessEqual(message.__unicode__(), truncated_body) class MessageRecipientModelTest(TestCase): fixtures = ['users', 'messages'] def test_string_formatting(self): """ Test the human representation of a recipient """ recipient = MessageRecipient.objects.get(pk=1) valid_unicode = '%s' % (recipient.message) self.failUnlessEqual(recipient.__unicode__(), valid_unicode) def test_new(self): """ Test if the message that is new is correct """ new_message = MessageRecipient.objects.get(pk=1) read_message = MessageRecipient.objects.get(pk=2) self.failUnless(new_message.is_read()) self.failIf(read_message.is_read())
{ "content_hash": "795a76262eff95ff8cd09445564f9b30", "timestamp": "", "source": "github", "line_count": 61, "max_line_length": 70, "avg_line_length": 34.52459016393443, "alnum_prop": 0.6291547958214625, "repo_name": "euanlau/django-umessages", "id": "70ec26d4e8b46f742449080bf4fae0d0ba3dd2c0", "size": "2106", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "umessages/tests/models.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "JavaScript", "bytes": "8699" }, { "name": "Python", "bytes": "70531" } ], "symlink_target": "" }
import json import logging import os import sys from types import FrameType from typing import Any, Dict, List, Optional, TYPE_CHECKING, Text import rasa.shared.utils.cli import rasa.shared.utils.io if TYPE_CHECKING: from questionary import Question logger = logging.getLogger(__name__) FREE_TEXT_INPUT_PROMPT = "Type out your own message..." def get_validated_path( current: Optional[Text], parameter: Text, default: Optional[Text] = None, none_is_valid: bool = False, ) -> Optional[Text]: """Check whether a file path or its default value is valid and returns it. Args: current: The parsed value. parameter: The name of the parameter. default: The default value of the parameter. none_is_valid: `True` if `None` is valid value for the path, else `False`` Returns: The current value if it was valid, else the default value of the argument if it is valid, else `None`. """ if current is None or current is not None and not os.path.exists(current): if default is not None and os.path.exists(default): reason_str = f"'{current}' not found." if current is None: reason_str = f"Parameter '{parameter}' not set." else: rasa.shared.utils.io.raise_warning( f"The path '{current}' does not seem to exist. Using the " f"default value '{default}' instead." ) logger.debug(f"{reason_str} Using default location '{default}' instead.") current = default elif none_is_valid: current = None else: cancel_cause_not_found(current, parameter, default) return current def missing_config_keys(path: Text, mandatory_keys: List[Text]) -> List[Text]: import rasa.utils.io if not os.path.exists(path): return mandatory_keys config_data = rasa.shared.utils.io.read_config_file(path) return [k for k in mandatory_keys if k not in config_data or config_data[k] is None] def cancel_cause_not_found( current: Optional[Text], parameter: Text, default: Optional[Text] ) -> None: """Exits with an error because the given path was not valid. Args: current: The path given by the user. parameter: The name of the parameter. default: The default value of the parameter. """ default_clause = "" if default: default_clause = f"use the default location ('{default}') or " rasa.shared.utils.cli.print_error( "The path '{}' does not exist. Please make sure to {}specify it" " with '--{}'.".format(current, default_clause, parameter) ) sys.exit(1) def parse_last_positional_argument_as_model_path() -> None: """Fixes the parsing of a potential positional model path argument.""" if ( len(sys.argv) >= 2 # support relevant commands ... and sys.argv[1] in ["run", "shell", "interactive"] # but avoid interpreting subparser commands as model paths and sys.argv[1:] != ["run", "actions"] and not sys.argv[-2].startswith("-") and os.path.exists(sys.argv[-1]) ): sys.argv.append(sys.argv[-1]) sys.argv[-2] = "--model" def button_to_string(button: Dict[Text, Any], idx: int = 0) -> Text: """Create a string representation of a button.""" title = button.pop("title", "") if "payload" in button: payload = " ({})".format(button.pop("payload")) else: payload = "" # if there are any additional attributes, we append them to the output if button: details = " - {}".format(json.dumps(button, sort_keys=True)) else: details = "" button_string = "{idx}: {title}{payload}{details}".format( idx=idx + 1, title=title, payload=payload, details=details ) return button_string def element_to_string(element: Dict[Text, Any], idx: int = 0) -> Text: """Create a string representation of an element.""" title = element.pop("title", "") element_string = "{idx}: {title} - {element}".format( idx=idx + 1, title=title, element=json.dumps(element, sort_keys=True) ) return element_string def button_choices_from_message_data( message: Dict[Text, Any], allow_free_text_input: bool = True ) -> List[Text]: """Return list of choices to present to the user. If allow_free_text_input is True, an additional option is added at the end along with the response buttons that allows the user to type in free text. """ choices = [ button_to_string(button, idx) for idx, button in enumerate(message.get("buttons")) ] if allow_free_text_input: choices.append(FREE_TEXT_INPUT_PROMPT) return choices def payload_from_button_question(button_question: "Question") -> Text: """Prompt user with a button question and returns the nlu payload.""" response = button_question.ask() if response != FREE_TEXT_INPUT_PROMPT: # Extract intent slash command if it's a button response = response[response.find("(") + 1 : response.find(")")] return response def signal_handler(_: int, __: FrameType) -> None: """Kills Rasa when OS signal is received.""" print("Goodbye 👋") sys.exit(0)
{ "content_hash": "79c288f73a8ed49592c4522ec8561b21", "timestamp": "", "source": "github", "line_count": 171, "max_line_length": 88, "avg_line_length": 31.263157894736842, "alnum_prop": 0.6227085671530116, "repo_name": "RasaHQ/rasa_nlu", "id": "fc9a1d5e9595183eb956bf13471e7f1aaad6c707", "size": "5349", "binary": false, "copies": "1", "ref": "refs/heads/emptystring_10504", "path": "rasa/cli/utils.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Dockerfile", "bytes": "705" }, { "name": "HTML", "bytes": "3462" }, { "name": "Makefile", "bytes": "1044" }, { "name": "Python", "bytes": "1467067" }, { "name": "Shell", "bytes": "941" } ], "symlink_target": "" }
from setuptools import setup def get_version(): with open("dotjs.py", "r") as fp: for line in fp: if line.startswith("__version__"): return eval(line.split("=")[-1]) def read(filename): with open(filename, "r") as fp: return fp.read() setup( name="dotjs", version=get_version(), description="A Python implementation of the dotjs HTTP server", long_description=read("README.rst"), author="Paul Hooijenga", author_email="paulhooijenga@gmail.com", url="https://github.com/hackedd/python-dotjs", license="MIT", py_modules=["dotjs"], entry_points={ "console_scripts": [ "dotjs = dotjs:_main", ], "gui_scripts": [ "dotjsw = dotjs:_win_main", ], }, classifiers=[ "Development Status :: 4 - Beta", "Environment :: Console", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 2", ], )
{ "content_hash": "0f6a08ad4ed166c74a262acbf075f6e2", "timestamp": "", "source": "github", "line_count": 43, "max_line_length": 67, "avg_line_length": 26.302325581395348, "alnum_prop": 0.5587975243147657, "repo_name": "tommytwoeyes/python-dotjs", "id": "80b6942482c33afb4fdb4871107922edf33041a1", "size": "1153", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "setup.py", "mode": "33261", "license": "mit", "language": [ { "name": "Python", "bytes": "9008" } ], "symlink_target": "" }
import sqlalchemy as sql def upgrade(migrate_engine): # Upgrade operations go here. Don't create your own engine; bind # migrate_engine to your metadata meta = sql.MetaData() meta.bind = migrate_engine endpoint_group_table = sql.Table( 'endpoint_group', meta, sql.Column('id', sql.String(64), primary_key=True), sql.Column('name', sql.String(255), nullable=False), sql.Column('description', sql.Text, nullable=True), sql.Column('filters', sql.Text(), nullable=False)) endpoint_group_table.create(migrate_engine, checkfirst=True) project_endpoint_group_table = sql.Table( 'project_endpoint_group', meta, sql.Column('endpoint_group_id', sql.String(64), sql.ForeignKey('endpoint_group.id'), nullable=False), sql.Column('project_id', sql.String(64), nullable=False), sql.PrimaryKeyConstraint('endpoint_group_id', 'project_id')) project_endpoint_group_table.create(migrate_engine, checkfirst=True) def downgrade(migrate_engine): meta = sql.MetaData() meta.bind = migrate_engine # Operations to reverse the above upgrade go here. for table_name in ['project_endpoint_group', 'endpoint_group']: table = sql.Table(table_name, meta, autoload=True) table.drop()
{ "content_hash": "4a76005a4aa0afd2a5614e9fde557120", "timestamp": "", "source": "github", "line_count": 37, "max_line_length": 72, "avg_line_length": 37.432432432432435, "alnum_prop": 0.631768953068592, "repo_name": "rushiagr/keystone", "id": "5f80160a70f929f334d270b9092741f99f9deb1b", "size": "1974", "binary": false, "copies": "4", "ref": "refs/heads/master", "path": "keystone/contrib/endpoint_filter/migrate_repo/versions/002_add_endpoint_groups.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "HTML", "bytes": "665" }, { "name": "Python", "bytes": "3739901" }, { "name": "Shell", "bytes": "10877" } ], "symlink_target": "" }
from __future__ import unicode_literals from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('assets', '0004_auto_20171202_0517'), ] operations = [ migrations.AddField( model_name='asset', name='supersedes', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='assets.Asset'), ), ]
{ "content_hash": "98a3dc9401b7647ee5697ad7b8b2a6e8", "timestamp": "", "source": "github", "line_count": 19, "max_line_length": 111, "avg_line_length": 24.842105263157894, "alnum_prop": 0.6334745762711864, "repo_name": "AlmostBetterNetwork/podmaster-host", "id": "833037da84f2bab8586f44d58d02d1d515b2c3aa", "size": "543", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "assets/migrations/0005_asset_supersedes.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "59857" }, { "name": "HTML", "bytes": "130751" }, { "name": "JavaScript", "bytes": "46479" }, { "name": "Python", "bytes": "200422" } ], "symlink_target": "" }
import os import imaplib import re import email import time try: MODULE = os.path.dirname(__file__) except: MODULE = "" # Import the Cache class from pattern.web so e-mails can be cached locally (faster): try: from ..cache import cache except: try: import os, sys; sys.path.append(os.path.join(MODULE, "..")) from cache import cache except: try: from pattern.web.cache import cache except: cache = {} #### STRING FUNCTIONS ############################################################################## def decode_utf8(string): """ Returns the given string as a unicode string (if possible). """ if isinstance(string, str): for encoding in (("utf-8",), ("windows-1252",), ("utf-8", "ignore")): try: return string.decode(*encoding) except: pass return string return unicode(string) def encode_utf8(string): """ Returns the given string as a Python byte string (if possible). """ if isinstance(string, unicode): try: return string.encode("utf-8") except: return string return str(string) #### IMAP4 SSL ##################################################################################### # Fixes an issue in Python 2.5- with memory allocation. # See: http://bugs.python.org/issue1389051 class IMAP4(imaplib.IMAP4): pass class IMAP4_SSL(imaplib.IMAP4_SSL): def read(self, size): """Read 'size' bytes from remote.""" # sslobj.read() sometimes returns < size bytes chunks = [] read = 0 while read < size: data = self.sslobj.read(min(size-read, 16384)) # use min() instead of max(). read += len(data) chunks.append(data) return ''.join(chunks) #### MAIL ########################################################################################## GMAIL = "imap.gmail.com" DATE, FROM, SUBJECT, BODY, ATTACHMENTS = \ "date", "from", "subject", "body", "attachments" def _basename(folder): # [Gmail]/INBOX => inbox f = folder.replace("[Gmail]/","") f = f.replace("[Gmail]","") f = f.replace("Mail", "") # "Sent Mail" alias = "sent". f = f.replace("INBOX.", "") # "inbox.sent" alias = "sent". f = f.lower() f = f.strip() return f class MailError(Exception): pass class MailServiceError(MailError): pass class MailLoginError(MailError): pass class MailNotLoggedIn(MailError): pass class Mail(object): def __init__(self, username, password, service=GMAIL, port=993, secure=True): """ IMAP4 connection to a mailbox. With secure=True, SSL is used. The standard port for SSL is 993. The standard port without SSL is 143. """ self._username = username self._password = password self._host = service self._port = port self._secure = secure self._imap4 = None self._folders = None self.login(username, password) @property def _id(self): return "%s:%s@%s:%s" % (self._username, self._password, self._host, self._port) @property def imap4(self): if self._imap4 is None: raise MailNotLoggedIn return self._imap4 def login(self, username, password, **kwargs): """ Signs in to the mail account with the given username and password, raises a MailLoginError otherwise. """ self.logout() self._secure = kwargs.get("secure", self._secure) self._imap4 = (self._secure and IMAP4_SSL or IMAP4)(self._host, self._port) try: status, response = self._imap4.login(username, password) except: raise MailLoginError if status != "OK": raise MailLoginError, response def logout(self): """ Signs out of the mail account. """ if self._imap4 is not None: self._imap4.logout() self._imap4 = None def __del__(self): if "_imap4" in self.__dict__: if self._imap4 is not None: self._imap4.logout() self._imap4 = None @property def folders(self): """ A dictionary of (name, MailFolder)-tuples. Default folders: inbox, trash, spam, receipts, ... """ if self._folders is None: status, response = self.imap4.list() self._folders = [f.split(" \"")[-1].strip(" \"") for f in response] self._folders = [(_basename(f), MailFolder(self, f)) for f in self._folders] self._folders = [(f, o) for f, o in self._folders if f != ""] self._folders = dict(self._folders) return self._folders def __getattr__(self, k): """ Each folder is accessible as Mail.[name]. """ if k in self.__dict__: return self.__dict__[k] if k in self.folders: return self.folders[k] raise AttributeError, "'Mail' object has no attribute '%s'" % k #--- MAIL FOLDER ----------------------------------------------------------------------------------- def _decode(s, message): try: # Decode message Content-Type charset to Unicode. # If all fails, try Latin-1 (common case). e = message.get("Content-Type") e = e.split("charset=")[-1].split(";")[0].strip("\"'").lower() s = s.decode(e) except: try: s = s.decode("utf-8") except: try: s = s.decode("latin-1") except: pass return s class MailFolder: def __init__(self, parent, name): """ A folder (inbox, spam, trash, ...) in a mailbox. E-mail messages can be searched and retrieved (including attachments) from a folder. """ self._parent = parent self._name = name @property def parent(self): return self._parent @property def name(self): return _basename(self._name) @property def count(self): return len(self) def search(self, q, field=FROM, cached=False): """ Returns a list of indices for the given query, latest-first. The search field can be FROM, DATE or SUBJECT. """ id = "mail-%s-%s-%s-%s" % (self.parent._id, self.name, q, field) if cached and id in cache: status, response = "OK", [cache[id]] else: status, response = self.parent.imap4.select(self._name, readonly=1) status, response = self.parent.imap4.search(None, field.upper(), q) if cached: cache[id] = response[0] return sorted([int(i)-1 for i in response[0].split()], reverse=True) def read(self, i, attachments=False, cached=True): return self.__getitem__(i, attachments, cached) def __getitem__(self, i, attachments=False, cached=True): """ Returns the mail message with the given index. Each message is a dictionary with date, from, subject, body, attachments entries. The attachments entry is a list of (MIME-type, str)-tuples. """ i += 1 id = "mail-%s-%s-%s-%s" % (self.parent._id, self.name, i, attachments) if cached and id in cache: m = cache[id] else: # Select the current mail folder. # Get the e-mail header. # Get the e-mail body, with or without file attachments. status, response = self.parent.imap4.select(self._name, readonly=1) status, response1 = self.parent.imap4.fetch(str(i), '(BODY.PEEK[HEADER])') status, response2 = self.parent.imap4.fetch(str(i), '(BODY.PEEK[%s])' % (not attachments and "TEXT" or "")) time.sleep(0.1) m = response1[0][1] + response2[0][1] # Cache the raw message for faster retrieval. if cached: cache[id] = m # Parse the raw message. m = email.message_from_string(encode_utf8(m)) d = Message([ (DATE, _decode(m.get(DATE), m)), (FROM, _decode(m.get(FROM), m)), (SUBJECT, _decode(m.get(SUBJECT), m)), (BODY, ""), (ATTACHMENTS, [])]) # Message body can be a list of parts, including file attachments. for p in (m.is_multipart() and m.get_payload() or [m]): if p.get_content_type() == "text/plain": d[BODY] += _decode(p.get_payload(decode=True), p) elif attachments: d[ATTACHMENTS].append((p.get_content_type(), p.get_payload())) for k in d: if isinstance(d[k], basestring): d[k] = d[k].strip() d[k] = d[k].replace("\r\n", "\n") return d def __iter__(self): """ Returns an iterator over all the messages in the folder, latest-first. """ for i in reversed(range(len(self))): yield self[i] def __len__(self): status, response = self.parent.imap4.select(self.name, readonly=1) return int(response[0]) def __repr__(self): return "MailFolder(name=%s)" % repr(self.name) #--- MAIL MESSAGE ---------------------------------------------------------------------------------- class Message(dict): @property def author(self): return self.get(FROM, None) @property def date(self): return self.get(DATE, None) @property def subject(self): return self.get(SUBJECT, "") @property def body(self): return self.get(BODY, "") @property def attachments(self): return self.get(ATTACHMENTS, []) @property def email_address(self): m = re.search(r"<(.*?)>", self.author) return m and m.group(1) or "" def __repr__(self): return "Message(from=%s, subject=%s)" % ( repr(self.author), repr(self.subject))
{ "content_hash": "b08e28cdc344881098240d0378050f5a", "timestamp": "", "source": "github", "line_count": 305, "max_line_length": 119, "avg_line_length": 33.13770491803279, "alnum_prop": 0.523498565350747, "repo_name": "decebel/dataAtom_alpha", "id": "ae2ee31a089216b15f4da8db491828d0d3a0a732", "size": "10493", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "bin/plug/py/external/pattern/web/imap/__init__.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "C", "bytes": "485271" }, { "name": "C++", "bytes": "797264" }, { "name": "JavaScript", "bytes": "192237" }, { "name": "Objective-C", "bytes": "13917" }, { "name": "Python", "bytes": "1608265" } ], "symlink_target": "" }
""" Autocompletion example. Press [Tab] to complete the current word. - The first Tab press fills in the common part of all completions. - The second Tab press shows all the completions. (In the menu) - Any following tab press cycles through all the possible completions. """ from __future__ import unicode_literals from prompt_toolkit.contrib.completers import WordCompleter from prompt_toolkit import prompt animal_completer = WordCompleter([ 'alligator', 'ant', 'ape', 'bat', 'bear', 'beaver', 'bee', 'bison', 'butterfly', 'cat', 'chicken', 'crocodile', 'dinosaur', 'dog', 'dolphine', 'dove', 'duck', 'eagle', 'elephant', 'fish', 'goat', 'gorilla', 'kangoroo', 'leopard', 'lion', 'mouse', 'rabbit', 'rat', 'snake', 'spider', 'turkey', 'turtle', ], ignore_case=True) def main(): text = prompt('Give some animals: ', completer=animal_completer) print('You said: %s' % text) if __name__ == '__main__': main()
{ "content_hash": "6228a126890a51c0bd55f632853d3c66", "timestamp": "", "source": "github", "line_count": 57, "max_line_length": 70, "avg_line_length": 18.54385964912281, "alnum_prop": 0.5941343424787133, "repo_name": "niklasf/python-prompt-toolkit", "id": "1b20a90f4bf6c4a2415979b1392982aa22f3fb7a", "size": "1079", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "examples/autocompletion.py", "mode": "33261", "license": "bsd-3-clause", "language": [ { "name": "Python", "bytes": "635808" } ], "symlink_target": "" }
"""Utility functions for NSynth.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import importlib # internal imports import librosa import numpy as np import scipy.io.wavfile import tensorflow as tf slim = tf.contrib.slim #=============================================================================== # WaveNet Functions #=============================================================================== def get_module(module_path): """Imports module from NSynth directory. Args: module_path: Path to module separated by dots. -> "configs.linear" Returns: module: Imported module. """ import_path = "magenta.models.nsynth." module = importlib.import_module(import_path + module_path) return module def load_wav(path): """Load a wav file and convert to floats within [-1, 1]. Args: path: The CNS file path from which we load. Returns: The 16bit data in the range [-1, 1]. """ _, data_16bit = scipy.io.wavfile.read(path) # Assert we are working with 16-bit audio. #assert data_16bit.dtype == np.int16 return data_16bit.astype(np.float32) #/ 2**15 def mu_law(x, mu=255): """A TF implementation of Mu-Law encoding. Args: x: The audio samples to encode. mu: The Mu to use in our Mu-Law. Returns: out: The Mu-Law encoded int8 data. """ out = tf.sign(x) * tf.log(1 + mu * tf.abs(x)) / np.log(1 + mu) out = tf.cast(tf.floor(out * 128), tf.int8) return out def inv_mu_law(x, mu=255): """A TF implementation of inverse Mu-Law. Args: x: The Mu-Law samples to decode. mu: The Mu we used to encode these samples. Returns: out: The decoded data. """ x = tf.cast(x, tf.float32) out = (x + 0.5) * 2. / (mu + 1) out = tf.sign(out) / mu * ((1 + mu)**tf.abs(out) - 1) out = tf.where(tf.equal(x, 0), x, out) return out #=============================================================================== # Baseline Functions #=============================================================================== #--------------------------------------------------- # Pre/Post-processing #--------------------------------------------------- def get_optimizer(learning_rate, hparams): """Get the tf.train.Optimizer for this optimizer string. Args: learning_rate: The learning_rate tensor. hparams: TF.HParams object with the optimizer and momentum values. Returns: optimizer: The tf.train.Optimizer based on the optimizer string. """ return { "rmsprop": tf.RMSPropOptimizer( learning_rate, decay=0.95, momentum=hparams.momentum, epsilon=1e-4), "adam": tf.AdamOptimizer(learning_rate, beta1=0.9, beta2=0.999, epsilon=1e-8), "adagrad": tf.AdagradOptimizer(learning_rate, initial_accumulator_value=1.0), "mom": tf.MomentumOptimizer(learning_rate, momentum=hparams.momentum), "sgd": tf.GradientDescentOptimizer(learning_rate) }.get(hparams.optimizer) def specgram(audio, n_fft=512, hop_length=None, mask=True, log_mag=True, re_im=False, dphase=True, mag_only=False): """Spectrogram using librosa. Args: audio: 1-D array of float32 sound samples. n_fft: Size of the FFT. hop_length: Stride of FFT. Defaults to n_fft/2. mask: Mask the phase derivative by the magnitude. log_mag: Use the logamplitude. re_im: Output Real and Imag. instead of logMag and dPhase. dphase: Use derivative of phase instead of phase. mag_only: Don't return phase. Returns: specgram: [n_fft/2 + 1, audio.size / hop_length, 2]. The first channel is the logamplitude and the second channel is the derivative of phase. """ if not hop_length: hop_length = int(n_fft / 2.) fft_config = dict(n_fft=n_fft, win_length=n_fft, hop_length=hop_length, center=True) spec = librosa.stft(audio, **fft_config) if re_im: re = spec.real[:, :, np.newaxis] im = spec.imag[:, :, np.newaxis] spec_real = np.concatenate((re, im), axis=2) else: mag, phase = librosa.core.magphase(spec) phase_angle = np.angle(phase) # Magnitudes, scaled 0-1 if log_mag: mag = (librosa.logamplitude( mag**2, amin=1e-13, top_db=120., ref_power=np.max) / 120.) + 1 else: mag /= mag.max() if dphase: # Derivative of phase phase_unwrapped = np.unwrap(phase_angle) p = phase_unwrapped[:, 1:] - phase_unwrapped[:, :-1] p = np.concatenate([phase_unwrapped[:, 0:1], p], axis=1) / np.pi else: # Normal phase p = phase_angle / np.pi # Mask the phase if log_mag and mask: p = mag * p # Return Mag and Phase p = p.astype(np.float32)[:, :, np.newaxis] mag = mag.astype(np.float32)[:, :, np.newaxis] if mag_only: spec_real = mag[:, :, np.newaxis] else: spec_real = np.concatenate((mag, p), axis=2) return spec_real def inv_magphase(mag, phase_angle): phase = np.cos(phase_angle) + 1.j * np.sin(phase_angle) return mag * phase def griffin_lim(mag, phase_angle, n_fft, hop, num_iters): """Iterative algorithm for phase retrival from a magnitude spectrogram. Args: mag: Magnitude spectrogram. phase_angle: Initial condition for phase. n_fft: Size of the FFT. hop: Stride of FFT. Defaults to n_fft/2. num_iters: Griffin-Lim iterations to perform. Returns: audio: 1-D array of float32 sound samples. """ fft_config = dict(n_fft=n_fft, win_length=n_fft, hop_length=hop, center=True) ifft_config = dict(win_length=n_fft, hop_length=hop, center=True) complex_specgram = inv_magphase(mag, phase_angle) for i in xrange(num_iters): audio = librosa.istft(complex_specgram, **ifft_config) if i != num_iters - 1: complex_specgram = librosa.stft(audio, **fft_config) _, phase = librosa.magphase(complex_specgram) phase_angle = np.angle(phase) complex_specgram = inv_magphase(mag, phase_angle) return audio def ispecgram(spec, n_fft=512, hop_length=None, mask=True, log_mag=True, re_im=False, dphase=True, mag_only=True, num_iters=1000): """Inverse Spectrogram using librosa. Args: spec: 3-D specgram array [freqs, time, (mag_db, dphase)]. n_fft: Size of the FFT. hop_length: Stride of FFT. Defaults to n_fft/2. mask: Reverse the mask of the phase derivative by the magnitude. log_mag: Use the logamplitude. re_im: Output Real and Imag. instead of logMag and dPhase. dphase: Use derivative of phase instead of phase. mag_only: Specgram contains no phase. num_iters: Number of griffin-lim iterations for mag_only. Returns: audio: 1-D array of sound samples. Peak normalized to 1. """ if not hop_length: hop_length = n_fft // 2 ifft_config = dict(win_length=n_fft, hop_length=hop_length, center=True) if mag_only: mag = spec[:, :, 0] phase_angle = np.pi*np.random.rand(*mag.shape) elif re_im: spec_real = spec[:, :, 0] + 1.j * spec[:, :, 1] else: mag, p = spec[:, :, 0], spec[:, :, 1] if mask and log_mag: p /= (mag + 1e-13*np.random.randn(*mag.shape)) if dphase: # Roll up phase phase_angle = np.cumsum(p*np.pi, axis=1) else: phase_angle = p * np.pi # Magnitudes if log_mag: mag = (mag - 1.0) * 120.0 mag = 10**(mag / 20.0) phase = np.cos(phase_angle) + 1.j*np.sin(phase_angle) spec_real = mag * phase if mag_only: audio = griffin_lim( mag, phase_angle, n_fft, hop_length, num_iters=num_iters) else: audio = librosa.core.istft(spec_real, **ifft_config) return np.squeeze(audio / audio.max()) def batch_specgram(audio, n_fft=512, hop_length=None, mask=True, log_mag=True, re_im=False, dphase=True, mag_only=False): assert len(audio.shape) == 2 batch_size = audio.shape[0] res = [] for b in range(batch_size): res.append( specgram(audio[b], n_fft, hop_length, mask, log_mag, re_im, dphase, mag_only)) return np.array(res) def batch_ispecgram(spec, n_fft=512, hop_length=None, mask=True, log_mag=True, re_im=False, dphase=True, mag_only=False, num_iters=1000): assert len(spec.shape) == 4 batch_size = spec.shape[0] res = [] for b in range(batch_size): res.append(ispecgram(spec[b, :, :, :], n_fft, hop_length, mask, log_mag, re_im, dphase, mag_only, num_iters)) return np.array(res) def tf_specgram(audio, n_fft=512, hop_length=None, mask=True, log_mag=True, re_im=False, dphase=True, mag_only=False): return tf.py_func(batch_specgram, [ audio, n_fft, hop_length, mask, log_mag, re_im, dphase, mag_only ], tf.float32) def tf_ispecgram(spec, n_fft=512, hop_length=None, mask=True, pad=True, log_mag=True, re_im=False, dphase=True, mag_only=False, num_iters=1000): dims = spec.get_shape().as_list() # Add back in nyquist frequency x = spec if not pad else tf.concat( [spec, tf.zeros([dims[0], 1, dims[2], dims[3]])], 1) audio = tf.py_func(batch_ispecgram, [x, n_fft, hop_length, mask, log_mag, re_im, dphase, mag_only, num_iters], tf.float32) return audio #--------------------------------------------------- # Summaries #--------------------------------------------------- def form_image_grid(input_tensor, grid_shape, image_shape, num_channels): """Arrange a minibatch of images into a grid to form a single image. Args: input_tensor: Tensor. Minibatch of images to format, either 4D ([batch size, height, width, num_channels]) or flattened ([batch size, height * width * num_channels]). grid_shape: Sequence of int. The shape of the image grid, formatted as [grid_height, grid_width]. image_shape: Sequence of int. The shape of a single image, formatted as [image_height, image_width]. num_channels: int. The number of channels in an image. Returns: Tensor representing a single image in which the input images have been arranged into a grid. Raises: ValueError: The grid shape and minibatch size don't match, or the image shape and number of channels are incompatible with the input tensor. """ if grid_shape[0] * grid_shape[1] != int(input_tensor.get_shape()[0]): raise ValueError("Grid shape incompatible with minibatch size.") if len(input_tensor.get_shape()) == 2: num_features = image_shape[0] * image_shape[1] * num_channels if int(input_tensor.get_shape()[1]) != num_features: raise ValueError("Image shape and number of channels incompatible with " "input tensor.") elif len(input_tensor.get_shape()) == 4: if (int(input_tensor.get_shape()[1]) != image_shape[0] or int(input_tensor.get_shape()[2]) != image_shape[1] or int(input_tensor.get_shape()[3]) != num_channels): raise ValueError("Image shape and number of channels incompatible with " "input tensor.") else: raise ValueError("Unrecognized input tensor format.") height, width = grid_shape[0] * image_shape[0], grid_shape[1] * image_shape[1] input_tensor = tf.reshape(input_tensor, grid_shape + image_shape + [num_channels]) input_tensor = tf.transpose(input_tensor, [0, 1, 3, 2, 4]) input_tensor = tf.reshape( input_tensor, [grid_shape[0], width, image_shape[0], num_channels]) input_tensor = tf.transpose(input_tensor, [0, 2, 1, 3]) input_tensor = tf.reshape(input_tensor, [1, height, width, num_channels]) return input_tensor def specgram_summaries(spec, name, hparams, rows=4, columns=4, image=True, phase=True, audio=True): """Post summaries of a specgram (Image and Audio). For image summaries, creates a rows x columns composite image from the batch. Also can create audio summaries for raw audio, but hparams.raw_audio must be True. Args: spec: Batch of spectrograms. name: String prepended to summaries. hparams: Hyperparamenters. rows: Int, number of rows in image. columns: Int, number of columns in image. image: Bool, create image summary. phase: Bool, create image summary from second channel in the batch. audio: Bool, create audio summaries for each spectrogram in the batch. """ batch_size, n_freq, n_time, unused_channels = spec.get_shape().as_list() # Must divide minibatch evenly b = min(batch_size, rows * columns) if hparams.raw_audio: spec = tf.squeeze(spec) spec /= tf.expand_dims(tf.reduce_max(spec, axis=1), axis=1) tf.summary.audio( name, tf.squeeze(spec), hparams.samples_per_second, max_outputs=b) else: if image: if b % columns != 0: rows = np.floor(np.sqrt(b)) columns = rows else: rows = b / columns tf.summary.image("Mag/%s" % name, form_image_grid(spec[:b, :, :, :1], [rows, columns], [n_freq, n_time], 1)) if phase: tf.summary.image("Phase/%s" % name, form_image_grid(spec[:b, :, :, 1:], [rows, columns], [n_freq, n_time], 1)) if audio: tf.summary.audio( name, tf_ispecgram( spec, n_fft=hparams.n_fft, hop_length=hparams.hop_length, mask=hparams.mask, log_mag=hparams.log_mag, pad=hparams.pad, re_im=hparams.re_im, dphase=hparams.dphase, mag_only=hparams.mag_only), hparams.samples_per_second, max_outputs=b) def calculate_softmax_and_summaries(logits, one_hot_labels, name): """Calculate the softmax cross entropy loss and associated summaries. Args: logits: Tensor of logits, first dimension is batch size. one_hot_labels: Tensor of one hot encoded categorical labels. First dimension is batch size. name: Name to use as prefix for summaries. Returns: loss: Dimensionless tensor representing the mean negative log-probability of the true class. """ loss = tf.nn.softmax_cross_entropy_with_logits( logits=logits, labels=one_hot_labels) loss = tf.reduce_mean(loss) softmax_summaries(loss, logits, one_hot_labels, name) return loss def calculate_sparse_softmax_and_summaries(logits, labels, name): """Calculate the softmax cross entropy loss and associated summaries. Args: logits: Tensor of logits, first dimension is batch size. labels: Tensor of categorical labels [ints]. First dimension is batch size. name: Name to use as prefix for summaries. Returns: loss: Dimensionless tensor representing the mean negative log-probability of the true class. """ loss = tf.nn.sparse_softmax_cross_entropy_with_logits( logits=logits, labels=labels) loss = tf.reduce_mean(loss) softmax_summaries(loss, logits, labels, name) return loss def softmax_summaries(loss, logits, one_hot_labels, name="softmax"): """Create the softmax summaries for this cross entropy loss. Args: loss: Cross-entropy loss. logits: The [batch_size, classes] float tensor representing the logits. one_hot_labels: The float tensor representing actual class ids. If this is [batch_size, classes], then we take the argmax of it first. name: Prepended to summary scope. """ tf.summary.scalar(name + "_loss", loss) one_hot_labels = tf.cond( tf.equal(tf.rank(one_hot_labels), 2), lambda: tf.to_int32(tf.argmax(one_hot_labels, 1)), lambda: tf.to_int32(one_hot_labels)) in_top_1 = tf.nn.in_top_k(logits, one_hot_labels, 1) tf.summary.scalar(name + "_precision@1", tf.reduce_mean(tf.to_float(in_top_1))) in_top_5 = tf.nn.in_top_k(logits, one_hot_labels, 5) tf.summary.scalar(name + "_precision@5", tf.reduce_mean(tf.to_float(in_top_5))) def calculate_l2_and_summaries(predicted_vectors, true_vectors, name): """Calculate L2 loss and associated summaries. Args: predicted_vectors: Tensor of predictions, first dimension is batch size. true_vectors: Tensor of labels, first dimension is batch size. name: Name to use as prefix for summaries. Returns: loss: Dimensionless tensor representing the mean euclidean distance between true and predicted. """ loss = tf.reduce_mean((predicted_vectors - true_vectors)**2) tf.summary.scalar(name + "_loss", loss, name="loss") tf.summary.scalar( name + "_prediction_mean_squared_norm", tf.reduce_mean(tf.nn.l2_loss(predicted_vectors)), name=name + "_prediction_mean_squared_norm") tf.summary.scalar( name + "_label_mean_squared_norm", tf.reduce_mean(tf.nn.l2_loss(true_vectors)), name=name + "_label_mean_squared_norm") return loss def frequency_weighted_cost_mask(peak=10.0, hz_flat=1000, sr=16000, n_fft=512): """Calculates a mask to weight lower frequencies higher. Piecewise linear approximation. Assumes magnitude is in log scale. Args: peak: Cost increase at 0 Hz. hz_flat: Hz at which cost increase is 0. sr: Sample rate. n_fft: FFT size. Returns: Constant tensor [1, N_freq, 1] of cost weighting. """ n = int(n_fft / 2) cutoff = np.where( librosa.core.fft_frequencies(sr=sr, n_fft=n_fft) >= hz_flat)[0][0] mask = np.concatenate([np.linspace(peak, 1.0, cutoff), np.ones(n-cutoff)]) return tf.constant(mask[np.newaxis, :, np.newaxis], dtype=tf.float32) #--------------------------------------------------- # Neural Nets #--------------------------------------------------- def pitch_embeddings(batch, timesteps=1, n_pitches=128, dim_embedding=128, reuse=False): """Get a embedding of each pitch note. Args: batch: NSynthDataset batch dictionary. timesteps: Number of timesteps to replicate across. n_pitches: Number of one-hot embeddings. dim_embedding: Dimension of linear projection of one-hot encoding. reuse: Reuse variables. Returns: embedding: A tensor of shape [batch_size, 1, timesteps, dim_embedding]. """ batch_size = batch["pitch"].get_shape().as_list()[0] with tf.variable_scope("PitchEmbedding", reuse=reuse): w = tf.get_variable( name="embedding_weights", shape=[n_pitches, dim_embedding], initializer=tf.random_normal_initializer()) one_hot_pitch = tf.reshape(batch["pitch"], [batch_size]) one_hot_pitch = tf.one_hot(one_hot_pitch, depth=n_pitches) embedding = tf.matmul(one_hot_pitch, w) embedding = tf.reshape(embedding, [batch_size, 1, 1, dim_embedding]) if timesteps > 1: embedding = tf.tile(embedding, [1, 1, timesteps, 1]) return embedding def slim_batchnorm_arg_scope(is_training, activation_fn=None): """Create a scope for applying BatchNorm in slim. This scope also applies Glorot initializiation to convolutional weights. Args: is_training: Whether this is a training run. activation_fn: Whether we apply an activation_fn to the convolution result. Returns: scope: Use this scope to automatically apply BatchNorm and Xavier Init to slim.conv2d and slim.fully_connected. """ batch_norm_params = { "is_training": is_training, "decay": 0.999, "epsilon": 0.001, "variables_collections": { "beta": None, "gamma": None, "moving_mean": "moving_vars", "moving_variance": "moving_vars", } } with slim.arg_scope( [slim.conv2d, slim.fully_connected, slim.conv2d_transpose], weights_initializer=slim.initializers.xavier_initializer(), activation_fn=activation_fn, normalizer_fn=slim.batch_norm, normalizer_params=batch_norm_params) as scope: return scope def conv2d(x, kernel_size, stride, channels, is_training, scope="conv2d", batch_norm=False, residual=False, gated=False, activation_fn=tf.nn.relu, resize=False, transpose=False, stacked_layers=1): """2D-Conv with optional batch_norm, gating, residual. Args: x: Tensor input [MB, H, W, CH]. kernel_size: List [H, W]. stride: List [H, W]. channels: Int, output channels. is_training: Whether to collect stats for BatchNorm. scope: Enclosing scope name. batch_norm: Apply batch normalization residual: Residual connections, have stacked_layers >= 2. gated: Gating ala Wavenet. activation_fn: Nonlinearity function. resize: On transposed convolution, do ImageResize instead of conv_transpose. transpose: Use conv_transpose instead of conv. stacked_layers: Number of layers before a residual connection. Returns: x: Tensor output. """ # For residual x0 = x # Choose convolution function conv_fn = slim.conv2d_transpose if transpose else slim.conv2d # Double output channels for gates num_outputs = channels * 2 if gated else channels normalizer_fn = slim.batch_norm if batch_norm else None with tf.variable_scope(scope + "_Layer"): # Apply a stack of convolutions Before adding residual for layer_idx in range(stacked_layers): with slim.arg_scope( slim_batchnorm_arg_scope( is_training, activation_fn=None)): # Use interpolation to upsample instead of conv_transpose if transpose and resize: unused_mb, h, w, unused_ch = x.get_shape().as_list() x = tf.image.resize_images( x, size=[h * stride[0], w * stride[1]], method=0) stride_conv = [1, 1] else: stride_conv = stride x = conv_fn( inputs=x, stride=stride_conv, kernel_size=kernel_size, num_outputs=num_outputs, normalizer_fn=normalizer_fn, biases_initializer=tf.zeros_initializer(), scope=scope) if gated: with tf.variable_scope("Gated"): x1, x2 = x[:, :, :, :channels], x[:, :, :, channels:] if activation_fn: x1, x2 = activation_fn(x1), tf.sigmoid(x2) else: x2 = tf.sigmoid(x2) x = x1 * x2 # Apply residual to last layer before the last nonlinearity if residual and (layer_idx == stacked_layers - 1): with tf.variable_scope("Residual"): # Don't upsample residual in time if stride[0] == 1 and stride[1] == 1: channels_in = x0.get_shape().as_list()[-1] # Make n_channels match for residual if channels != channels_in: x0 = slim.conv2d( inputs=x0, stride=[1, 1], kernel_size=[1, 1], num_outputs=channels, normalizer_fn=None, activation_fn=None, biases_initializer=tf.zeros_initializer, scope=scope + "_residual") x += x0 else: x += x0 if activation_fn and not gated: x = activation_fn(x) return x def leaky_relu(leak=0.1): """Leaky ReLU activation function. Args: leak: float. Slope for the negative part of the leaky ReLU function. Defaults to 0.1. Returns: A lambda computing the leaky ReLU function with the specified slope. """ return lambda x: tf.maximum(x, leak * x)
{ "content_hash": "75b3374bb59662171e6b383421e9a7fb", "timestamp": "", "source": "github", "line_count": 720, "max_line_length": 80, "avg_line_length": 33.475, "alnum_prop": 0.5999502116006971, "repo_name": "bda2017-shallowermind/MusTGAN", "id": "30cc0df266157a4765c5f71d5d8e9b0af5a9fa09", "size": "24697", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "magenta/magenta/models/nsynth/utils.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "12668" }, { "name": "HTML", "bytes": "721" }, { "name": "JavaScript", "bytes": "43259" }, { "name": "Jupyter Notebook", "bytes": "2115912" }, { "name": "Protocol Buffer", "bytes": "12931" }, { "name": "Python", "bytes": "1389487" }, { "name": "Shell", "bytes": "8783" } ], "symlink_target": "" }
"""Tests for learned_optimizers.adafac_mlp_lopt.""" from absl.testing import absltest import jax from learned_optimization.learned_optimizers import adafac_mlp_lopt from learned_optimization.learned_optimizers import test_utils import numpy as onp class AdafacMLPLOptTest(absltest.TestCase): def test_adafac_mlp_lopt(self): test_utils.smoketest_learned_optimizer(adafac_mlp_lopt.AdafacMLPLOpt()) def test_split_weights_equal_to_full(self): lopt1 = adafac_mlp_lopt.AdafacMLPLOpt( concat_weights=True, step_mult=10, exp_mult=1) lopt2 = adafac_mlp_lopt.AdafacMLPLOpt( concat_weights=False, split_weights=True, step_mult=10, exp_mult=1) key = jax.random.PRNGKey(0) theta = lopt1.init(key) opt1 = lopt1.opt_fn(theta) opt2 = lopt2.opt_fn(theta) p = (jax.random.normal(key, [2, 2]),) g = (jax.random.normal(key, [2, 2]),) opt_state = opt1.init(p, num_steps=10) print(opt_state) opt_state1 = opt1.update(opt_state, g, 1.0) opt_state2 = opt2.update(opt_state, g, 1.0) diff = opt_state1.params[0] - opt_state2.params[0] self.assertLess(onp.mean(onp.abs(diff)), 1e-6) if __name__ == '__main__': absltest.main()
{ "content_hash": "d3268bc4c491514298a353e25c8eef27", "timestamp": "", "source": "github", "line_count": 39, "max_line_length": 75, "avg_line_length": 30.58974358974359, "alnum_prop": 0.6873428331936295, "repo_name": "google/learned_optimization", "id": "f371ae1f36ce1c7bdd7029c26f0d704a3c258ae8", "size": "1784", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "learned_optimization/learned_optimizers/adafac_mlp_lopt_test.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Jupyter Notebook", "bytes": "177493" }, { "name": "Python", "bytes": "1290675" } ], "symlink_target": "" }
from rest_framework import status from rest_framework.test import APITestCase from cla_common.constants import REASONS_FOR_CONTACTING from checker.models import ReasonForContacting from core.tests.mommy_utils import make_recipe from core.tests.test_base import SimpleResourceAPIMixin from legalaid.tests.views.test_base import CLACheckerAuthBaseApiTestMixin class ReasonsForContactingTestCase(SimpleResourceAPIMixin, CLACheckerAuthBaseApiTestMixin, APITestCase): LOOKUP_KEY = "reference" API_URL_BASE_NAME = "reasons_for_contacting" RESOURCE_RECIPE = "checker.reasonforcontacting" def setUp(self): super(ReasonsForContactingTestCase, self).setUp() # give it a category as it's not auto-generated make_recipe("checker.reasonforcontacting_category", reason_for_contacting=self.resource) def test_retrieval_disallowed(self): self._test_get_not_allowed(self.list_url) self._test_get_not_allowed(self.detail_url) def test_create_model(self): resource = {"reasons": [{"category": REASONS_FOR_CONTACTING.OTHER}], "other_reasons": "lorem ipsum"} response = self.client.post( self.list_url, data=resource, format="json", # HTTP_AUTHORIZATION=self.get_http_authorization() ) self.assertEqual(response.status_code, status.HTTP_201_CREATED) self.assertTrue(response.data["reference"]) self.assertIsNone(response.data["case"]) def test_can_add_case_ref(self): self.assertIsNone(self.resource.case) eligible_case = make_recipe("legalaid.eligible_case") response = self.client.patch( self.detail_url, data={"case": eligible_case.reference}, format="json", # HTTP_AUTHORIZATION=self.get_http_authorization() ) self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.data["case"], eligible_case.reference) self.assertEqual(response.data["reference"], str(self.resource.reference)) def test_stats(self): # only considers shared resource created during setup stats = ReasonForContacting.get_category_stats() self.assertEqual(stats["total_count"], 1) ideal_categories = dict((choice, 0.0) for choice in REASONS_FOR_CONTACTING.CHOICES_DICT) ideal_categories[self.resource.reasons.first().category] = 100.0 categories = dict((stat["key"], stat["percentage"]) for stat in stats["categories"]) self.assertDictEqual(categories, ideal_categories)
{ "content_hash": "d68cda4131c0a591b451a7987a03a950", "timestamp": "", "source": "github", "line_count": 59, "max_line_length": 108, "avg_line_length": 43.86440677966102, "alnum_prop": 0.6943585780525502, "repo_name": "ministryofjustice/cla_backend", "id": "2c8baee1969f257b58d123ad57295b09361962a2", "size": "2588", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "cla_backend/apps/checker/tests/api/test_reasons_for_contacting_api.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "45941" }, { "name": "Dockerfile", "bytes": "1272" }, { "name": "HTML", "bytes": "14794" }, { "name": "JavaScript", "bytes": "2762" }, { "name": "Mustache", "bytes": "3607" }, { "name": "Python", "bytes": "1577558" }, { "name": "Shell", "bytes": "11204" }, { "name": "Smarty", "bytes": "283906" } ], "symlink_target": "" }
import pretend import pytest from pyramid.httpexceptions import HTTPMovedPermanently, HTTPSeeOther from warehouse.accounts import views from warehouse.accounts.interfaces import IUserService from ...common.db.accounts import UserFactory class TestUserProfile: def test_user_redirects_username(self, db_request): user = UserFactory.create() if user.username.upper() != user.username: username = user.username.upper() else: username = user.username.lower() db_request.current_route_path = pretend.call_recorder( lambda username: "/user/the-redirect/" ) db_request.matchdict = {"username": username} result = views.profile(user, db_request) assert isinstance(result, HTTPMovedPermanently) assert result.headers["Location"] == "/user/the-redirect/" assert db_request.current_route_path.calls == [ pretend.call(username=user.username), ] def test_returns_user(self, db_request): user = UserFactory.create() assert views.profile(user, db_request) == { "user": user, "projects": [], } class TestLogin: @pytest.mark.parametrize("next_url", [None, "/foo/bar/", "/wat/"]) def test_get_returns_form(self, pyramid_request, next_url): login_service = pretend.stub() pyramid_request.find_service = pretend.call_recorder( lambda iface, context: login_service ) form_obj = pretend.stub() form_class = pretend.call_recorder(lambda d, login_service: form_obj) if next_url is not None: pyramid_request.GET["next"] = next_url result = views.login(pyramid_request, _form_class=form_class) assert result == { "form": form_obj, "redirect": {"field": "next", "data": next_url}, } assert pyramid_request.find_service.calls == [ pretend.call(IUserService, context=None), ] assert form_class.calls == [ pretend.call(pyramid_request.POST, login_service=login_service), ] @pytest.mark.parametrize("next_url", [None, "/foo/bar/", "/wat/"]) def test_post_invalid_returns_form(self, pyramid_request, next_url): login_service = pretend.stub() pyramid_request.find_service = pretend.call_recorder( lambda iface, context: login_service ) pyramid_request.method = "POST" if next_url is not None: pyramid_request.POST["next"] = next_url form_obj = pretend.stub(validate=pretend.call_recorder(lambda: False)) form_class = pretend.call_recorder(lambda d, login_service: form_obj) result = views.login(pyramid_request, _form_class=form_class) assert result == { "form": form_obj, "redirect": {"field": "next", "data": next_url}, } assert pyramid_request.find_service.calls == [ pretend.call(IUserService, context=None), ] assert form_class.calls == [ pretend.call(pyramid_request.POST, login_service=login_service), ] assert form_obj.validate.calls == [pretend.call()] @pytest.mark.parametrize("with_user", [True, False]) def test_post_validate_redirects(self, monkeypatch, pyramid_request, with_user): remember = pretend.call_recorder( lambda request, user_id: [("foo", "bar")] ) monkeypatch.setattr(views, "remember", remember) new_session = {} login_service = pretend.stub( find_userid=pretend.call_recorder(lambda username: 1), ) pyramid_request.find_service = pretend.call_recorder( lambda iface, context: login_service ) pyramid_request.method = "POST" pyramid_request.session = pretend.stub( items=lambda: [("a", "b"), ("foo", "bar")], update=new_session.update, invalidate=pretend.call_recorder(lambda: None), new_csrf_token=pretend.call_recorder(lambda: None), ) pyramid_request.set_property( lambda r: 1234 if with_user else None, name="unauthenticated_userid", ) form_obj = pretend.stub( validate=pretend.call_recorder(lambda: True), username=pretend.stub(data="theuser"), ) form_class = pretend.call_recorder(lambda d, login_service: form_obj) result = views.login(pyramid_request, _form_class=form_class) assert isinstance(result, HTTPSeeOther) assert result.headers["Location"] == "/" assert result.headers["foo"] == "bar" assert form_class.calls == [ pretend.call(pyramid_request.POST, login_service=login_service), ] assert form_obj.validate.calls == [pretend.call()] assert login_service.find_userid.calls == [pretend.call("theuser")] if with_user: assert new_session == {} else: assert new_session == {"a": "b", "foo": "bar"} assert remember.calls == [pretend.call(pyramid_request, 1)] assert pyramid_request.session.invalidate.calls == [pretend.call()] assert pyramid_request.find_service.calls == [ pretend.call(IUserService, context=None), ] assert pyramid_request.session.new_csrf_token.calls == [pretend.call()] @pytest.mark.parametrize( # The set of all possible next URLs. Since this set is infinite, we # test only a finite set of reasonable URLs. ("expected_next_url, observed_next_url"), [ ("/security/", "/security/"), ("http://example.com", "/"), ], ) def test_post_validate_no_redirects(self, pyramid_request, expected_next_url, observed_next_url): login_service = pretend.stub( find_userid=pretend.call_recorder(lambda username: 1), ) pyramid_request.find_service = pretend.call_recorder( lambda iface, context: login_service ) pyramid_request.method = "POST" pyramid_request.POST["next"] = expected_next_url form_obj = pretend.stub( validate=pretend.call_recorder(lambda: True), username=pretend.stub(data="theuser"), ) form_class = pretend.call_recorder(lambda d, login_service: form_obj) result = views.login(pyramid_request, _form_class=form_class) assert isinstance(result, HTTPSeeOther) assert result.headers["Location"] == observed_next_url class TestLogout: @pytest.mark.parametrize("next_url", [None, "/foo/bar/", "/wat/"]) def test_get_returns_empty(self, pyramid_request, next_url): if next_url is not None: pyramid_request.GET["next"] = next_url assert views.logout(pyramid_request) == \ {"redirect": {"field": "next", "data": next_url}} def test_post_forgets_user(self, monkeypatch, pyramid_request): forget = pretend.call_recorder(lambda request: [("foo", "bar")]) monkeypatch.setattr(views, "forget", forget) pyramid_request.method = "POST" pyramid_request.session = pretend.stub( invalidate=pretend.call_recorder(lambda: None), ) result = views.logout(pyramid_request) assert isinstance(result, HTTPSeeOther) assert result.headers["Location"] == "/" assert result.headers["foo"] == "bar" assert forget.calls == [pretend.call(pyramid_request)] assert pyramid_request.session.invalidate.calls == [pretend.call()] @pytest.mark.parametrize( # The set of all possible next URLs. Since this set is infinite, we # test only a finite set of reasonable URLs. ("expected_next_url, observed_next_url"), [ ("/security/", "/security/"), ("http://example.com", "/"), ], ) def test_post_redirects_user(self, pyramid_request, expected_next_url, observed_next_url): pyramid_request.method = "POST" pyramid_request.POST["next"] = expected_next_url result = views.logout(pyramid_request) assert isinstance(result, HTTPSeeOther) assert result.headers["Location"] == observed_next_url
{ "content_hash": "c9b28d5c7dfa2d8c0df5541fc35bab89", "timestamp": "", "source": "github", "line_count": 235, "max_line_length": 79, "avg_line_length": 35.87659574468085, "alnum_prop": 0.598149685683786, "repo_name": "ismail-s/warehouse", "id": "f0add757b64e3667c7d0c6867358df49a2785659", "size": "8972", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/unit/accounts/test_views.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "73344" }, { "name": "HTML", "bytes": "33890" }, { "name": "JavaScript", "bytes": "7178" }, { "name": "Makefile", "bytes": "2466" }, { "name": "Mako", "bytes": "911" }, { "name": "Perl", "bytes": "6993" }, { "name": "Python", "bytes": "597758" }, { "name": "Shell", "bytes": "2221" } ], "symlink_target": "" }
import json import logging import sys from redash.query_runner import * from redash.utils import JSONEncoder logger = logging.getLogger(__name__) try: import pymssql enabled = True except ImportError: enabled = False # from _mssql.pyx ## DB-API type definitions & http://www.freetds.org/tds.html#types ## types_map = { 1: TYPE_STRING, 2: TYPE_BOOLEAN, 3: TYPE_INTEGER, 4: TYPE_DATETIME, 5: TYPE_FLOAT, } class SqlServer(BaseSQLQueryRunner): @classmethod def configuration_schema(cls): return { "type": "object", "properties": { "user": { "type": "string" }, "password": { "type": "string" }, "server": { "type": "string", "default": "127.0.0.1" }, "port": { "type": "number", "default": 1433 }, "db": { "type": "string", "title": "Database Name" } }, "required": ["db"], "secret": ["password"] } @classmethod def enabled(cls): return enabled @classmethod def type(cls): return "mssql" def __init__(self, configuration): super(SqlServer, self).__init__(configuration) def _get_tables(self, schema): query = """ SELECT table_schema, table_name, column_name FROM information_schema.columns WHERE table_schema NOT IN ('guest','INFORMATION_SCHEMA','sys','db_owner','db_accessadmin' ,'db_securityadmin','db_ddladmin','db_backupoperator','db_datareader' ,'db_datawriter','db_denydatareader','db_denydatawriter' ); """ results, error = self.run_query(query) if error is not None: raise Exception("Failed getting schema.") results = json.loads(results) for row in results['rows']: if row['table_schema'] != self.configuration['db']: table_name = '{}.{}'.format(row['table_schema'], row['table_name']) else: table_name = row['table_name'] if table_name not in schema: schema[table_name] = {'name': table_name, 'columns': []} schema[table_name]['columns'].append(row['column_name']) return schema.values() def run_query(self, query): connection = None try: server = self.configuration.get('server', '') user = self.configuration.get('user', '') password = self.configuration.get('password', '') db = self.configuration['db'] port = self.configuration.get('port', 1433) if port != 1433: server = server + ':' + str(port) connection = pymssql.connect(server, user, password, db) cursor = connection.cursor() logger.debug("SqlServer running query: %s", query) cursor.execute(query) data = cursor.fetchall() if cursor.description is not None: columns = self.fetch_columns([(i[0], types_map.get(i[1], None)) for i in cursor.description]) rows = [dict(zip((c['name'] for c in columns), row)) for row in data] data = {'columns': columns, 'rows': rows} json_data = json.dumps(data, cls=JSONEncoder) error = None else: error = "No data was returned." json_data = None cursor.close() except pymssql.Error as e: logging.exception(e) try: # Query errors are at `args[1]` error = e.args[1] except IndexError: # Connection errors are `args[0][1]` error = e.args[0][1] json_data = None except KeyboardInterrupt: connection.cancel() error = "Query cancelled by user." json_data = None except Exception as e: raise sys.exc_info()[1], None, sys.exc_info()[2] finally: if connection: connection.close() return json_data, error register(SqlServer)
{ "content_hash": "8044ce5c6ad241bacfa21dbe1ab998fb", "timestamp": "", "source": "github", "line_count": 150, "max_line_length": 109, "avg_line_length": 29.753333333333334, "alnum_prop": 0.4920457091642393, "repo_name": "olivetree123/redash-x", "id": "0ec05bdf07a245c4eb5d76c292e51c56d4a97feb", "size": "4463", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "redash/query_runner/mssql.py", "mode": "33188", "license": "bsd-2-clause", "language": [ { "name": "CSS", "bytes": "9516" }, { "name": "HTML", "bytes": "93714" }, { "name": "JavaScript", "bytes": "218148" }, { "name": "Makefile", "bytes": "1009" }, { "name": "Nginx", "bytes": "561" }, { "name": "Python", "bytes": "362451" }, { "name": "Ruby", "bytes": "709" }, { "name": "Shell", "bytes": "43392" } ], "symlink_target": "" }
"""Commonly used utility functions.""" import re import copy import warnings from collections.abc import Iterable from copy import deepcopy import numpy as np from scipy.spatial.distance import cdist from astropy.time import Time from astropy.coordinates import Angle from astropy.utils import iers from astropy.coordinates import SkyCoord, Distance, EarthLocation from astropy import units import erfa from . import _utils __all__ = [ "POL_STR2NUM_DICT", "POL_NUM2STR_DICT", "CONJ_POL_DICT", "JONES_STR2NUM_DICT", "JONES_NUM2STR_DICT", "LatLonAlt_from_XYZ", "XYZ_from_LatLonAlt", "rotECEF_from_ECEF", "ECEF_from_rotECEF", "ENU_from_ECEF", "ECEF_from_ENU", "phase_uvw", "unphase_uvw", "uvcalibrate", "apply_uvflag", "get_lst_for_time", "polstr2num", "polnum2str", "jstr2num", "jnum2str", "parse_polstr", "parse_jpolstr", "conj_pol", "reorder_conj_pols", "baseline_to_antnums", "antnums_to_baseline", "baseline_index_flip", "get_baseline_redundancies", "get_antenna_redundancies", "collapse", "mean_collapse", "absmean_collapse", "quadmean_collapse", "or_collapse", "and_collapse", ] # fmt: off # polarization constants # maps polarization strings to polarization integers POL_STR2NUM_DICT = {"pI": 1, "pQ": 2, "pU": 3, "pV": 4, "I": 1, "Q": 2, "U": 3, "V": 4, # support straight stokes names "rr": -1, "ll": -2, "rl": -3, "lr": -4, "xx": -5, "yy": -6, "xy": -7, "yx": -8, "hh": -5, "vv": -6, "hv": -7, "vh": -8} # maps polarization integers to polarization strings POL_NUM2STR_DICT = {1: "pI", 2: "pQ", 3: "pU", 4: "pV", -1: "rr", -2: "ll", -3: "rl", -4: "lr", -5: "xx", -6: "yy", -7: "xy", -8: "yx"} # maps how polarizations change when antennas are swapped CONJ_POL_DICT = {"xx": "xx", "yy": "yy", "xy": "yx", "yx": "xy", "ee": "ee", "nn": "nn", "en": "ne", "ne": "en", "rr": "rr", "ll": "ll", "rl": "lr", "lr": "rl", "I": "I", "Q": "Q", "U": "U", "V": "V", "pI": "pI", "pQ": "pQ", "pU": "pU", "pV": "pV"} # maps jones matrix element strings to jones integers # Add entries that don't start with "J" to allow shorthand versions JONES_STR2NUM_DICT = {"Jxx": -5, "Jyy": -6, "Jxy": -7, "Jyx": -8, "xx": -5, "x": -5, "yy": -6, "y": -6, "xy": -7, "yx": -8, "Jrr": -1, "Jll": -2, "Jrl": -3, "Jlr": -4, "rr": -1, "r": -1, "ll": -2, "l": -2, "rl": -3, "lr": -4} # maps jones integers to jones matrix element strings JONES_NUM2STR_DICT = {-1: "Jrr", -2: "Jll", -3: "Jrl", -4: "Jlr", -5: "Jxx", -6: "Jyy", -7: "Jxy", -8: "Jyx"} # maps uvdata pols to input feed polarizations POL_TO_FEED_DICT = {"xx": ["x", "x"], "yy": ["y", "y"], "xy": ["x", "y"], "yx": ["y", "x"], "ee": ["e", "e"], "nn": ["n", "n"], "en": ["e", "n"], "ne": ["n", "e"], "rr": ["r", "r"], "ll": ["l", "l"], "rl": ["r", "l"], "lr": ["l", "r"]} # fmt: on def _get_iterable(x): """Return iterable version of input.""" if isinstance(x, Iterable): return x else: return (x,) def _fits_gethduaxis(hdu, axis): """ Make axis arrays for fits files. Parameters ---------- hdu : astropy.io.fits HDU object The HDU to make an axis array for. axis : int The axis number of interest (1-based). Returns ------- ndarray of float Array of values for the specified axis. """ ax = str(axis) axis_num = hdu.header["NAXIS" + ax] val = hdu.header["CRVAL" + ax] delta = hdu.header["CDELT" + ax] index = hdu.header["CRPIX" + ax] - 1 return delta * (np.arange(axis_num) - index) + val def _fits_indexhdus(hdulist): """ Get a dict of table names and HDU numbers from a FITS HDU list. Parameters ---------- hdulist : list of astropy.io.fits HDU objects List of HDUs to get names for Returns ------- dict dictionary with table names as keys and HDU number as values. """ tablenames = {} for i in range(len(hdulist)): try: tablenames[hdulist[i].header["EXTNAME"]] = i except (KeyError): continue return tablenames def _get_fits_extra_keywords(header, keywords_to_skip=None): """ Get any extra keywords and return as dict. Parameters ---------- header : FITS header object header object to get extra_keywords from. keywords_to_skip : list of str list of keywords to not include in extra keywords in addition to standard FITS keywords. Returns ------- dict dict of extra keywords. """ # List standard FITS header items that are still should not be included in # extra_keywords # These are the beginnings of FITS keywords to ignore, the actual keywords # often include integers following these names (e.g. NAXIS1, CTYPE3) std_fits_substrings = [ "HISTORY", "SIMPLE", "BITPIX", "EXTEND", "BLOCKED", "GROUPS", "PCOUNT", "BSCALE", "BZERO", "NAXIS", "PTYPE", "PSCAL", "PZERO", "CTYPE", "CRVAL", "CRPIX", "CDELT", "CROTA", "CUNIT", ] if keywords_to_skip is not None: std_fits_substrings.extend(keywords_to_skip) extra_keywords = {} # find all the other header items and keep them as extra_keywords for key in header: # check if key contains any of the standard FITS substrings if np.any([sub in key for sub in std_fits_substrings]): continue if key == "COMMENT": extra_keywords[key] = str(header.get(key)) elif key != "": extra_keywords[key] = header.get(key) return extra_keywords def _check_history_version(history, version_string): """Check if version_string is present in history string.""" if version_string.replace(" ", "") in history.replace("\n", "").replace(" ", ""): return True else: return False def _check_histories(history1, history2): """Check if two histories are the same.""" if history1.replace("\n", "").replace(" ", "") == history2.replace( "\n", "" ).replace(" ", ""): return True else: return False def _combine_history_addition(history1, history2): """ Find extra history to add to have minimal repeats. Parameters ---------- history1 : str First history. history2 : str Second history Returns ------- str Extra history to add to first history. """ # first check if they're the same to avoid more complicated processing. if _check_histories(history1, history2): return None hist2_words = history2.split(" ") add_hist = "" test_hist1 = " " + history1 + " " for i, word in enumerate(hist2_words): if " " + word + " " not in test_hist1: add_hist += " " + word keep_going = i + 1 < len(hist2_words) while keep_going: if (hist2_words[i + 1] == " ") or ( " " + hist2_words[i + 1] + " " not in test_hist1 ): add_hist += " " + hist2_words[i + 1] del hist2_words[i + 1] keep_going = i + 1 < len(hist2_words) else: keep_going = False if add_hist == "": add_hist = None return add_hist def _test_array_constant(array, tols=None): """ Check if an array contains constant values to some tolerance. Uses np.isclose on the min & max of the arrays with the given tolerances. Parameters ---------- array : np.ndarray or UVParameter UVParameter or array to check for constant values. tols : tuple of float, optional length 2 tuple giving (rtol, atol) to pass to np.isclose, defaults to (0, 0) if passing an array, otherwise defaults to using the tolerance on the UVParameter. Returns ------- bool True if the array is constant to the given tolerances, False otherwise. """ # Import UVParameter here rather than at the top to avoid circular imports from pyuvdata.parameter import UVParameter if isinstance(array, UVParameter): array_to_test = array.value if tols is None: tols = array.tols else: array_to_test = array if tols is None: tols = (0, 0) assert isinstance(tols, tuple), "tols must be a length-2 tuple" assert len(tols) == 2, "tols must be a length-2 tuple" if array_to_test.size == 1: # arrays with 1 element are constant by definition return True # if min and max are equal don't bother with tolerance checking if np.min(array_to_test) == np.max(array_to_test): return True return np.isclose( np.min(array_to_test), np.max(array_to_test), rtol=tols[0], atol=tols[1], ) def _test_array_constant_spacing(array, tols=None): """ Check if an array is constantly spaced to some tolerance. Calls _test_array_constant on the np.diff of the array. Parameters ---------- array : np.ndarray or UVParameter UVParameter or array to check for constant spacing. tols : tuple of float, optional length 2 tuple giving (rtol, atol) to pass to np.isclose, defaults to (0, 0) if passing an array, otherwise defaults to using the tolerance on the UVParameter. Returns ------- bool True if the array spacing is constant to the given tolerances, False otherwise. """ # Import UVParameter here rather than at the top to avoid circular imports from pyuvdata.parameter import UVParameter if isinstance(array, UVParameter): array_to_test = array.value if tols is None: tols = array.tols else: array_to_test = array if tols is None: tols = (0, 0) assert isinstance(tols, tuple), "tols must be a length-2 tuple" assert len(tols) == 2, "tols must be a length-2 tuple" if array_to_test.size <= 2: # arrays with 1 or 2 elements are constantly spaced by definition return True array_diff = np.diff(array_to_test) return _test_array_constant(array_diff, tols=tols) def _check_flex_spw_contiguous(spw_array, flex_spw_id_array): """ Check if the spectral windows are contiguous for flex_spw datasets. This checks the flex_spw_id_array to make sure that all channels for each spectral window are together in one block, versus being interspersed (e.g., channel #1 and #3 is in spw #1, channels #2 and #4 are in spw #2). In theory, UVH5 and UVData objects can handle this, but MIRIAD, MIR, UVFITS, and MS file formats cannot, so we just consider it forbidden. Parameters ---------- spw_array : array of integers Array of spectral window numbers, shape (Nspws,). flex_spw_id_array : array of integers Array of spectral window numbers per frequency channel, shape (Nfreqs,). """ exp_spw_ids = np.unique(spw_array) # This is an internal consistency check to make sure that the indexes match # up as expected -- this shouldn't error unless someone is mucking with # settings they shouldn't be. assert np.all(np.unique(flex_spw_id_array) == exp_spw_ids), ( "There are some entries in flex_spw_id_array that are not in spw_array. " "This is a bug, please report it in an issue." ) n_breaks = np.sum(flex_spw_id_array[1:] != flex_spw_id_array[:-1]) if (n_breaks + 1) != spw_array.size: raise ValueError( "Channels from different spectral windows are interspersed with " "one another, rather than being grouped together along the " "frequency axis. Most file formats do not support such " "non-grouping of data." ) return True def _check_freq_spacing( freq_array, freq_tols, channel_width, channel_width_tols, flex_spw, future_array_shapes, spw_array, flex_spw_id_array, raise_errors=True, ): """ Check if frequencies are evenly spaced and separated by their channel width. This is a requirement for writing uvfits & miriad files. Parameters ---------- freq_array : array of float Array of frequencies, shape (1, Nfreqs) or (Nfreqs,) if future_array_shapes=True freq_tols : tuple of float freq_array tolerances (from uvobj._freq_array.tols). channel_width : float or array of float Channel widths, either a scalar or an array of shape (Nfreqs,) if flex_spw=True and/or future_array_shapes=True. channel_width_tols : tuple of float channel_width tolerances (from uvobj._channel_width.tols). future_array_shapes : bool Indicates that parameters have future shapes. flex_spw : bool Indicates there are flexible spectral windows. spw_array : array of integers or None Array of spectral window numbers, shape (Nspws,). Required if flex_spw is True. flex_spw_id_array : array of integers or None Array of spectral window numbers per frequency channel, shape (Nfreqs,). Required if flex_spw is True. raise_errors : bool Option to raise errors if the various checks do not pass. Returns ------- spacing_error : bool Flag that channel spacings or channel widths are not equal. chanwidth_error : bool Flag that channel spacing does not match channel width. """ spacing_error = False chanwidth_error = False Nfreqs = freq_array.size if future_array_shapes: freq_spacing = np.diff(freq_array) freq_array_use = freq_array else: freq_spacing = np.diff(freq_array[0]) freq_array_use = freq_array[0] if Nfreqs == 1: # Skip all of this if there is only 1 channel pass elif flex_spw: # Check to make sure that the flexible spectral window has indicies set up # correctly (grouped together) for this check _check_flex_spw_contiguous(spw_array, flex_spw_id_array) diff_chanwidth = np.diff(channel_width) freq_dir = [] # We want to grab unique spw IDs, in the order that they appear in the data select_mask = np.append((np.diff(flex_spw_id_array) != 0), True) for idx in flex_spw_id_array[select_mask]: chan_mask = flex_spw_id_array == idx diffs = np.diff(freq_array_use[chan_mask]) if diffs.size > 0: freq_dir += [np.sign(np.mean(diffs))] * np.sum(chan_mask) else: freq_dir += [1.0] # Pop off the first entry, since the above arrays are diff'd # (and thus one element shorter) freq_dir = np.array(freq_dir[1:]) # Ignore cases where looking at the boundaries of spectral windows bypass_check = flex_spw_id_array[1:] != flex_spw_id_array[:-1] if not np.all( np.logical_or( bypass_check, np.isclose(diff_chanwidth, 0.0, rtol=freq_tols[0], atol=freq_tols[1]), ) ): spacing_error = True if not np.all( np.logical_or( bypass_check, np.isclose( freq_spacing, channel_width[1:] * freq_dir, rtol=freq_tols[0], atol=freq_tols[1], ), ) ): chanwidth_error = True else: freq_dir = np.sign(np.mean(freq_spacing)) if not _test_array_constant(freq_spacing, freq_tols): spacing_error = True if future_array_shapes: if not _test_array_constant(channel_width, freq_tols): spacing_error = True else: if not np.isclose( np.mean(freq_spacing), np.mean(channel_width) * freq_dir, rtol=channel_width_tols[0], atol=channel_width_tols[1], ): chanwidth_error = True else: if not np.isclose( np.mean(freq_spacing), channel_width * freq_dir, rtol=channel_width_tols[0], atol=channel_width_tols[1], ): chanwidth_error = True if raise_errors and spacing_error: raise ValueError( "The frequencies are not evenly spaced (probably " "because of a select operation) or has differing " "values of channel widths. Some file formats " "(e.g. uvfits, miriad) and methods (frequency_average) " "do not support unevenly spaced frequencies." ) if raise_errors and chanwidth_error: raise ValueError( "The frequencies are separated by more than their " "channel width (probably because of a select operation). " "Some file formats (e.g. uvfits, miriad) and " "methods (frequency_average) do not support " "frequencies that are spaced by more than their " "channel width." ) return spacing_error, chanwidth_error def baseline_to_antnums(baseline, Nants_telescope): """ Get the antenna numbers corresponding to a given baseline number. Parameters ---------- baseline : int or array_like of ints baseline number Nants_telescope : int number of antennas Returns ------- int or array_like of int first antenna number(s) int or array_like of int second antenna number(s) """ if Nants_telescope > 2048: raise Exception( "error Nants={Nants}>2048 not supported".format(Nants=Nants_telescope) ) return_array = isinstance(baseline, (np.ndarray, list, tuple)) ant1, ant2 = _utils.baseline_to_antnums( np.ascontiguousarray(baseline, dtype=np.int64) ) if return_array: return ant1, ant2 else: return ant1.item(0), ant2.item(0) def antnums_to_baseline(ant1, ant2, Nants_telescope, attempt256=False): """ Get the baseline number corresponding to two given antenna numbers. Parameters ---------- ant1 : int or array_like of int first antenna number ant2 : int or array_like of int second antenna number Nants_telescope : int number of antennas attempt256 : bool Option to try to use the older 256 standard used in many uvfits files (will use 2048 standard if there are more than 256 antennas). Default is False. Returns ------- int or array of int baseline number corresponding to the two antenna numbers. """ if Nants_telescope is not None and Nants_telescope > 2048: raise Exception( "cannot convert ant1, ant2 to a baseline index " "with Nants={Nants}>2048.".format(Nants=Nants_telescope) ) return_array = isinstance(ant1, (np.ndarray, list, tuple)) baseline = _utils.antnums_to_baseline( np.ascontiguousarray(ant1, dtype=np.int64), np.ascontiguousarray(ant2, dtype=np.int64), attempt256=attempt256, ) if return_array: return baseline else: return baseline.item(0) def baseline_index_flip(baseline, Nants_telescope): """Change baseline number to reverse antenna order.""" ant1, ant2 = baseline_to_antnums(baseline, Nants_telescope) return antnums_to_baseline(ant2, ant1, Nants_telescope) def _x_orientation_rep_dict(x_orientation): """Create replacement dict based on x_orientation.""" if x_orientation.lower() == "east" or x_orientation.lower() == "e": return {"x": "e", "y": "n"} elif x_orientation.lower() == "north" or x_orientation.lower() == "n": return {"x": "n", "y": "e"} else: raise ValueError("x_orientation not recognized.") def polstr2num(pol, x_orientation=None): """ Convert polarization str to number according to AIPS Memo 117. Prefer 'pI', 'pQ', 'pU' and 'pV' to make it clear that these are pseudo-Stokes, not true Stokes, but also supports 'I', 'Q', 'U', 'V'. Parameters ---------- pol : str polarization string x_orientation : str, optional Orientation of the physical dipole corresponding to what is labelled as the x polarization ("east" or "north") to allow for converting from E/N strings. See corresonding parameter on UVData for more details. Returns ------- int Number corresponding to string Raises ------ ValueError If the pol string cannot be converted to a polarization number. Warns ----- UserWarning If the x_orientation not recognized. """ dict_use = copy.deepcopy(POL_STR2NUM_DICT) if x_orientation is not None: try: rep_dict = _x_orientation_rep_dict(x_orientation) for key, value in POL_STR2NUM_DICT.items(): new_key = key.replace("x", rep_dict["x"]).replace("y", rep_dict["y"]) dict_use[new_key] = value except ValueError: warnings.warn("x_orientation not recognized.") poldict = {k.lower(): v for k, v in dict_use.items()} if isinstance(pol, str): out = poldict[pol.lower()] elif isinstance(pol, Iterable): out = [poldict[key.lower()] for key in pol] else: raise ValueError( "Polarization {p} cannot be converted to a polarization number.".format( p=pol ) ) return out def polnum2str(num, x_orientation=None): """ Convert polarization number to str according to AIPS Memo 117. Uses 'pI', 'pQ', 'pU' and 'pV' to make it clear that these are pseudo-Stokes, not true Stokes Parameters ---------- num : int polarization number x_orientation : str, optional Orientation of the physical dipole corresponding to what is labelled as the x polarization ("east" or "north") to convert to E/N strings. See corresonding parameter on UVData for more details. Returns ------- str String corresponding to polarization number Raises ------ ValueError If the polarization number cannot be converted to a polarization string. Warns ----- UserWarning If the x_orientation not recognized. """ dict_use = copy.deepcopy(POL_NUM2STR_DICT) if x_orientation is not None: try: rep_dict = _x_orientation_rep_dict(x_orientation) for key, value in POL_NUM2STR_DICT.items(): new_val = value.replace("x", rep_dict["x"]).replace("y", rep_dict["y"]) dict_use[key] = new_val except ValueError: warnings.warn("x_orientation not recognized.") if isinstance(num, (int, np.int32, np.int64)): out = dict_use[num] elif isinstance(num, Iterable): out = [dict_use[i] for i in num] else: raise ValueError( "Polarization {p} cannot be converted to string.".format(p=num) ) return out def jstr2num(jstr, x_orientation=None): """ Convert jones polarization str to number according to calfits memo. Parameters ---------- jstr : str antenna (jones) polarization string x_orientation : str, optional Orientation of the physical dipole corresponding to what is labelled as the x polarization ("east" or "north") to allow for converting from E/N strings. See corresonding parameter on UVData for more details. Returns ------- int antenna (jones) polarization number corresponding to string Raises ------ ValueError If the jones string cannot be converted to a polarization number. Warns ----- UserWarning If the x_orientation not recognized. """ dict_use = copy.deepcopy(JONES_STR2NUM_DICT) if x_orientation is not None: try: rep_dict = _x_orientation_rep_dict(x_orientation) for key, value in JONES_STR2NUM_DICT.items(): new_key = key.replace("x", rep_dict["x"]).replace("y", rep_dict["y"]) dict_use[new_key] = value except ValueError: warnings.warn("x_orientation not recognized.") jdict = {k.lower(): v for k, v in dict_use.items()} if isinstance(jstr, str): out = jdict[jstr.lower()] elif isinstance(jstr, Iterable): out = [jdict[key.lower()] for key in jstr] else: raise ValueError( "Jones polarization {j} cannot be converted to index.".format(j=jstr) ) return out def jnum2str(jnum, x_orientation=None): """ Convert jones polarization number to str according to calfits memo. Parameters ---------- num : int antenna (jones) polarization number x_orientation : str, optional Orientation of the physical dipole corresponding to what is labelled as the x polarization ("east" or "north") to convert to E/N strings. See corresonding parameter on UVData for more details. Returns ------- str antenna (jones) polarization string corresponding to number Raises ------ ValueError If the jones polarization number cannot be converted to a jones polarization string. Warns ----- UserWarning If the x_orientation not recognized. """ dict_use = copy.deepcopy(JONES_NUM2STR_DICT) if x_orientation is not None: try: rep_dict = _x_orientation_rep_dict(x_orientation) for key, value in JONES_NUM2STR_DICT.items(): new_val = value.replace("x", rep_dict["x"]).replace("y", rep_dict["y"]) dict_use[key] = new_val except ValueError: warnings.warn("x_orientation not recognized.") if isinstance(jnum, (int, np.int32, np.int64)): out = dict_use[jnum] elif isinstance(jnum, Iterable): out = [dict_use[i] for i in jnum] else: raise ValueError( "Jones polarization {j} cannot be converted to string.".format(j=jnum) ) return out def parse_polstr(polstr, x_orientation=None): """ Parse a polarization string and return pyuvdata standard polarization string. See utils.POL_STR2NUM_DICT for options. Parameters ---------- polstr : str polarization string x_orientation : str, optional Orientation of the physical dipole corresponding to what is labelled as the x polarization ("east" or "north") to allow for converting from E/N strings. See corresonding parameter on UVData for more details. Returns ------- str AIPS Memo 117 standard string Raises ------ ValueError If the pol string cannot be converted to a polarization number. Warns ----- UserWarning If the x_orientation not recognized. """ return polnum2str( polstr2num(polstr, x_orientation=x_orientation), x_orientation=x_orientation ) def parse_jpolstr(jpolstr, x_orientation=None): """ Parse a Jones polarization string and return pyuvdata standard jones string. See utils.JONES_STR2NUM_DICT for options. Parameters ---------- jpolstr : str Jones polarization string Returns ------- str calfits memo standard string Raises ------ ValueError If the jones string cannot be converted to a polarization number. Warns ----- UserWarning If the x_orientation not recognized. """ return jnum2str( jstr2num(jpolstr, x_orientation=x_orientation), x_orientation=x_orientation ) def conj_pol(pol): """ Return the polarization for the conjugate baseline. For example, (1, 2, 'xy') = conj(2, 1, 'yx'). The returned polarization is determined by assuming the antenna pair is reversed in the data, and finding the correct polarization correlation which will yield the requested baseline when conjugated. Note this means changing the polarization for linear cross-pols, but keeping auto-pol (e.g. xx) and Stokes the same. Parameters ---------- pol : str or int Polarization string or integer. Returns ------- cpol : str or int Polarization as if antennas are swapped (type matches input) """ cpol_dict = {k.lower(): v for k, v in CONJ_POL_DICT.items()} if isinstance(pol, str): cpol = cpol_dict[pol.lower()] elif isinstance(pol, Iterable): cpol = [conj_pol(p) for p in pol] elif isinstance(pol, (int, np.int32, np.int64)): cpol = polstr2num(cpol_dict[polnum2str(pol).lower()]) else: raise ValueError("Polarization not recognized, cannot be conjugated.") return cpol def reorder_conj_pols(pols): """ Reorder multiple pols, swapping pols that are conjugates of one another. For example ('xx', 'xy', 'yx', 'yy') -> ('xx', 'yx', 'xy', 'yy') This is useful for the _key2inds function in the case where an antenna pair is specified but the conjugate pair exists in the data. The conjugated data should be returned in the order of the polarization axis, so after conjugating the data, the pols need to be reordered. For example, if a file contains antpair (0, 1) and pols 'xy' and 'yx', but the user requests antpair (1, 0), they should get: [(1x, 0y), (1y, 0x)] = [conj(0y, 1x), conj(0x, 1y)] Parameters ---------- pols : array_like of str or int Polarization array (strings or ints). Returns ------- conj_order : ndarray of int Indices to reorder polarization array. """ if not isinstance(pols, Iterable): raise ValueError("reorder_conj_pols must be given an array of polarizations.") cpols = np.array([conj_pol(p) for p in pols]) # Array needed for np.where conj_order = [np.where(cpols == p)[0][0] if p in cpols else -1 for p in pols] if -1 in conj_order: raise ValueError( "Not all conjugate pols exist in the polarization array provided." ) return conj_order def LatLonAlt_from_XYZ(xyz, check_acceptability=True): """ Calculate lat/lon/alt from ECEF x,y,z. Parameters ---------- xyz : ndarray of float numpy array, shape (Npts, 3), with ECEF x,y,z coordinates. check_acceptability : bool Flag to check XYZ coordinates are reasonable. Returns ------- latitude : ndarray or float latitude, numpy array (if Npts > 1) or value (if Npts = 1) in radians longitude : ndarray or float longitude, numpy array (if Npts > 1) or value (if Npts = 1) in radians altitude : ndarray or float altitude, numpy array (if Npts > 1) or value (if Npts = 1) in meters """ # convert to a numpy array xyz = np.asarray(xyz) if xyz.ndim > 1 and xyz.shape[1] != 3: raise ValueError("The expected shape of ECEF xyz array is (Npts, 3).") squeeze = xyz.ndim == 1 if squeeze: xyz = xyz[np.newaxis, :] xyz = np.ascontiguousarray(xyz.T, dtype=np.float64) # checking for acceptable values if check_acceptability: norms = np.linalg.norm(xyz, axis=0) if not all(np.logical_and(norms >= 6.35e6, norms <= 6.39e6)): raise ValueError("xyz values should be ECEF x, y, z coordinates in meters") # this helper function returns one 2D array because it is less overhead for cython lla = _utils._lla_from_xyz(xyz) if squeeze: return lla[0, 0], lla[1, 0], lla[2, 0] return lla[0], lla[1], lla[2] def XYZ_from_LatLonAlt(latitude, longitude, altitude): """ Calculate ECEF x,y,z from lat/lon/alt values. Parameters ---------- latitude : ndarray or float latitude, numpy array (if Npts > 1) or value (if Npts = 1) in radians longitude : ndarray or float longitude, numpy array (if Npts > 1) or value (if Npts = 1) in radians altitude : ndarray or float altitude, numpy array (if Npts > 1) or value (if Npts = 1) in meters Returns ------- xyz : ndarray of float numpy array, shape (Npts, 3), with ECEF x,y,z coordinates. """ latitude = np.ascontiguousarray(latitude, dtype=np.float64) longitude = np.ascontiguousarray(longitude, dtype=np.float64) altitude = np.ascontiguousarray(altitude, dtype=np.float64) n_pts = latitude.size if longitude.size != n_pts: raise ValueError( "latitude, longitude and altitude must all have the same length" ) if altitude.size != n_pts: raise ValueError( "latitude, longitude and altitude must all have the same length" ) xyz = _utils._xyz_from_latlonalt(latitude, longitude, altitude) xyz = xyz.T if n_pts == 1: return xyz[0] return xyz def rotECEF_from_ECEF(xyz, longitude): """ Get rotated ECEF positions such that the x-axis goes through the longitude. Miriad and uvfits expect antenna positions in this frame (with longitude of the array center/telescope location) Parameters ---------- xyz : ndarray of float numpy array, shape (Npts, 3), with ECEF x,y,z coordinates. longitude : float longitude in radians to rotate coordinates to (usually the array center/telescope location). Returns ------- ndarray of float Rotated ECEF coordinates, shape (Npts, 3). """ angle = -1 * longitude rot_matrix = np.array( [ [np.cos(angle), -1 * np.sin(angle), 0], [np.sin(angle), np.cos(angle), 0], [0, 0, 1], ] ) return rot_matrix.dot(xyz.T).T def ECEF_from_rotECEF(xyz, longitude): """ Calculate ECEF from a rotated ECEF (Inverse of rotECEF_from_ECEF). Parameters ---------- xyz : ndarray of float numpy array, shape (Npts, 3), with rotated ECEF x,y,z coordinates. longitude : float longitude in radians giving the x direction of the rotated coordinates (usually the array center/telescope location). Returns ------- ndarray of float ECEF coordinates, shape (Npts, 3). """ angle = longitude rot_matrix = np.array( [ [np.cos(angle), -1 * np.sin(angle), 0], [np.sin(angle), np.cos(angle), 0], [0, 0, 1], ] ) return rot_matrix.dot(xyz.T).T def ENU_from_ECEF(xyz, latitude, longitude, altitude): """ Calculate local ENU (east, north, up) coordinates from ECEF coordinates. Parameters ---------- xyz : ndarray of float numpy array, shape (Npts, 3), with ECEF x,y,z coordinates. latitude : float Latitude of center of ENU coordinates in radians. longitude : float Longitude of center of ENU coordinates in radians. altitude : float Altitude of center of ENU coordinates in radians. Returns ------- ndarray of float numpy array, shape (Npts, 3), with local ENU coordinates """ xyz = np.asarray(xyz) if xyz.ndim > 1 and xyz.shape[1] != 3: raise ValueError("The expected shape of ECEF xyz array is (Npts, 3).") squeeze = False if xyz.ndim == 1: squeeze = True xyz = xyz[np.newaxis, :] xyz = np.ascontiguousarray(xyz.T, dtype=np.float64) # check that these are sensible ECEF values -- their magnitudes need to be # on the order of Earth's radius ecef_magnitudes = np.linalg.norm(xyz, axis=0) sensible_radius_range = (6.35e6, 6.39e6) if np.any(ecef_magnitudes <= sensible_radius_range[0]) or np.any( ecef_magnitudes >= sensible_radius_range[1] ): raise ValueError( "ECEF vector magnitudes must be on the order of the radius of the earth" ) # the cython utility expects (3, Npts) for faster manipulation # transpose after we get the array back to match the expected shape enu = _utils._ENU_from_ECEF( xyz, np.ascontiguousarray(latitude, dtype=np.float64), np.ascontiguousarray(longitude, dtype=np.float64), np.ascontiguousarray(altitude, dtype=np.float64), ) enu = enu.T if squeeze: enu = np.squeeze(enu) return enu def ECEF_from_ENU(enu, latitude, longitude, altitude): """ Calculate ECEF coordinates from local ENU (east, north, up) coordinates. Parameters ---------- enu : ndarray of float numpy array, shape (Npts, 3), with local ENU coordinates. latitude : float Latitude of center of ENU coordinates in radians. longitude : float Longitude of center of ENU coordinates in radians. altitude : float Altitude of center of ENU coordinates in radians. Returns ------- xyz : ndarray of float numpy array, shape (Npts, 3), with ECEF x,y,z coordinates. """ enu = np.asarray(enu) if enu.ndim > 1 and enu.shape[1] != 3: raise ValueError("The expected shape of the ENU array is (Npts, 3).") squeeze = False if enu.ndim == 1: squeeze = True enu = enu[np.newaxis, :] enu = np.ascontiguousarray(enu.T, dtype=np.float64) # the cython utility expects (3, Npts) for faster manipulation # transpose after we get the array back to match the expected shape xyz = _utils._ECEF_from_ENU( enu, np.ascontiguousarray(latitude, dtype=np.float64), np.ascontiguousarray(longitude, dtype=np.float64), np.ascontiguousarray(altitude, dtype=np.float64), ) xyz = xyz.T if squeeze: xyz = np.squeeze(xyz) return xyz def phase_uvw(ra, dec, initial_uvw): """ Calculate phased uvws/positions from unphased ones in an icrs or gcrs frame. This code expects input uvws or positions relative to the telescope location in the same frame that ra/dec are in (e.g. icrs or gcrs) and returns phased ones in the same frame. Note that this code is nearly identical to ENU_from_ECEF, except that it uses an arbitrary phasing center rather than a coordinate center. Parameters ---------- ra : float Right ascension of phase center. dec : float Declination of phase center. initial_uvw : ndarray of float Unphased uvws or positions relative to the array center, shape (Nlocs, 3). Returns ------- uvw : ndarray of float uvw array in the same frame as initial_uvws, ra and dec. """ if initial_uvw.ndim == 1: initial_uvw = initial_uvw[np.newaxis, :] return _utils._phase_uvw( np.float64(ra), np.float64(dec), np.ascontiguousarray(initial_uvw.T, dtype=np.float64), ).T def unphase_uvw(ra, dec, uvw): """ Calculate unphased uvws/positions from phased ones in an icrs or gcrs frame. This code expects phased uvws or positions in the same frame that ra/dec are in (e.g. icrs or gcrs) and returns unphased ones in the same frame. Parameters ---------- ra : float Right ascension of phase center. dec : float Declination of phase center. uvw : ndarray of float Phased uvws or positions relative to the array center, shape (Nlocs, 3). Returns ------- unphased_uvws : ndarray of float Unphased uvws or positions relative to the array center, shape (Nlocs, 3). """ if uvw.ndim == 1: uvw = uvw[np.newaxis, :] return _utils._unphase_uvw( np.float64(ra), np.float64(dec), np.ascontiguousarray(uvw.T, dtype=np.float64), ).T def polar2_to_cart3(lon_array, lat_array): """ Convert 2D polar coordinates into 3D cartesian coordinates. This is a simple routine for converting a set of spherical angular coordinates into a 3D cartesian vectors, where the x-direction is set by the position (0, 0). Parameters ---------- lon_array : float or ndarray Longitude coordinates, which increases in the counter-clockwise direction. Units of radians. Can either be a float or ndarray -- if the latter, must have the same shape as lat_array. lat_array : float or ndarray Latitude coordinates, where 0 falls on the equator of the sphere. Units of radians. Can either be a float or ndarray -- if the latter, must have the same shape as lat_array. Returns ------- xyz_array : ndarray of float Cartesian coordinates of the given longitude and latitude on a unit sphere. Shape is (3, coord_shape), where coord_shape is the shape of lon_array and lat_array if they were provided as type ndarray, otherwise (3,). """ # Check to make sure that we are not playing with mixed types if type(lon_array) is not type(lat_array): raise ValueError( "lon_array and lat_array must either both be floats or ndarrays." ) if isinstance(lon_array, np.ndarray): if lon_array.shape != lat_array.shape: raise ValueError("lon_array and lat_array must have the same shape.") # Once we know that lon_array and lat_array are of the same shape, # time to create our 3D set of vectors! xyz_array = np.array( [ np.cos(lon_array) * np.cos(lat_array), np.sin(lon_array) * np.cos(lat_array), np.sin(lat_array), ], dtype=float, ) return xyz_array def cart3_to_polar2(xyz_array): """ Convert 3D cartesian coordinates into 2D polar coordinates. This is a simple routine for converting a set of 3D cartesian vectors into spherical coordinates, where the position (0, 0) lies along the x-direction. Parameters ---------- xyz_array : ndarray of float Cartesian coordinates, need not be of unit vector length. Shape is (3, coord_shape). Returns ------- lon_array : ndarray of float Longitude coordinates, which increases in the counter-clockwise direction. Units of radians, shape is (coord_shape,). lat_array : ndarray of float Latitude coordinates, where 0 falls on the equator of the sphere. Units of radians, shape is (coord_shape,). """ if not isinstance(xyz_array, np.ndarray): raise ValueError("xyz_array must be an ndarray.") if xyz_array.ndim == 0: raise ValueError("xyz_array must have ndim > 0") if xyz_array.shape[0] != 3: raise ValueError("xyz_array must be length 3 across the zeroth axis.") # The longitude coord is relatively easy to calculate, just take the X and Y # components and find the arctac of the pair. lon_array = np.mod(np.arctan2(xyz_array[1], xyz_array[0]), 2.0 * np.pi, dtype=float) # If we _knew_ that xyz_array was always of length 1, then this call could be a much # simpler one to arcsin. But to make this generic, we'll use the length of the XY # component along with arctan2. lat_array = np.arctan2( xyz_array[2], np.sqrt((xyz_array[0:2] ** 2.0).sum(axis=0)), dtype=float ) # Return the two arrays return lon_array, lat_array def _rotate_matmul_wrapper(xyz_array, rot_matrix, n_rot): """ Apply a rotation matrix to a series of vectors. This is a simple convenience function which wraps numpy's matmul function for use with various vector rotation functions in this module. This code could, in principle, be replaced by a cythonized piece of code, although the matmul function is _pretty_ well optimized already. This function is not meant to be called by users, but is instead used by multiple higher-level utility functions (namely those that perform rotations). Parameters ---------- xyz_array : ndarray of floats Array of vectors to be rotated. When nrot > 1, shape may be (n_rot, 3, n_vec) or (1, 3, n_vec), the latter is useful for when performing multiple rotations on a fixed set of vectors. If nrot = 1, shape may be (1, 3, n_vec), (3, n_vec), or (3,). rot_matrix : ndarray of floats Series of rotation matricies to be applied to the stack of vectors. Must be of shape (n_rot, 3, 3) n_rot : int Number of individual rotation matricies to be applied. Returns ------- rotated_xyz : ndarray of floats Array of vectors that have been rotated, of shape (n_rot, 3, n_vectors,). """ # Do a quick check to make sure that things look sensible if rot_matrix.shape != (n_rot, 3, 3): raise ValueError( "rot_matrix must be of shape (n_rot, 3, 3), where n_rot=%i." % n_rot ) if (xyz_array.ndim == 3) and ( (xyz_array.shape[0] not in [1, n_rot]) or (xyz_array.shape[-2] != 3) ): raise ValueError("Misshaped xyz_array - expected shape (n_rot, 3, n_vectors).") if (xyz_array.ndim < 3) and (xyz_array.shape[0] != 3): raise ValueError("Misshaped xyz_array - expected shape (3, n_vectors) or (3,).") rotated_xyz = np.matmul(rot_matrix, xyz_array) return rotated_xyz def _rotate_one_axis(xyz_array, rot_amount, rot_axis): """ Rotate an array of 3D positions around the a single axis (x, y, or z). This function performs a basic rotation of 3D vectors about one of the priciple axes -- the x-axis, the y-axis, or the z-axis. Note that the rotations here obey the right-hand rule -- that is to say, from the perspective of the positive side of the axis of rotation, a positive rotation will cause points on the plane intersecting this axis to move in a counter-clockwise fashion. Parameters ---------- xyz_array : ndarray of float Set of 3-dimensional vectors be rotated, in typical right-handed cartesian order, e.g. (x, y, z). Shape is (Nrot, 3, Nvectors). rot_amount : float or ndarray of float Amount (in radians) to rotate the given set of coordinates. Can either be a single float (or ndarray of shape (1,)) if rotating all vectors by the same amount, otherwise expected to be shape (Nrot,). rot_axis : int Axis around which the rotation is applied. 0 is the x-axis, 1 is the y-axis, and 2 is the z-axis. Returns ------- rotated_xyz : ndarray of float Set of rotated 3-dimensional vectors, shape (Nrot, 3, Nvector). """ # If rot_amount is None or all zeros, then this is just one big old no-op. if (rot_amount is None) or np.all(rot_amount == 0.0): if np.ndim(xyz_array) == 1: return deepcopy(xyz_array[np.newaxis, :, np.newaxis]) elif np.ndim(xyz_array) == 2: return deepcopy(xyz_array[np.newaxis, :, :]) else: return deepcopy(xyz_array) # Check and see how big of a rotation matrix we need n_rot = 1 if (not isinstance(rot_amount, np.ndarray)) else (rot_amount.shape[0]) n_vec = xyz_array.shape[-1] # The promotion of values to float64 is to suppress numerical precision issues, # since the matrix math can - in limited circumstances - introduce precision errors # of order 10x the limiting numerical precision of the float. For a float32/single, # thats a part in 1e6 (~arcsec-level errors), but for a float64 it translates to # a part in 1e15. rot_matrix = np.zeros((3, 3, n_rot), dtype=np.float64) # Figure out which pieces of the matrix we need to update temp_jdx = (rot_axis + 1) % 3 temp_idx = (rot_axis + 2) % 3 # Fill in the rotation matricies accordingly rot_matrix[rot_axis, rot_axis] = 1 rot_matrix[temp_idx, temp_idx] = np.cos(rot_amount, dtype=np.float64) rot_matrix[temp_jdx, temp_jdx] = rot_matrix[temp_idx, temp_idx] rot_matrix[temp_idx, temp_jdx] = np.sin(rot_amount, dtype=np.float64) rot_matrix[temp_jdx, temp_idx] = -rot_matrix[temp_idx, temp_jdx] # The rot matrix was shape (3, 3, n_rot) to help speed up filling in the elements # of each matrix, but now we want to flip it into its proper shape of (n_rot, 3, 3) rot_matrix = np.transpose(rot_matrix, axes=[2, 0, 1]) if (n_rot == 1) and (n_vec == 1) and (xyz_array.ndim == 3): # This is a special case where we allow the rotation axis to "expand" along # the 0th axis of the rot_amount arrays. For xyz_array, if n_vectors = 1 # but n_rot !=1, then it's a lot faster (by about 10x) to "switch it up" and # swap the n_vector and n_rot axes, and then swap them back once everything # else is done. return np.transpose( _rotate_matmul_wrapper( np.transpose(xyz_array, axes=[2, 1, 0]), rot_matrix, n_rot, ), axes=[2, 1, 0], ) else: return _rotate_matmul_wrapper(xyz_array, rot_matrix, n_rot) def _rotate_two_axis(xyz_array, rot_amount1, rot_amount2, rot_axis1, rot_axis2): """ Rotate an array of 3D positions sequentially around a pair of axes (x, y, or z). This function performs a sequential pair of basic rotations of 3D vectors about the priciple axes -- the x-axis, the y-axis, or the z-axis. Note that the rotations here obey the right-hand rule -- that is to say, from the perspective of the positive side of the axis of rotation, a positive rotation will cause points on the plane intersecting this axis to move in a counter-clockwise fashion. Parameters ---------- xyz_array : ndarray of float Set of 3-dimensional vectors be rotated, in typical right-handed cartesian order, e.g. (x, y, z). Shape is (Nrot, 3, Nvectors). rot_amount1 : float or ndarray of float Amount (in radians) of rotatation to apply during the first rotation of the sequence, to the given set of coordinates. Can either be a single float (or ndarray of shape (1,)) if rotating all vectors by the same amount, otherwise expected to be shape (Nrot,). rot_amount2 : float or ndarray of float Amount (in radians) of rotatation to apply during the second rotation of the sequence, to the given set of coordinates. Can either be a single float (or ndarray of shape (1,)) if rotating all vectors by the same amount, otherwise expected to be shape (Nrot,). rot_axis1 : int Axis around which the first rotation is applied. 0 is the x-axis, 1 is the y-axis, and 2 is the z-axis. rot_axis2 : int Axis around which the second rotation is applied. 0 is the x-axis, 1 is the y-axis, and 2 is the z-axis. Returns ------- rotated_xyz : ndarray of float Set of rotated 3-dimensional vectors, shape (Nrot, 3, Nvector). """ # Capture some special cases upfront, where we can save ourselves a bit of work no_rot1 = (rot_amount1 is None) or np.all(rot_amount1 == 0.0) no_rot2 = (rot_amount2 is None) or np.all(rot_amount2 == 0.0) if no_rot1 and no_rot2: # If rot_amount is None, then this is just one big old no-op. return deepcopy(xyz_array) elif no_rot1: # If rot_amount1 is None, then ignore it and just work w/ the 2nd rotation return _rotate_one_axis(xyz_array, rot_amount2, rot_axis2) elif no_rot2: # If rot_amount2 is None, then ignore it and just work w/ the 1st rotation return _rotate_one_axis(xyz_array, rot_amount1, rot_axis1) elif rot_axis1 == rot_axis2: # Capture the case where someone wants to do a sequence of rotations on the same # axis. Also known as just rotating a single axis. return _rotate_one_axis(xyz_array, rot_amount1 + rot_amount2, rot_axis1) # Figure out how many individual rotation matricies we need, accounting for the # fact that these can either be floats or ndarrays. n_rot = max( rot_amount1.shape[0] if isinstance(rot_amount1, np.ndarray) else 1, rot_amount2.shape[0] if isinstance(rot_amount2, np.ndarray) else 1, ) n_vec = xyz_array.shape[-1] # The promotion of values to float64 is to suppress numerical precision issues, # since the matrix math can - in limited circumstances - introduce precision errors # of order 10x the limiting numerical precision of the float. For a float32/single, # thats a part in 1e6 (~arcsec-level errors), but for a float64 it translates to # a part in 1e15. rot_matrix = np.empty((3, 3, n_rot), dtype=np.float64) # There are two permulations per pair of axes -- when the pair is right-hand # oriented vs left-hand oriented. Check here which one it is. For example, # rotating first on the x-axis, second on the y-axis is considered a # "right-handed" pair, whereas z-axis first, then y-axis would be considered # a "left-handed" pair. lhd_order = np.mod(rot_axis2 - rot_axis1, 3) != 1 temp_idx = [ np.mod(rot_axis1 - lhd_order, 3), np.mod(rot_axis1 + 1 - lhd_order, 3), np.mod(rot_axis1 + 2 - lhd_order, 3), ] # We're using lots of sin and cos calculations -- doing them once upfront saves # quite a bit of time by eliminating redundant calculations sin_lo = np.sin(rot_amount2 if lhd_order else rot_amount1, dtype=np.float64) cos_lo = np.cos(rot_amount2 if lhd_order else rot_amount1, dtype=np.float64) sin_hi = np.sin(rot_amount1 if lhd_order else rot_amount2, dtype=np.float64) cos_hi = np.cos(rot_amount1 if lhd_order else rot_amount2, dtype=np.float64) # Take care of the diagonal terms first, since they aren't actually affected by the # order of rotational opertations rot_matrix[temp_idx[0], temp_idx[0]] = cos_hi rot_matrix[temp_idx[1], temp_idx[1]] = cos_lo rot_matrix[temp_idx[2], temp_idx[2]] = cos_lo * cos_hi # Now time for the off-diagonal terms, as a set of 3 pairs. The rotation matrix # for a left-hand oriented pair of rotation axes (e.g., x-rot, then y-rot) is just # a transpose of the right-hand orientation of the same pair (e.g., y-rot, then # x-rot). rot_matrix[temp_idx[0 + lhd_order], temp_idx[1 - lhd_order]] = sin_lo * sin_hi rot_matrix[temp_idx[0 - lhd_order], temp_idx[lhd_order - 1]] = ( cos_lo * sin_hi * ((-1.0) ** lhd_order) ) rot_matrix[temp_idx[1 - lhd_order], temp_idx[0 + lhd_order]] = 0.0 rot_matrix[temp_idx[1 + lhd_order], temp_idx[2 - lhd_order]] = sin_lo * ( (-1.0) ** (1 + lhd_order) ) rot_matrix[temp_idx[lhd_order - 1], temp_idx[0 - lhd_order]] = sin_hi * ( (-1.0) ** (1 + lhd_order) ) rot_matrix[temp_idx[2 - lhd_order], temp_idx[1 + lhd_order]] = ( sin_lo * cos_hi * ((-1.0) ** (lhd_order)) ) # The rot matrix was shape (3, 3, n_rot) to help speed up filling in the elements # of each matrix, but now we want to flip it into its proper shape of (n_rot, 3, 3) rot_matrix = np.transpose(rot_matrix, axes=[2, 0, 1]) if (n_rot == 1) and (n_vec == 1) and (xyz_array.ndim == 3): # This is a special case where we allow the rotation axis to "expand" along # the 0th axis of the rot_amount arrays. For xyz_array, if n_vectors = 1 # but n_rot !=1, then it's a lot faster (by about 10x) to "switch it up" and # swap the n_vector and n_rot axes, and then swap them back once everything # else is done. return np.transpose( _rotate_matmul_wrapper( np.transpose(xyz_array, axes=[2, 1, 0]), rot_matrix, n_rot, ), axes=[2, 1, 0], ) else: return _rotate_matmul_wrapper(xyz_array, rot_matrix, n_rot) def calc_uvw( app_ra=None, app_dec=None, frame_pa=None, lst_array=None, use_ant_pos=True, uvw_array=None, antenna_positions=None, antenna_numbers=None, ant_1_array=None, ant_2_array=None, old_app_ra=None, old_app_dec=None, old_frame_pa=None, telescope_lat=None, telescope_lon=None, from_enu=False, to_enu=False, ): """ Calculate an array of baseline coordinates, in either uvw or ENU. This routine is meant as a convenience function for producing baseline coordinates based under a few different circumstances: 1) Calculating ENU coordinates using antenna positions 2) Calculating uwv coordinates at a given sky position using antenna positions 3) Converting from ENU coordinates to uvw coordinates 4) Converting from uvw coordinate to ENU coordinates 5) Converting from uvw coordinates at one sky position to another sky position Different conversion pathways have different parameters that are required. Parameters ---------- app_ra : ndarray of float Apparent RA of the target phase center, required if calculating baseline coordinates in uvw-space (vs ENU-space). Shape is (Nblts,), units are radians. app_dec : ndarray of float Apparent declination of the target phase center, required if calculating baseline coordinates in uvw-space (vs ENU-space). Shape is (Nblts,), units are radians. frame_pa : ndarray of float Position angle between the great circle of declination in the apparent frame versus that of the reference frame, used for making sure that "North" on the derived maps points towards a particular celestial pole (not just the topocentric one). Required if not deriving baseline coordinates from antenna positions, from_enu=False, and a value for old_frame_pa is given. Shape is (Nblts,), units are radians. old_app_ra : ndarray of float Apparent RA of the previous phase center, required if not deriving baseline coordinates from antenna positions and from_enu=False. Shape is (Nblts,), units are radians. old_app_dec : ndarray of float Apparent declination of the previous phase center, required if not deriving baseline coordinates from antenna positions and from_enu=False. Shape is (Nblts,), units are radians. old_frame_pa : ndarray of float Frame position angle of the previous phase center, required if not deriving baseline coordinates from antenna positions, from_enu=False, and a value for frame_pa is supplied. Shape is (Nblts,), units are radians. lst_array : ndarray of float Local apparent sidereal time, required if deriving baseline coordinates from antenna positions, or converting to/from ENU coordinates. Shape is (Nblts,). use_ant_pos : bool Switch to determine whether to derive uvw values from the antenna positions (if set to True), or to use the previously calculated uvw coordinates to derive new the new baseline vectors (if set to False). Default is True. uvw_array : ndarray of float Array of previous baseline coordinates (in either uvw or ENU), required if not deriving new coordinates from antenna positions. Shape is (Nblts, 3). antenna_positions : ndarray of float List of antenna positions relative to array center in ECEF coordinates, required if not providing `uvw_array`. Shape is (Nants, 3). antenna_numbers: ndarray of int List of antenna numbers, ordered in the same way as `antenna_positions` (e.g., `antenna_numbers[0]` should given the number of antenna that resides at ECEF position given by `antenna_positions[0]`). Shape is (Nants,), requred if not providing `uvw_array`. Contains all unique entires of the joint set of `ant_1_array` and `ant_2_array`. ant_1_array : ndarray of int Antenna number of the first antenna in the baseline pair, for all baselines Required if not providing `uvw_array`, shape is (Nblts,). ant_2_array : ndarray of int Antenna number of the second antenna in the baseline pair, for all baselines Required if not providing `uvw_array`, shape is (Nblts,). telescope_lat : float Latitude of the phase center, units radians, required if deriving baseline coordinates from antenna positions, or converting to/from ENU coordinates. telescope_lon : float Longitude of the phase center, units radians, required if deriving baseline coordinates from antenna positions, or converting to/from ENU coordinates. from_enu : boolean Set to True if uvw_array is expressed in ENU coordinates. Default is False. to_enu : boolean Set to True if you would like the output expressed in EN coordinates. Default is False. Returns ------- new_coords : ndarray of float64 Set of baseline coordinates, shape (Nblts, 3). """ if to_enu: if lst_array is None and not use_ant_pos: raise ValueError( "Must include lst_array to calculate baselines in ENU coordinates!" ) if telescope_lat is None: raise ValueError( "Must include telescope_lat to calculate baselines " "in ENU coordinates!" ) else: if ((app_ra is None) or (app_dec is None)) and frame_pa is None: raise ValueError( "Must include both app_ra and app_dec, or frame_pa to calculate " "baselines in uvw coordinates!" ) if use_ant_pos: # Assume at this point we are dealing w/ antenna positions if antenna_positions is None: raise ValueError("Must include antenna_positions if use_ant_pos=True.") if (ant_1_array is None) or (ant_2_array is None) or (antenna_numbers is None): raise ValueError( "Must include ant_1_array, ant_2_array, and antenna_numbers " "setting use_ant_pos=True." ) if lst_array is None and not to_enu: raise ValueError( "Must include lst_array if use_ant_pos=True and not calculating " "baselines in ENU coordinates." ) if telescope_lon is None: raise ValueError("Must include telescope_lon if use_ant_pos=True.") ant_dict = {ant_num: idx for idx, ant_num in enumerate(antenna_numbers)} ant_1_index = np.array([ant_dict[idx] for idx in ant_1_array], dtype=int) ant_2_index = np.array([ant_dict[idx] for idx in ant_2_array], dtype=int) N_ants = antenna_positions.shape[0] # Use the app_ra, app_dec, and lst_array arrays to figure out how many unique # rotations are actually needed. If the ratio of Nblts to number of unique # entries is favorable, we can just rotate the antenna positions and save # outselves a bit of work. if to_enu: # If to_enu, skip all this -- there's only one unique ha + dec combo unique_mask = np.zeros(len(ant_1_index), dtype=np.bool_) unique_mask[0] = True else: unique_mask = np.append( True, ( ((lst_array[:-1] - app_ra[:-1]) != (lst_array[1:] - app_ra[1:])) | (app_dec[:-1] != app_dec[1:]) ), ) # GHA -> Hour Angle as measured at Greenwich (because antenna coords are # centered such that x-plane intersects the meridian at longitude 0). if to_enu: # Unphased coordinates appear to be stored in ENU coordinates -- that's # equivalent to calculating uvw's based on zenith. We can use that to our # advantage and spoof the gha and dec based on telescope lon and lat unique_gha = np.zeros(1) - telescope_lon unique_dec = np.zeros(1) + telescope_lat unique_pa = None else: unique_gha = (lst_array[unique_mask] - app_ra[unique_mask]) - telescope_lon unique_dec = app_dec[unique_mask] unique_pa = 0.0 if frame_pa is None else frame_pa[unique_mask] # Tranpose the ant vectors so that they are in the proper shape ant_vectors = np.transpose(antenna_positions)[np.newaxis, :, :] # Apply rotations, and then reorganize the ndarray so that you can access # individual antenna vectors quickly. ant_rot_vectors = np.reshape( np.transpose( _rotate_one_axis( _rotate_two_axis(ant_vectors, unique_gha, unique_dec, 2, 1), unique_pa, 0, ), axes=[0, 2, 1], ), (-1, 3), ) unique_mask[0] = False unique_map = np.cumsum(unique_mask) * N_ants new_coords = ( ant_rot_vectors[unique_map + ant_2_index] - ant_rot_vectors[unique_map + ant_1_index] ) else: if uvw_array is None: raise ValueError("Must include uvw_array if use_ant_pos=False.") if from_enu: if to_enu: # Well this was pointless... returning your uvws unharmed return uvw_array # Unphased coordinates appear to be stored in ENU coordinates -- that's # equivalent to calculating uvw's based on zenith. We can use that to our # advantage and spoof old_app_ra and old_app_dec based on lst_array and # telescope_lat if telescope_lat is None: raise ValueError( "Must include telescope_lat if moving between " 'ENU (i.e., "unphased") and uvw coordinates!' ) if lst_array is None: raise ValueError( 'Must include lst_array if moving between ENU (i.e., "unphased") ' "and uvw coordinates!" ) else: if (old_frame_pa is None) and not (frame_pa is None or to_enu): raise ValueError( "Must include old_frame_pa values if data are phased and " "applying new position angle values (frame_pa)." ) if ((old_app_ra is None) and not (app_ra is None or to_enu)) or ( (old_app_dec is None) and not (app_dec is None or to_enu) ): raise ValueError( "Must include old_app_ra and old_app_dec values when data are " "already phased and phasing to a new position." ) # For this operation, all we need is the delta-ha coverage, which _should_ be # entirely encapsulated by the change in RA. if (app_ra is None) and (old_app_ra is None): gha_delta_array = 0.0 else: gha_delta_array = (lst_array if from_enu else old_app_ra) - ( lst_array if to_enu else app_ra ) # Notice below there's an axis re-orientation here, to go from uvw -> XYZ, # where X is pointing in the direction of the source. This is mostly here # for convenience and code legibility -- a slightly different pair of # rotations would give you the same result w/o needing to cycle the axes. # Up front, we want to trap the corner-case where the sky position you are # phasing up to hasn't changed, just the position angle (i.e., which way is # up on the map). This is a much easier transform to handle. if np.all(gha_delta_array == 0.0) and np.all(old_app_dec == app_dec): new_coords = _rotate_one_axis( uvw_array[:, [2, 0, 1], np.newaxis], frame_pa - (0.0 if old_frame_pa is None else old_frame_pa), 0, )[:, :, 0] else: new_coords = _rotate_two_axis( _rotate_two_axis( # Yo dawg, I heard you like rotation maticies... uvw_array[:, [2, 0, 1], np.newaxis], 0.0 if (from_enu or old_frame_pa is None) else (-old_frame_pa), (-telescope_lat) if from_enu else (-old_app_dec), 0, 1, ), gha_delta_array, telescope_lat if to_enu else app_dec, 2, 1, ) # One final rotation applied here, to compensate for the fact that we want # the Dec-axis of our image (Fourier dual to the v-axis) to be aligned with # the chosen frame, if we not in ENU coordinates if not to_enu: new_coords = _rotate_one_axis(new_coords, frame_pa, 0) # Finally drop the now-vestigal last axis of the array new_coords = new_coords[:, :, 0] # There's one last task to do, which is to re-align the axes from projected # XYZ -> uvw, where X (which points towards the source) falls on the w axis, # and Y and Z fall on the u and v axes, respectively. return new_coords[:, [1, 2, 0]] def transform_sidereal_coords( lon, lat, in_coord_frame, out_coord_frame, in_coord_epoch=None, out_coord_epoch=None, time_array=None, ): """ Transform a given set of coordinates from one sidereal coordinate frame to another. Uses astropy to convert from a coordinates from sidereal frame into another. This function will support transforms from several frames, including GCRS, FK5 (i.e., J2000), FK4 (i.e., B1950), Galactic, Supergalactic, CIRS, HCRS, and a few others (basically anything that doesn't require knowing the observers location on Earth/other celestial body). Parameters ---------- lon_coord : float or ndarray of floats Logitudinal coordinate to be transformed, typically expressed as the right ascension, in units of radians. Can either be a float, or an ndarray of floats with shape (Ncoords,). Must agree with lat_coord. lat_coord : float or ndarray of floats Latitudinal coordinate to be transformed, typically expressed as the declination, in units of radians. Can either be a float, or an ndarray of floats with shape (Ncoords,). Must agree with lon_coord. in_coord_frame : string Reference frame for the provided coordinates. Expected to match a list of those supported within the astropy SkyCoord object. An incomplete list includes 'gcrs', 'fk4', 'fk5', 'galactic', 'supergalactic', 'cirs', and 'hcrs'. out_coord_frame : string Reference frame to output coordinates in. Expected to match a list of those supported within the astropy SkyCoord object. An incomplete list includes 'gcrs', 'fk4', 'fk5', 'galactic', 'supergalactic', 'cirs', and 'hcrs'. in_coord_epoch : float Epoch for the input coordinate frame. Optional parameter, only required when using either the FK4 (B1950) or FK5 (J2000) coordinate systems. Units are in fractional years. out_coord_epoch : float Epoch for the output coordinate frame. Optional parameter, only required when using either the FK4 (B1950) or FK5 (J2000) coordinate systems. Units are in fractional years. time_array : float or ndarray of floats Julian date(s) to which the coordinates correspond to, only used in frames with annular motion terms (e.g., abberation in GCRS). Can either be a float, or an ndarray of floats with shape (Ntimes,), assuming that either lat_coord and lon_coord are floats, or that Ntimes == Ncoords. Returns ------- new_lat : float or ndarray of floats Longitudinal coordinates, in units of radians. Output will be an ndarray if any inputs were, with shape (Ncoords,) or (Ntimes,), depending on inputs. new_lon : float or ndarray of floats Latidudinal coordinates, in units of radians. Output will be an ndarray if any inputs were, with shape (Ncoords,) or (Ntimes,), depending on inputs. """ lon_coord = lon * units.rad lat_coord = lat * units.rad # Check here to make sure that lat_coord and lon_coord are the same length, # either 1 or len(time_array) if lat_coord.shape != lon_coord.shape: raise ValueError("lon and lat must be the same shape.") if lon_coord.ndim == 0: lon_coord.shape += (1,) lat_coord.shape += (1,) # Check to make sure that we have a properly formatted epoch for our in-bound # coordinate frame in_epoch = None if isinstance(in_coord_epoch, str) or isinstance(in_coord_epoch, Time): # If its a string or a Time object, we don't need to do anything more in_epoch = Time(in_coord_epoch) elif in_coord_epoch is not None: if in_coord_frame.lower() in ["fk4", "fk4noeterms"]: in_epoch = Time(in_coord_epoch, format="byear") else: in_epoch = Time(in_coord_epoch, format="jyear") # Now do the same for the outbound frame out_epoch = None if isinstance(out_coord_epoch, str) or isinstance(out_coord_epoch, Time): # If its a string or a Time object, we don't need to do anything more out_epoch = Time(out_coord_epoch) elif out_coord_epoch is not None: if out_coord_frame.lower() in ["fk4", "fk4noeterms"]: out_epoch = Time(out_coord_epoch, format="byear") else: out_epoch = Time(out_coord_epoch, format="jyear") # Make sure that time array matched up with what we expect. Thanks to astropy # weirdness, time_array has to be the same length as lat/lon coords rep_time = False rep_crds = False if time_array is None: time_obj_array = None else: if isinstance(time_array, Time): time_obj_array = time_array else: time_obj_array = Time(time_array, format="jd", scale="utc") if (time_obj_array.size != 1) and (lon_coord.size != 1): if time_obj_array.shape != lon_coord.shape: raise ValueError( "Shape of time_array must be either that of " " lat_coord/lon_coord if len(time_array) > 1." ) else: rep_crds = (time_obj_array.size != 1) and (lon_coord.size == 1) rep_time = (time_obj_array.size == 1) and (lon_coord.size != 1) if rep_crds: lon_coord = np.repeat(lon_coord, len(time_array)) lat_coord = np.repeat(lat_coord, len(time_array)) if rep_time: time_obj_array = Time( np.repeat(time_obj_array.jd, len(lon_coord)), format="jd", scale="utc", ) coord_object = SkyCoord( lon_coord, lat_coord, frame=in_coord_frame, equinox=in_epoch, obstime=time_obj_array, ) # Easiest, most general way to transform to the new frame is to create a dummy # SkyCoord with all the attributes needed -- note that we particularly need this # in order to use a non-standard equinox/epoch new_coord = coord_object.transform_to( SkyCoord(0, 0, unit="rad", frame=out_coord_frame, equinox=out_epoch) ) return new_coord.spherical.lon.rad, new_coord.spherical.lat.rad def transform_icrs_to_app( time_array, ra, dec, telescope_loc, epoch=2000.0, pm_ra=None, pm_dec=None, vrad=None, dist=None, astrometry_library="erfa", ): """ Transform a set of coordinates in ICRS to topocentric/apparent coordinates. This utility uses one of three libraries (astropy, NOVAS, or ERFA) to calculate the apparent (i.e., topocentric) coordinates of a source at a given time and location, given a set of coordinates expressed in the ICRS frame. These coordinates are most typically used for defining the phase center of the array (i.e, calculating baseline vectors). As of astropy v4.2, the agreement between the three libraries is consistent down to the level of better than 1 mas, with the values produced by astropy and pyERFA consistent to bettter than 10 µas (this is not surprising, given that astropy uses pyERFA under the hood for astrometry). ERFA is the default as it outputs coordinates natively in the apparent frame (whereas NOVAS and astropy do not), as well as the fact that of the three libraries, it produces results the fastest. Parameters ---------- time_array : float or array-like of float Julian dates to calculate coordinate positions for. Can either be a single float, or an array-like of shape (Ntimes,). ra : float or array-like of float ICRS RA of the celestial target, expressed in units of radians. Can either be a single float or array of shape (Ntimes,), although this must be consistent with other parameters (with the exception of telescope location parameters). dec : float or array-like of float ICRS Dec of the celestial target, expressed in units of radians. Can either be a single float or array of shape (Ntimes,), although this must be consistent with other parameters (with the exception of telescope location parameters). telescope_loc : array-like of floats or EarthLocation ITRF latitude, longitude, and altitude (rel to sea-level) of the phase center of the array. Can either be provided as an astropy EarthLocation, or a tuple of shape (3,) containung (in order) the latitude, longitude, and altitude, in units of radians, radians, and meters, respectively. epoch : int or float or str or Time object Epoch of the coordinate data supplied, only used when supplying proper motion values. If supplying a number, it will assumed to be in Julian years. Default is J2000.0. pm_ra : float or array-like of float Proper motion in RA of the source, expressed in units of milliarcsec / year. Proper motion values are applied relative to the J2000 (i.e., RA/Dec ICRS values should be set to their expected values when the epoch is 2000.0). Can either be a single float or array of shape (Ntimes,), although this must be consistent with other parameters (namely ra_coord and dec_coord). Note that units are in dRA/dt, not cos(Dec)*dRA/dt. Not required. pm_dec : float or array-like of float Proper motion in Dec of the source, expressed in units of milliarcsec / year. Proper motion values are applied relative to the J2000 (i.e., RA/Dec ICRS values should be set to their expected values when the epoch is 2000.0). Can either be a single float or array of shape (Ntimes,), although this must be consistent with other parameters (namely ra_coord and dec_coord). Not required. vrad : float or array-like of float Radial velocity of the source, expressed in units of km / sec. Can either be a single float or array of shape (Ntimes,), although this must be consistent with other parameters (namely ra_coord and dec_coord). Not required. dist : float or array-like of float Distance of the source, expressed in milliarcseconds. Can either be a single float or array of shape (Ntimes,), although this must be consistent with other parameters (namely ra_coord and dec_coord). Not required. astrometry_library : str Library used for running the coordinate conversions. Allowed options are 'erfa' (which uses the pyERFA), 'novas' (which uses the python-novas library), and 'astropy' (which uses the astropy utilities). Default is erfa. Returns ------- app_ra : ndarray of floats Apparent right ascension coordinates, in units of radians, of shape (Ntimes,). app_dec : ndarray of floats Apparent declination coordinates, in units of radians, of shape (Ntimes,). """ # Make sure that the library requested is actually permitted if astrometry_library not in ["erfa", "novas", "astropy"]: raise ValueError( "Requested coordinate transformation library is not supported, please " "select either 'erfa', 'novas', or 'astropy' for astrometry_library." ) ra_coord = ra * units.rad dec_coord = dec * units.rad # Check here to make sure that ra_coord and dec_coord are the same length, # either 1 or len(time_array) multi_coord = ra_coord.size != 1 if ra_coord.shape != dec_coord.shape: raise ValueError("ra and dec must be the same shape.") pm_ra_coord = None if pm_ra is None else pm_ra * (units.mas / units.yr) pm_dec_coord = None if pm_dec is None else pm_dec * (units.mas / units.yr) d_coord = ( None if (dist is None or np.all(dist == 0.0)) else Distance(dist * units.pc) ) v_coord = None if vrad is None else vrad * (units.km / units.s) opt_list = [pm_ra_coord, pm_dec_coord, d_coord, v_coord] opt_names = ["pm_ra", "pm_dec", "dist", "vrad"] # Check the optional inputs, make sure that they're sensible for item, name in zip(opt_list, opt_names): if item is not None: if ra_coord.shape != item.shape: raise ValueError("%s must be the same shape as ra and dec." % name) if isinstance(telescope_loc, EarthLocation): site_loc = telescope_loc else: site_loc = EarthLocation.from_geodetic( telescope_loc[1] * (180.0 / np.pi), telescope_loc[0] * (180.0 / np.pi), height=telescope_loc[2], ) # Useful for both astropy and novas methods, the latter of which gives easy # access to the IERS data that we want. if isinstance(time_array, Time): time_obj_array = time_array else: time_obj_array = Time(time_array, format="jd", scale="utc") if time_obj_array.size != 1: if (time_obj_array.shape != ra_coord.shape) and multi_coord: raise ValueError( "time_array must be of either of length 1 (single " "float) or same length as ra and dec." ) elif time_obj_array.ndim == 0: # Make the array at least 1-dimensional so we don't run into indexing # issues later. time_obj_array = Time([time_obj_array]) # Check to make sure that we have a properly formatted epoch for our in-bound # coordinate frame coord_epoch = None if isinstance(epoch, str) or isinstance(epoch, Time): # If its a string or a Time object, we don't need to do anything more coord_epoch = Time(epoch) elif epoch is not None: coord_epoch = Time(epoch, format="jyear") # Note if time_array is a single element multi_time = time_obj_array.size != 1 # Get IERS data, which is needed for NOVAS and ERFA polar_motion_data = iers.earth_orientation_table.get() pm_x_array, pm_y_array = polar_motion_data.pm_xy(time_obj_array) delta_x_array, delta_y_array = polar_motion_data.dcip_xy(time_obj_array) pm_x_array = pm_x_array.to_value("arcsec") pm_y_array = pm_y_array.to_value("arcsec") delta_x_array = delta_x_array.to_value("marcsec") delta_y_array = delta_y_array.to_value("marcsec") # Catch the case where we don't have CIP delta values yet (they don't typically have # predictive values like the polar motion does) delta_x_array[np.isnan(delta_x_array)] = 0.0 delta_y_array[np.isnan(delta_y_array)] = 0.0 # If the source was instantiated w/ floats, it'll be a 0-dim object, which will # throw errors if we try to treat it as an array. Reshape to a 1D array of len 1 # so that all the calls can be uniform if ra_coord.ndim == 0: ra_coord.shape += (1,) dec_coord.shape += (1,) if pm_ra_coord is not None: pm_ra if d_coord is not None: d_coord.shape += (1,) if v_coord is not None: v_coord.shape += (1,) # If there is an epoch and a proper motion, apply that motion now if astrometry_library == "astropy": # Astropy doesn't have (oddly enough) a way of getting at the apparent RA/Dec # directly, but we can cheat this by going to AltAz, and then coverting back # to apparent RA/Dec using the telescope lat and LAST. if (epoch is not None) and (pm_ra is not None) and (pm_dec is not None): # astropy is a bit weird in how it handles proper motion, so rather than # fight with it to do it all in one step, we separate it into two: first # apply proper motion to ICRS, then transform to topocentric. sky_coord = SkyCoord( ra=ra_coord, dec=dec_coord, pm_ra_cosdec=pm_ra_coord * np.cos(dec_coord), pm_dec=pm_dec_coord, frame="icrs", ) sky_coord = sky_coord.apply_space_motion(dt=(time_obj_array - coord_epoch)) ra_coord = sky_coord.ra dec_coord = sky_coord.dec if d_coord is not None: d_coord = d_coord.repeat(ra_coord.size) if v_coord is not None: v_coord = v_coord.repeat(ra_coord.size) sky_coord = SkyCoord( ra=ra_coord, dec=dec_coord, distance=d_coord, radial_velocity=v_coord, frame="icrs", ) azel_data = sky_coord.transform_to( SkyCoord( np.zeros_like(time_obj_array) * units.rad, np.zeros_like(time_obj_array) * units.rad, location=site_loc, obstime=time_obj_array, frame="altaz", ) ) app_ha, app_dec = erfa.ae2hd( azel_data.az.rad, azel_data.alt.rad, site_loc.lat.rad, ) app_ra = np.mod( time_obj_array.sidereal_time("apparent", longitude=site_loc.lon).rad - app_ha, 2 * np.pi, ) elif astrometry_library == "novas": # Import the NOVAS library only if it's needed/available. try: from novas import compat as novas from novas.compat import eph_manager import novas_de405 # noqa except ImportError as e: # pragma: no cover raise ImportError( "novas and/or novas_de405 are not installed but is required for " "NOVAS functionality" ) from e # Call is needed to load high-precision ephem data in NOVAS jd_start, jd_end, number = eph_manager.ephem_open() # Define the obs location, which is needed to calculate diurnal abb term # and polar wobble corrections site_loc = novas.make_on_surface( site_loc.lat.deg, # latitude in deg site_loc.lon.deg, # Longitude in deg site_loc.height.to_value("m"), # Height in meters 0.0, # Temperature, set to 0 for now (no atm refrac) 0.0, # Pressure, set to 0 for now (no atm refrac) ) # NOVAS wants things in terrestial time and UT1 tt_time_array = time_obj_array.tt.jd ut1_time_array = time_obj_array.ut1.jd gast_array = time_obj_array.sidereal_time("apparent", "greenwich").rad if np.any(tt_time_array < jd_start) or np.any(tt_time_array > jd_end): raise ValueError( "No current support for JPL ephems outside of 1700 - 2300 AD. " "Check back later (or possibly earlier)..." ) app_ra = np.zeros(tt_time_array.shape) + np.zeros(ra_coord.shape) app_dec = np.zeros(tt_time_array.shape) + np.zeros(ra_coord.shape) for idx in range(len(app_ra)): if multi_coord or (idx == 0): # Create a catalog entry for the source in question cat_entry = novas.make_cat_entry( "dummy_name", # Dummy source name "GKK", # Catalog ID, fixed for now 156, # Star ID number, fixed for now ra_coord[idx].to_value("hourangle"), dec_coord[idx].to_value("deg"), 0.0 if pm_ra is None else ( pm_ra_coord.to_value("mas/yr") * np.cos(dec_coord[idx].to_value("rad")) ), 0.0 if pm_dec is None else pm_dec_coord.to_value("mas/yr"), 0.0 if (dist is None or np.any(dist == 0.0)) else (d_coord.kiloparsec ** -1.0), 0.0 if (vrad is None) else v_coord.to_value("km/s"), ) # Update polar wobble parameters for a given timestamp if multi_time or (idx == 0): gast = gast_array[idx] pm_x = pm_x_array[idx] * np.cos(gast) + pm_y_array[idx] * np.sin(gast) pm_y = pm_y_array[idx] * np.cos(gast) - pm_x_array[idx] * np.sin(gast) tt_time = tt_time_array[idx] ut1_time = ut1_time_array[idx] novas.cel_pole( tt_time, 2, delta_x_array[idx], delta_y_array[idx], ) # Calculate topocentric RA/Dec values [temp_ra, temp_dec] = novas.topo_star( tt_time, (tt_time - ut1_time) * 86400.0, cat_entry, site_loc, accuracy=0, ) xyz_array = polar2_to_cart3( temp_ra * (np.pi / 12.0), temp_dec * (np.pi / 180.0) ) xyz_array = novas.wobble(tt_time, pm_x, pm_y, xyz_array, 1) app_ra[idx], app_dec[idx] = cart3_to_polar2(np.array(xyz_array)) elif astrometry_library == "erfa": # liberfa wants things in radians pm_x_array *= np.pi / (3600.0 * 180.0) pm_y_array *= np.pi / (3600.0 * 180.0) [_, _, _, app_dec, app_ra, eqn_org] = erfa.atco13( ra_coord.to_value("rad"), dec_coord.to_value("rad"), 0.0 if (pm_ra is None) else pm_ra_coord.to_value("rad/yr"), 0.0 if (pm_dec is None) else pm_dec_coord.to_value("rad/yr"), 0.0 if (dist is None or np.any(dist == 0.0)) else (d_coord.pc ** -1.0), 0.0 if (vrad is None) else v_coord.to_value("km/s"), time_obj_array.utc.jd, 0.0, time_obj_array.delta_ut1_utc, site_loc.lon.rad, site_loc.lat.rad, site_loc.height.to_value("m"), pm_x_array, pm_y_array, 0, # ait pressure, used for refraction (ignored) 0, # amb temperature, used for refraction (ignored) 0, # rel humidity, used for refraction (ignored) 0, # wavelength, used for refraction (ignored) ) app_ra = np.mod(app_ra - eqn_org, 2 * np.pi) return app_ra, app_dec def transform_app_to_icrs( time_array, app_ra, app_dec, telescope_loc, astrometry_library="erfa", ): """ Transform a set of coordinates in topocentric/apparent to ICRS coordinates. This utility uses either astropy or erfa to calculate the ICRS coordinates of a given set of apparent source coordinates. These coordinates are most typically used for defining the celestial/catalog position of a source. Note that at present, this is only implemented in astropy and pyERFA, although it could hypothetically be extended to NOVAS at some point. Parameters ---------- time_array : float or ndarray of float Julian dates to calculate coordinate positions for. Can either be a single float, or an ndarray of shape (Ntimes,). app_ra : float or ndarray of float ICRS RA of the celestial target, expressed in units of radians. Can either be a single float or array of shape (Ncoord,). Note that if time_array is not a singleton value, then Ncoord must be equal to Ntimes. app_dec : float or ndarray of float ICRS Dec of the celestial target, expressed in units of radians. Can either be a single float or array of shape (Ncoord,). Note that if time_array is not a singleton value, then Ncoord must be equal to Ntimes. telescope_loc : tuple of floats or EarthLocation ITRF latitude, longitude, and altitude (rel to sea-level) of the phase center of the array. Can either be provided as an astropy EarthLocation, or a tuple of shape (3,) containung (in order) the latitude, longitude, and altitude, in units of radians, radians, and meters, respectively. Returns ------- icrs_ra : ndarray of floats ICRS right ascension coordinates, in units of radians, of either shape (Ntimes,) if Ntimes >1, otherwise (Ncoord,). icrs_dec : ndarray of floats ICRS declination coordinates, in units of radians, of either shape (Ntimes,) if Ntimes >1, otherwise (Ncoord,). """ # Make sure that the library requested is actually permitted if astrometry_library not in ["erfa", "astropy"]: raise ValueError( "Requested coordinate transformation library is not supported, please " "select either 'erfa' or 'astropy' for astrometry_library." ) ra_coord = app_ra * units.rad dec_coord = app_dec * units.rad # Check here to make sure that ra_coord and dec_coord are the same length, # either 1 or len(time_array) multi_coord = ra_coord.size != 1 if ra_coord.shape != dec_coord.shape: raise ValueError("app_ra and app_dec must be the same shape.") if isinstance(telescope_loc, EarthLocation): site_loc = telescope_loc else: site_loc = EarthLocation.from_geodetic( telescope_loc[1] * (180.0 / np.pi), telescope_loc[0] * (180.0 / np.pi), height=telescope_loc[2], ) if isinstance(time_array, Time): time_obj_array = time_array else: time_obj_array = Time(time_array, format="jd", scale="utc") if time_obj_array.size != 1: if (time_obj_array.shape != ra_coord.shape) and multi_coord: raise ValueError( "time_array must be of either of length 1 (single " "float) or same length as ra and dec." ) elif time_obj_array.ndim == 0: # Make the array at least 1-dimensional so we don't run into indexing # issues later. time_obj_array = Time([time_obj_array]) if astrometry_library == "astropy": az_coord, el_coord = erfa.hd2ae( np.mod( time_obj_array.sidereal_time("apparent", longitude=site_loc.lon).rad - ra_coord.to_value("rad"), 2 * np.pi, ), dec_coord.to_value("rad"), site_loc.lat.rad, ) sky_coord = SkyCoord( az_coord * units.rad, el_coord * units.rad, frame="altaz", location=site_loc, obstime=time_obj_array, ) coord_data = sky_coord.transform_to("icrs") icrs_ra = coord_data.ra.rad icrs_dec = coord_data.dec.rad elif astrometry_library == "erfa": # Get IERS data, which is needed for highest precision polar_motion_data = iers.earth_orientation_table.get() pm_x_array, pm_y_array = polar_motion_data.pm_xy(time_obj_array) pm_x_array = pm_x_array.to_value("rad") pm_y_array = pm_y_array.to_value("rad") bpn_matrix = erfa.pnm06a(time_obj_array.tt.jd, 0.0) cip_x, cip_y = erfa.bpn2xy(bpn_matrix) cio_s = erfa.s06(time_obj_array.tt.jd, 0.0, cip_x, cip_y) eqn_org = erfa.eors(bpn_matrix, cio_s) # Observed to ICRS via ERFA icrs_ra, icrs_dec = erfa.atoc13( "r", ra_coord.to_value("rad") + eqn_org, dec_coord.to_value("rad"), time_obj_array.utc.jd, 0.0, # Second half of the UT date, not needed time_obj_array.delta_ut1_utc, site_loc.lon.rad, site_loc.lat.rad, site_loc.height.value, pm_x_array, pm_y_array, 0, # ait pressure, used for refraction (ignored) 0, # amb temperature, used for refraction (ignored) 0, # rel humidity, used for refraction (ignored) 0, # wavelength, used for refraction (ignored) ) # Return back the two RA/Dec arrays return icrs_ra, icrs_dec def calc_parallactic_angle( app_ra, app_dec, lst_array, telescope_lat, ): """ Calculate the parallactic angle between RA/Dec and the AltAz frame. Parameters ---------- app_ra : ndarray of floats Array of apparent RA values in units of radians, shape (Ntimes,). app_dec : ndarray of floats Array of apparent dec values in units of radians, shape (Ntimes,). telescope_lat : float Latitude of the observatory, in units of radians. lst_array : float or ndarray of float Array of local apparent sidereal timesto calculate position angle values for, in units of radians. Can either be a single float or an array of shape (Ntimes,). """ # This is just a simple wrapped around the pas function in ERFA return erfa.pas(app_ra, app_dec, lst_array, telescope_lat) def calc_frame_pos_angle( time_array, app_ra, app_dec, telescope_loc, ref_frame, ref_epoch=None, offset_pos=(np.pi / 360.0), ): """ Calculate an position angle given apparent position and reference frame. This function is used to determine the position angle between the great circle of declination in apparent coordinates, versus that in a given reference frame. Note that this is slightly different than parallactic angle, which is the difference between apparent declination and elevation. Paramters --------- time_array : float or ndarray of floats Array of julian dates to calculate position angle values for, of shape (Ntimes,). app_ra : ndarray of floats Array of apparent RA values in units of radians, shape (Ntimes,). app_dec : ndarray of floats Array of apparent dec values in units of radians, shape (Ntimes,). telescope_loc : tuple of floats or EarthLocation ITRF latitude, longitude, and altitude (rel to sea-level) of the observer. Can either be provided as an astropy EarthLocation, or an array-like of shape (3,) containing the latitude, longitude, and altitude, in that order, with units of radians, radians, and meters, respectively. offset_pos : float Distance of the offset position used to calculate the frame PA. Default is 0.5 degrees, which should be sufficent for most applications. ref_frame : str Coordinate frame to calculate position angles for. Can be any of the several supported frames in astropy (a limited list: fk4, fk5, icrs, gcrs, cirs, galactic). ref_epoch : str or flt Epoch of the coordinates, only used when ref_frame = fk4 or fk5. Given in unites of fractional years, either as a float or as a string with the epoch abbreviation (e.g, Julian epoch 2000.0 would be J2000.0). Returns ------- frame_pa : ndarray of floats Array of position angles, in units of radians. """ # Check to see if the position angles should default to zero if (ref_frame is None) or (ref_frame == "topo"): # No-op detected, ENGAGE MAXIMUM SNARK! return np.zeros_like(time_array) # This creates an array of unique entries of ra + dec + time, since the processing # time for each element can be non-negligible, and entries along the Nblt axis can # be highly redundant. unique_mask = np.union1d( np.union1d( np.unique(app_ra, return_index=True)[1], np.unique(app_dec, return_index=True)[1], ), np.unique(time_array, return_index=True)[1], ) # Pluck out the unique entries for each unique_ra = app_ra[unique_mask] unique_dec = app_dec[unique_mask] unique_time = time_array[unique_mask] # Figure out how many elements we need to transform n_coord = len(unique_mask) # Offset north/south positions by 0.5 deg, such that the PA is determined over a # 1 deg arc. up_dec = unique_dec + (np.pi / 360.0) dn_dec = unique_dec - (np.pi / 360.0) up_ra = dn_ra = unique_ra # Wrap the positions if they happen to go over the poles up_ra[up_dec > (np.pi / 2.0)] = np.mod( up_ra[up_dec > (np.pi / 2.0)] + np.pi, 2.0 * np.pi ) up_dec[up_dec > (np.pi / 2.0)] = np.pi - up_dec[up_dec > (np.pi / 2.0)] dn_ra[-dn_dec > (np.pi / 2.0)] = np.mod( dn_ra[dn_dec > (np.pi / 2.0)] + np.pi, 2.0 * np.pi ) dn_dec[-dn_dec > (np.pi / 2.0)] = np.pi - dn_dec[-dn_dec > (np.pi / 2.0)] # Run the set of offset coordinates through the "reverse" transform. The two offset # positions are concat'd together to help reduce overheads ref_ra, ref_dec = calc_sidereal_coords( np.tile(unique_time, 2), np.concatenate((dn_ra, up_ra)), np.concatenate((dn_dec, up_dec)), telescope_loc, ref_frame, coord_epoch=ref_epoch, ) # Use the pas function from ERFA to calculate the position angle. The negative sign # is here because we're measuring PA of app -> frame, but we want frame -> app. unique_pa = -erfa.pas( ref_ra[:n_coord], ref_dec[:n_coord], ref_ra[n_coord:], ref_dec[n_coord:] ) # Finally, we have to go back through and "fill in" the redundant entries frame_pa = np.zeros_like(app_ra) for idx in range(n_coord): select_mask = np.logical_and( np.logical_and(unique_ra[idx] == app_ra, unique_dec[idx] == app_dec,), unique_time[idx] == time_array, ) frame_pa[select_mask] = unique_pa[idx] return frame_pa def lookup_jplhorizons( target_name, time_array, telescope_loc=None, high_cadence=False, force_indv_lookup=None, ): """ Lookup solar system body coordinates via the JPL-Horizons service. This utility is useful for generating ephemerides, which can then be interpolated in order to provide positional data for a target which is moving, such as planetary bodies and other solar system objects. Use of this function requires the installation of the `astroquery` module. Parameters ---------- target_name : str Name of the target to gather an ephemeris for. Must match the name in the JPL-Horizons database. time_array : array-like of float Times in UTC Julian days to gather an ephemeris for. telescope_loc : array-like of float ITRF latitude, longitude, and altitude (rel to sea-level) of the observer. Must be an array-like of shape (3,) containing the latitude, longitude, and altitude, in that order, with units of radians, radians, and meters, respectively. high_cadence : bool If set to True, will calculate ephemeris points every 3 minutes in time, as opposed to the default of every 3 hours. force_indv_lookup : bool If set to True, will calculate coordinate values for each value found within `time_array`. If False, a regularized time grid is sampled that encloses the values contained within `time_array`. Default is False, unless `time_array` is of length 1, in which the default is set to True. Returns ------- ephem_times : ndarray of float Times for which the ephemeris values were calculated, in UTC Julian days. ephem_ra : ndarray of float ICRS Right ascension of the target at the values within `ephem_times`, in units of radians. ephem_dec : ndarray of float ICRS Declination of the target at the values within `ephem_times`, in units of radians. ephem_dist : ndarray of float Distance of the target relative to the observer, at the values within `ephem_times`, in units of parsecs. ephem_vel : ndarray of float Velocity of the targets relative to the observer, at the values within `ephem_times`, in units of km/sec. """ try: from astroquery.jplhorizons import Horizons except ImportError as err: # pragma: no cover raise ImportError( "astroquery is not installed but is required for " "planet ephemeris functionality" ) from err from pyuvdata.data import DATA_PATH from os.path import join as path_join from json import load as json_load # Get the telescope location into a format that JPL-Horizons can understand, # which is nominally a dict w/ entries for lon (units of deg), lat (units of # deg), and elevation (units of km). if isinstance(telescope_loc, EarthLocation): site_loc = { "lon": telescope_loc.lon.deg, "lat": telescope_loc.lat.deg, "elevation": telescope_loc.height.to_value(unit=units.km), } elif telescope_loc is None: # Setting to None will report the geocentric position site_loc = None else: site_loc = { "lon": telescope_loc[1] * (180.0 / np.pi), "lat": telescope_loc[0] * (180.0 / np.pi), "elevation": telescope_loc[2] * (0.001), # m -> km } # If force_indv_lookup is True, or unset but only providing a single value, then # just calculate the RA/Dec for the times requested rather than creating a table # to interpolate from. if force_indv_lookup or ( (np.array(time_array).size == 1) and (force_indv_lookup is None) ): epoch_list = np.unique(time_array) if len(epoch_list) > 50: raise ValueError( "Requesting too many individual ephem points from JPL-Horizons. This " "can be remedied by setting force_indv_lookup=False or limiting the " "number of values in time_array." ) else: # When querying for multiple times, its faster (and kinder to the # good folks at JPL) to create a range to query, and then interpolate # between values. The extra buffer of 0.001 or 0.25 days for high and # low cadence is to give enough data points to allow for spline # interpolation of the data. if high_cadence: start_time = np.min(time_array) - 0.001 stop_time = np.max(time_array) + 0.001 step_time = "3m" n_entries = (stop_time - start_time) * (1440.0 / 3.0) else: # The start/stop time here are setup to maximize reusability of the # data, since astroquery appears to cache the results from previous # queries. start_time = (0.25 * np.floor(4.0 * np.min(time_array))) - 0.25 stop_time = (0.25 * np.ceil(4.0 * np.max(time_array))) + 0.25 step_time = "3h" n_entries = (stop_time - start_time) * (24.0 / 3.0) # We don't want to overtax the JPL service, so limit ourselves to 1000 # individual queries at a time. Note that this is likely a conservative # cap for JPL-Horizons, but there should be exceptionally few applications # that actually require more than this. if n_entries > 1000: if (len(np.unique(time_array)) <= 50) and (force_indv_lookup is None): # If we have a _very_ sparse set of epochs, pass that along instead epoch_list = np.unique(time_array) else: # Otherwise, time to raise an error raise ValueError( "Too many ephem points requested from JPL-Horizons. This " "can be remedied by setting high_cadance=False or limiting " "the number of values in time_array." ) else: epoch_list = { "start": Time(start_time, format="jd").isot, "stop": Time(stop_time, format="jd").isot, "step": step_time, } # Check to make sure dates are within the 1700-2200 time range, # since not all targets are supported outside of this range if (np.min(time_array) < 2341973.0) or (np.max(time_array) > 2524593.0): raise ValueError( "No current support for JPL ephems outside of 1700 - 2300 AD. " "Check back later (or possibly earlier)..." ) # JPL-Horizons has a separate catalog with what it calls 'major bodies', # and will throw an error if you use the wrong catalog when calling for # astrometry. We'll use the dict below to capture this behavior. with open(path_join(DATA_PATH, "jpl_major_bodies.json"), "r") as fhandle: major_body_dict = json_load(fhandle) target_id = target_name id_type = "smallbody" # If we find the target in the major body database, then we can extract the # target ID to make the query a bit more robust (otherwise JPL-Horizons will fail # on account that id will find multiple partial matches: e.g., "Mars" will be # matched with "Mars", "Mars Explorer", "Mars Barycenter"..., and JPL-Horizons will # not know which to choose). if target_name in major_body_dict.keys(): target_id = major_body_dict[target_name] id_type = None query_obj = Horizons( id=target_id, location=site_loc, epochs=epoch_list, id_type=id_type, ) # If not in the major bodies catalog, try the minor bodies list, and if # still not found, throw an error. try: ephem_data = query_obj.ephemerides(extra_precision=True) except KeyError: # This is a fix for an astroquery + JPL-Horizons bug, that's related to # API change on JPL's side. In this case, the source is identified, but # astroquery can't correctly parse the return message from JPL-Horizons. # See astroquery issue #2169. ephem_data = query_obj.ephemerides(extra_precision=False) # pragma: no cover except ValueError as err: query_obj._session.close() raise ValueError( "Target ID is not recognized in either the small or major bodies " "catalogs, please consult the JPL-Horizons database for supported " "targets (https://ssd.jpl.nasa.gov/?horizons)." ) from err # This is explicitly closed here to trap a bug that occassionally throws an # unexpected warning, see astroquery issue #1807 query_obj._session.close() # Now that we have the ephem data, extract out the relevant data ephem_times = np.array(ephem_data["datetime_jd"]) ephem_ra = np.array(ephem_data["RA"]) * (np.pi / 180.0) ephem_dec = np.array(ephem_data["DEC"]) * (np.pi / 180.0) ephem_dist = np.array(ephem_data["delta"]) # AU ephem_vel = np.array(ephem_data["delta_rate"]) # km/s return ephem_times, ephem_ra, ephem_dec, ephem_dist, ephem_vel def interpolate_ephem( time_array, ephem_times, ephem_ra, ephem_dec, ephem_dist=None, ephem_vel=None, ): """ Interpolates ephemerides to give positions for requested times. This is a simple tool for calculated interpolated RA and Dec positions, as well as distances and velocities, for a given ephemeris. Under the hood, the method uses as cubic spline interpolation to calculate values at the requested times, provided that there are enough values to interpolate over to do so (requires >= 4 points), otherwise a linear interpolation is used. Parameters ---------- time_array : array-like of floats Times to interpolate positions for, in UTC Julian days. ephem_times : array-like of floats Times in UTC Julian days which describe that match to the recorded postions of the target. Must be array-like, of shape (Npts,), where Npts is the number of ephemeris points. ephem_ra : array-like of floats Right ascencion of the target, at the times given in `ephem_times`. Units are in radians, must have the same shape as `ephem_times`. ephem_dec : array-like of floats Declination of the target, at the times given in `ephem_times`. Units are in radians, must have the same shape as `ephem_times`. ephem_dist : array-like of floats Distance of the target from the observer, at the times given in `ephem_times`. Optional argument, in units of parsecs. Must have the same shape as `ephem_times`. ephem_vel : array-like of floats Velocities of the target, at the times given in `ephem_times`. Optional argument, in units of km/sec. Must have the same shape as `ephem_times`. Returns ------- ra_vals : ndarray of float Interpolated RA values, returned as an ndarray of floats with units of radians, and the same shape as `time_array`. dec_vals : ndarray of float Interpolated declination values, returned as an ndarray of floats with units of radians, and the same shape as `time_array`. dist_vals : None or ndarray of float If `ephem_dist` was provided, an ndarray of floats (with same shape as `time_array`) with the interpolated target distances, in units of parsecs. If `ephem_dist` was not provided, this returns as None. vel_vals : None or ndarray of float If `ephem_vals` was provided, an ndarray of floats (with same shape as `time_array`) with the interpolated target velocities, in units of km/sec. If `ephem_vals` was not provided, this returns as None. """ # We're importing this here since it's only used for this one function from scipy.interpolate import interp1d ephem_shape = np.array(ephem_times).shape # Make sure that things look reasonable if np.array(ephem_ra).shape != ephem_shape: raise ValueError("ephem_ra must have the same shape as ephem_times.") if np.array(ephem_dec).shape != ephem_shape: raise ValueError("ephem_dec must have the same shape as ephem_times.") if (np.array(ephem_dist).shape != ephem_shape) and (ephem_dist is not None): raise ValueError("ephem_dist must have the same shape as ephem_times.") if (np.array(ephem_vel).shape != ephem_shape) and (ephem_vel is not None): raise ValueError("ephem_vel must have the same shape as ephem_times.") ra_vals = np.zeros_like(time_array, dtype=float) dec_vals = np.zeros_like(time_array, dtype=float) dist_vals = None if ephem_dist is None else np.zeros_like(time_array, dtype=float) vel_vals = None if ephem_vel is None else np.zeros_like(time_array, dtype=float) if len(ephem_times) == 1: ra_vals += ephem_ra dec_vals += ephem_dec if ephem_dist is not None: dist_vals += ephem_dist if ephem_vel is not None: vel_vals += ephem_vel else: if len(ephem_times) > 3: interp_kind = "cubic" else: interp_kind = "linear" # If we have values that line up perfectly, just use those directly select_mask = np.isin(time_array, ephem_times) if np.any(select_mask): time_select = time_array[select_mask] ra_vals[select_mask] = interp1d(ephem_times, ephem_ra, kind="nearest")( time_select ) dec_vals[select_mask] = interp1d(ephem_times, ephem_dec, kind="nearest")( time_select ) if ephem_dist is not None: dist_vals[select_mask] = interp1d( ephem_times, ephem_dist, kind="nearest" )(time_select) if ephem_vel is not None: vel_vals[select_mask] = interp1d( ephem_times, ephem_vel, kind="nearest" )(time_select) # If we have values lining up between grid points, use spline interpolation # to calculate their values select_mask = ~select_mask if np.any(select_mask): time_select = time_array[select_mask] ra_vals[select_mask] = interp1d(ephem_times, ephem_ra, kind=interp_kind)( time_select ) dec_vals[select_mask] = interp1d(ephem_times, ephem_dec, kind=interp_kind)( time_select ) if ephem_dist is not None: dist_vals[select_mask] = interp1d( ephem_times, ephem_dist, kind=interp_kind )(time_select) if ephem_vel is not None: vel_vals[select_mask] = interp1d( ephem_times, ephem_vel, kind=interp_kind )(time_select) return (ra_vals, dec_vals, dist_vals, vel_vals) def calc_app_coords( lon_coord, lat_coord, coord_frame="icrs", coord_epoch=None, coord_times=None, coord_type="sidereal", time_array=None, lst_array=None, telescope_loc=None, pm_ra=None, pm_dec=None, vrad=None, dist=None, ): """ Calculate apparent coordinates for several different coordinate types. This function calculates apparent positions at the current epoch. Parameters ---------- lon_coord : float or ndarray of float Longitudinal (e.g., RA) coordinates, units of radians. Must match the same shape as lat_coord. lat_coord : float or ndarray of float Latitudinal (e.g., Dec) coordinates, units of radians. Must match the same shape as lon_coord. coord_frame : string The requested reference frame for the output coordinates, can be any frame that is presently supported by astropy. coord_epoch : float or str or Time object Epoch for ref_frame, nominally only used if converting to either the FK4 or FK5 frames, in units of fractional years. If provided as a float and the coord_frame is an FK4-variant, value will assumed to be given in Besselian years (i.e., 1950 would be 'B1950'), otherwise the year is assumed to be in Julian years. coord_times : float or ndarray of float Only used when `coord_type="ephem"`, the JD UTC time for each value of `lon_coord` and `lat_coord`. These values are used to interpolate `lon_coord` and `lat_coord` values to those times listed in `time_array`. coord_type : str coord_type : str Type of source to calculate coordinates for. Must be one of: "sidereal" (fixed RA/Dec), "ephem" (RA/Dec that moves with time), "driftscan" (fixed az/el position), "unphased" (alias for "driftscan" with (Az, Alt) = (0 deg, 90 deg)). time_array : float or ndarray of float or Time object Times for which the apparent coordinates were calculated, in UTC JD. If more than a single element, must be the same shape as lon_coord and lat_coord if both of those are arrays (instead of single floats). telescope_loc : array-like of floats or EarthLocation ITRF latitude, longitude, and altitude (rel to sea-level) of the phase center of the array. Can either be provided as an astropy EarthLocation, or a tuple of shape (3,) containung (in order) the latitude, longitude, and altitude, in units of radians, radians, and meters, respectively. coord_frame : string The requested reference frame for the output coordinates, can be any frame that is presently supported by astropy. Default is ICRS. coord_epoch : float or str or Time object Epoch for ref_frame, nominally only used if converting to either the FK4 or FK5 frames, in units of fractional years. If provided as a float and the ref_frame is an FK4-variant, value will assumed to be given in Besselian years (i.e., 1950 would be 'B1950'), otherwise the year is assumed to be in Julian years. pm_ra : float or ndarray of float Proper motion in RA of the source, expressed in units of milliarcsec / year. Can either be a single float or array of shape (Ntimes,), although this must be consistent with other parameters (namely ra_coord and dec_coord). Not required, motion is calculated relative to the value of `coord_epoch`. pm_dec : float or ndarray of float Proper motion in Dec of the source, expressed in units of milliarcsec / year. Can either be a single float or array of shape (Ntimes,), although this must be consistent with other parameters (namely ra_coord and dec_coord). Not required, motion is calculated relative to the value of `coord_epoch`. vrad : float or ndarray of float Radial velocity of the source, expressed in units of km / sec. Can either be a single float or array of shape (Ntimes,), although this must be consistent with other parameters (namely ra_coord and dec_coord). Not required. dist : float or ndarray of float Distance of the source, expressed in milliarcseconds. Can either be a single float or array of shape (Ntimes,), although this must be consistent with other parameters (namely ra_coord and dec_coord). Not required. Returns ------- app_ra : ndarray of floats Apparent right ascension coordinates, in units of radians. app_dec : ndarray of floats Apparent declination coordinates, in units of radians. """ if isinstance(telescope_loc, EarthLocation): site_loc = telescope_loc else: site_loc = EarthLocation.from_geodetic( telescope_loc[1] * (180.0 / np.pi), telescope_loc[0] * (180.0 / np.pi), height=telescope_loc[2], ) # Time objects and unique don't seem to play well together, so we break apart # their handling here if isinstance(time_array, Time): unique_time_array, unique_mask = np.unique(time_array.utc.jd, return_index=True) else: unique_time_array, unique_mask = np.unique(time_array, return_index=True) if coord_type in ["driftscan", "unphased"]: if lst_array is None: unique_lst = get_lst_for_time( unique_time_array, site_loc.lat.deg, site_loc.lon.deg, site_loc.height.to_value("m"), ) else: unique_lst = lst_array[unique_mask] if coord_type == "sidereal": # If the coordinates are not in the ICRS frame, go ahead and transform them now if coord_frame != "icrs": icrs_ra, icrs_dec = transform_sidereal_coords( lon_coord, lat_coord, coord_frame, "icrs", in_coord_epoch=coord_epoch, time_array=unique_time_array, ) else: icrs_ra = lon_coord icrs_dec = lat_coord unique_app_ra, unique_app_dec = transform_icrs_to_app( unique_time_array, icrs_ra, icrs_dec, site_loc, pm_ra=pm_ra, pm_dec=pm_dec, vrad=vrad, dist=dist, ) elif coord_type == "driftscan": # Use the ERFA function ae2hd, which will do all the heavy # lifting for us unique_app_ha, unique_app_dec = erfa.ae2hd( lon_coord, lat_coord, site_loc.lat.rad ) # The above returns HA/Dec, so we just need to rotate by # the LST to get back app RA and Dec unique_app_ra = np.mod(unique_app_ha + unique_lst, 2 * np.pi) unique_app_dec = unique_app_dec + np.zeros_like(unique_app_ra) elif coord_type == "ephem": interp_ra, interp_dec, _, _ = interpolate_ephem( unique_time_array, coord_times, lon_coord, lat_coord, ) if coord_frame != "icrs": icrs_ra, icrs_dec = transform_sidereal_coords( interp_ra, interp_dec, coord_frame, "icrs", in_coord_epoch=coord_epoch, time_array=unique_time_array, ) else: icrs_ra = interp_ra icrs_dec = interp_dec # TODO: Vel and distance handling to be integrated here, once they are are # needed for velocity frame tracking unique_app_ra, unique_app_dec = transform_icrs_to_app( unique_time_array, icrs_ra, icrs_dec, site_loc, pm_ra=pm_ra, pm_dec=pm_dec, ) elif coord_type == "unphased": # This is the easiest one - this is just supposed to be ENU, so set the # apparent coords to the current lst and telescope_lon. unique_app_ra = unique_lst.copy() unique_app_dec = np.zeros_like(unique_app_ra) + site_loc.lat.rad else: raise ValueError("Object type %s is not recognized." % coord_type) # Now that we've calculated all the unique values, time to backfill through the # "redundant" entries in the Nblt axis. app_ra = np.zeros(np.array(time_array).shape) app_dec = np.zeros(np.array(time_array).shape) # Need this promotion in order to match entries if isinstance(time_array, Time): unique_time_array = Time(unique_time_array, format="jd", scale="utc") for idx, unique_time in enumerate(unique_time_array): select_mask = time_array == unique_time app_ra[select_mask] = unique_app_ra[idx] app_dec[select_mask] = unique_app_dec[idx] return app_ra, app_dec def calc_sidereal_coords( time_array, app_ra, app_dec, telescope_loc, coord_frame, coord_epoch=None, ): """ Calculate sidereal coordinates given apparent coordinates. This function calculates coordinates in the requested frame (at a given epoch) from a set of apparent coordinates. Parameters ---------- time_array : float or ndarray of float or Time object Times for which the apparent coordinates were calculated, in UTC JD. Must match the shape of app_ra and app_dec. app_ra : float or ndarray of float Array of apparent right ascension coordinates, units of radians. Must match the shape of time_array and app_dec. app_ra : float or ndarray of float Array of apparent right declination coordinates, units of radians. Must match the shape of time_array and app_dec. telescope_loc : tuple of floats or EarthLocation ITRF latitude, longitude, and altitude (rel to sea-level) of the phase center of the array. Can either be provided as an astropy EarthLocation, or a tuple of shape (3,) containung (in order) the latitude, longitude, and altitude, in units of radians, radians, and meters, respectively. coord_frame : string The requested reference frame for the output coordinates, can be any frame that is presently supported by astropy. Default is ICRS. coord_epoch : float or str or Time object Epoch for ref_frame, nominally only used if converting to either the FK4 or FK5 frames, in units of fractional years. If provided as a float and the ref_frame is an FK4-variant, value will assumed to be given in Besselian years (i.e., 1950 would be 'B1950'), otherwise the year is assumed to be in Julian years. Returns ------- ref_ra : ndarray of floats Right ascension coordinates in the requested frame, in units of radians. Either shape (Ntimes,) if Ntimes >1, otherwise (Ncoord,). ref_dec : ndarray of floats Declination coordinates in the requested frame, in units of radians. Either shape (Ntimes,) if Ntimes >1, otherwise (Ncoord,). """ # Check to make sure that we have a properly formatted epoch for our in-bound # coordinate frame epoch = None if isinstance(coord_epoch, str) or isinstance(coord_epoch, Time): # If its a string or a Time object, we don't need to do anything more epoch = Time(coord_epoch) elif coord_epoch is not None: if coord_frame.lower() in ["fk4", "fk4noeterms"]: epoch = Time(coord_epoch, format="byear") else: epoch = Time(coord_epoch, format="jyear") icrs_ra, icrs_dec = transform_app_to_icrs( time_array, app_ra, app_dec, telescope_loc ) if coord_frame == "icrs": ref_ra, ref_dec = (icrs_ra, icrs_dec) else: ref_ra, ref_dec = transform_sidereal_coords( icrs_ra, icrs_dec, "icrs", coord_frame, out_coord_epoch=epoch, time_array=time_array, ) return ref_ra, ref_dec def get_lst_for_time( jd_array, latitude, longitude, altitude, astrometry_library="erfa" ): """ Get the local apparent sidereal time for a set of jd times at an earth location. This function calculates the local apparent sidereal time (LAST), given a UTC time and a position on the Earth, using either the astropy or NOVAS libraries. It is important to note that there is an apporoximate 20 microsecond difference between the two methods, presumably due to small differences in the apparent reference frame. These differences will cancel out when calculating coordinates in the TOPO frame, so long as apparent coordinates are calculated using the same library (i.e., astropy or NOVAS). Failing to do so can introduce errors up to ~1 mas in the horizontal coordinate system (i.e., AltAz). Parameters ---------- jd_array : ndarray of float JD times to get lsts for. latitude : float Latitude of location to get lst for in degrees. longitude : float Longitude of location to get lst for in degrees. altitude : float Altitude of location to get lst for in meters. astrometry_library : str Library used for running the LST calculations. Allowed options are 'erfa' (which uses the pyERFA), 'novas' (which uses the python-novas library), and 'astropy' (which uses the astropy utilities). Default is erfa. Returns ------- ndarray of float LASTs in radians corresponding to the jd_array. """ if isinstance(jd_array, np.ndarray): lst_array = np.zeros_like(jd_array) else: lst_array = np.zeros(1) jd, reverse_inds = np.unique(jd_array, return_inverse=True) times = Time( jd, format="jd", scale="utc", location=(Angle(longitude, unit="deg"), Angle(latitude, unit="deg"), altitude), ) if iers.conf.auto_max_age is None: # pragma: no cover delta, status = times.get_delta_ut1_utc(return_status=True) if np.any( np.isin(status, (iers.TIME_BEFORE_IERS_RANGE, iers.TIME_BEYOND_IERS_RANGE)) ): warnings.warn( "time is out of IERS range, setting delta ut1 utc to " "extrapolated value" ) times.delta_ut1_utc = delta if astrometry_library == "erfa": # This appears to be what astropy is using under the hood, # so it _should_ be totally consistent. gast_array = erfa.gst06a(times.ut1.jd, 0.0, times.tt.jd, 0.0) lst_array = np.mod(gast_array + (longitude * (np.pi / 180.0)), 2.0 * np.pi)[ reverse_inds ] elif astrometry_library == "astropy": lst_array = times.sidereal_time("apparent").radian[reverse_inds] elif astrometry_library == "novas": # Import the NOVAS library only if it's needed/available. try: from novas import compat as novas from novas.compat import eph_manager import novas_de405 # noqa except ImportError as e: # pragma: no cover raise ImportError( "novas and/or novas_de405 are not installed but is required for " "NOVAS functionality" ) from e jd_start, jd_end, number = eph_manager.ephem_open() tt_time_array = times.tt.value ut1_time_array = times.ut1.value polar_motion_data = iers.earth_orientation_table.get() delta_x_array = np.interp( times.mjd, polar_motion_data["MJD"].value, polar_motion_data["dX_2000A_B"].value, left=0.0, right=0.0, ) delta_y_array = np.interp( times.mjd, polar_motion_data["MJD"].value, polar_motion_data["dY_2000A_B"].value, left=0.0, right=0.0, ) # Catch the case where we don't have CIP delta values yet (they don't typically # have predictive values like the polar motion does) delta_x_array[np.isnan(delta_x_array)] = 0.0 delta_y_array[np.isnan(delta_y_array)] = 0.0 for idx in range(len(times)): novas.cel_pole( tt_time_array[idx], 2, delta_x_array[idx], delta_y_array[idx] ) # The NOVAS routine will return Greenwich Apparent Sidereal Time (GAST), # in units of hours lst_array[reverse_inds == idx] = novas.sidereal_time( ut1_time_array[idx], 0.0, (tt_time_array[idx] - ut1_time_array[idx]) * 86400.0, ) # Add the telescope lon to convert from GAST to LAST (local) lst_array = np.mod(lst_array + (longitude / 15.0), 24.0) # Convert from hours back to rad lst_array *= np.pi / 12.0 return lst_array def _adj_list(vecs, tol, n_blocks=None): """Identify neighbors of each vec in vecs, to distance tol.""" n_items = len(vecs) max_items = 2 ** 10 # Max array size used is max_items**2. Avoid using > 1 GiB if n_blocks is None: n_blocks = max(n_items // max_items, 1) # We may sort blocks so that some pairs of blocks may be skipped. # Reorder vectors by x. order = np.argsort(vecs[:, 0]) blocks = np.array_split(order, n_blocks) adj = [{k} for k in range(n_items)] # Adjacency lists for b1 in blocks: for b2 in blocks: v1, v2 = vecs[b1], vecs[b2] # Check for no overlap, with tolerance. xmin1 = v1[0, 0] - tol xmax1 = v1[-1, 0] + tol xmin2 = v2[0, 0] - tol xmax2 = v2[-1, 0] + tol if max(xmin1, xmin2) > min(xmax1, xmax2): continue adj_mat = cdist(vecs[b1], vecs[b2]) < tol for bi, col in enumerate(adj_mat): adj[b1[bi]] = adj[b1[bi]].union(b2[col]) return [frozenset(g) for g in adj] def _find_cliques(adj, strict=False): n_items = len(adj) loc_gps = [] visited = np.zeros(n_items, dtype=bool) for k in range(n_items): if visited[k]: continue a0 = adj[k] visited[k] = True if all(adj[it].__hash__() == a0.__hash__() for it in a0): group = list(a0) group.sort() visited[list(a0)] = True loc_gps.append(group) # Require all adjacency lists to be isolated maximal cliques: if strict: if not all(sorted(st) in loc_gps for st in adj): raise ValueError("Non-isolated cliques found in graph.") return loc_gps def find_clusters(location_ids, location_vectors, tol, strict=False): """ Find clusters of vectors (e.g. redundant baselines, times). Parameters ---------- location_ids : array_like of int ID labels for locations. location_vectors : array_like of float location vectors, can be multidimensional tol : float tolerance for clusters strict : bool Require that all adjacency lists be isolated maximal cliques. This ensures that vectors do not fall into multiple clusters. Default: False Returns ------- list of list of location_ids """ location_vectors = np.asarray(location_vectors) location_ids = np.asarray(location_ids) if location_vectors.ndim == 1: location_vectors = location_vectors[:, np.newaxis] adj = _adj_list(location_vectors, tol) # adj = list of sets loc_gps = _find_cliques(adj, strict=strict) loc_gps = [np.sort(location_ids[gp]).tolist() for gp in loc_gps] return loc_gps def get_baseline_redundancies( baselines, baseline_vecs, tol=1.0, include_conjugates=False, with_conjugates=False ): """ Find redundant baseline groups. Parameters ---------- baselines : array_like of int Baseline numbers, shape (Nbls,) baseline_vecs : array_like of float Baseline vectors in meters, shape (Nbls, 3) tol : float Absolute tolerance of redundancy, in meters. include_conjugates : bool Option to include baselines that are redundant when flipped. with_conjugates : bool Deprecated, use `include_conjugates` instead. Option to include baselines that are redundant when flipped. Returns ------- baseline_groups : list of lists of int list of lists of redundant baseline numbers vec_bin_centers : list of array_like of float List of vectors describing redundant group centers lengths : list of float List of redundant group baseline lengths in meters baseline_ind_conj : list of int List of baselines that are redundant when reversed. Only returned if include_conjugates is True """ Nbls = baselines.shape[0] if not baseline_vecs.shape == (Nbls, 3): raise ValueError("Baseline vectors must be shape (Nbls, 3)") baseline_vecs = copy.copy(baseline_vecs) # Protect the vectors passed in. if with_conjugates: warnings.warn( "The with_conjugates keyword is deprecated and will be removed in " "version 2.4. Use include_conjugates instead.", DeprecationWarning, ) include_conjugates = True if include_conjugates: conjugates = [] for bv in baseline_vecs: uneg = bv[0] < -tol uzer = np.isclose(bv[0], 0.0, atol=tol) vneg = bv[1] < -tol vzer = np.isclose(bv[1], 0.0, atol=tol) wneg = bv[2] < -tol conjugates.append(uneg or (uzer and vneg) or (uzer and vzer and wneg)) conjugates = np.array(conjugates, dtype=bool) baseline_vecs[conjugates] *= -1 baseline_ind_conj = baselines[conjugates] bl_gps, vec_bin_centers, lens = get_baseline_redundancies( baselines, baseline_vecs, tol=tol, include_conjugates=False ) return bl_gps, vec_bin_centers, lens, baseline_ind_conj try: bl_gps = find_clusters(baselines, baseline_vecs, tol, strict=True) except ValueError as exc: raise ValueError( "Some baselines are falling into multiple" " redundant groups. Lower the tolerance to resolve ambiguity." ) from exc n_unique = len(bl_gps) vec_bin_centers = np.zeros((n_unique, 3)) for gi, gp in enumerate(bl_gps): inds = [np.where(i == baselines)[0] for i in gp] vec_bin_centers[gi] = np.mean(baseline_vecs[inds, :], axis=0) lens = np.sqrt(np.sum(vec_bin_centers ** 2, axis=1)) return bl_gps, vec_bin_centers, lens def get_antenna_redundancies( antenna_numbers, antenna_positions, tol=1.0, include_autos=False ): """ Find redundant baseline groups based on antenna positions. Parameters ---------- antenna_numbers : array_like of int Antenna numbers, shape (Nants,). antenna_positions : array_like of float Antenna position vectors in the ENU (topocentric) frame in meters, shape (Nants, 3). tol : float Redundancy tolerance in meters. include_autos : bool Option to include autocorrelations. Returns ------- baseline_groups : list of lists of int list of lists of redundant baseline numbers vec_bin_centers : list of array_like of float List of vectors describing redundant group centers lengths : list of float List of redundant group baseline lengths in meters Notes ----- The baseline numbers refer to antenna pairs (a1, a2) such that the baseline vector formed from ENU antenna positions, blvec = enu[a1] - enu[a2] is close to the other baselines in the group. This is achieved by putting baselines in a form of the u>0 convention, but with a tolerance in defining the signs of vector components. To guarantee that the same baseline numbers are present in a UVData object, ``UVData.conjugate_bls('u>0', uvw_tol=tol)``, where `tol` is the tolerance used here. """ Nants = antenna_numbers.size bls = [] bl_vecs = [] for aj in range(Nants): mini = aj + 1 if include_autos: mini = aj for ai in range(mini, Nants): anti, antj = antenna_numbers[ai], antenna_numbers[aj] bidx = antnums_to_baseline(antj, anti, Nants) bv = antenna_positions[ai] - antenna_positions[aj] bl_vecs.append(bv) bls.append(bidx) bls = np.array(bls) bl_vecs = np.array(bl_vecs) gps, vecs, lens, conjs = get_baseline_redundancies( bls, bl_vecs, tol=tol, include_conjugates=True ) # Flip the baselines in the groups. for gi, gp in enumerate(gps): for bi, bl in enumerate(gp): if bl in conjs: gps[gi][bi] = baseline_index_flip(bl, Nants) return gps, vecs, lens def mean_collapse( arr, weights=None, axis=None, return_weights=False, return_weights_square=False ): """ Collapse by averaging data. This is similar to np.average, except it handles infs (by giving them zero weight) and zero weight axes (by forcing result to be inf with zero output weight). Parameters ---------- arr : array Input array to process. weights: ndarray, optional weights for average. If none, will default to equal weight for all non-infinite data. axis : int or tuple, optional Axis or axes to collapse (passed to np.sum). Default is all. return_weights : bool Whether to return sum of weights. return_weights_square: bool Whether to return the sum of the square of the weights. Default is False. """ arr = copy.deepcopy(arr) # avoid changing outside if weights is None: weights = np.ones_like(arr) else: weights = copy.deepcopy(weights) weights = weights * np.logical_not(np.isinf(arr)) arr[np.isinf(arr)] = 0 weight_out = np.sum(weights, axis=axis) if return_weights_square: weights_square = weights ** 2 weights_square_out = np.sum(weights_square, axis=axis) out = np.sum(weights * arr, axis=axis) where = weight_out > 1e-10 out = np.true_divide(out, weight_out, where=where) out = np.where(where, out, np.inf) if return_weights and return_weights_square: return out, weight_out, weights_square_out elif return_weights: return out, weight_out elif return_weights_square: return out, weights_square_out else: return out def absmean_collapse( arr, weights=None, axis=None, return_weights=False, return_weights_square=False ): """ Collapse by averaging absolute value of data. Parameters ---------- arr : array Input array to process. weights: ndarray, optional weights for average. If none, will default to equal weight for all non-infinite data. axis : int or tuple, optional Axis or axes to collapse (passed to np.sum). Default is all. return_weights : bool Whether to return sum of weights. return_weights_square: bool whether to return the sum of the squares of the weights. Default is False. """ return mean_collapse( np.abs(arr), weights=weights, axis=axis, return_weights=return_weights, return_weights_square=return_weights_square, ) def quadmean_collapse( arr, weights=None, axis=None, return_weights=False, return_weights_square=False ): """ Collapse by averaging in quadrature. Parameters ---------- arr : array Input array to process. weights: ndarray, optional weights for average. If none, will default to equal weight for all non-infinite data. axis : int or tuple, optional Axis or axes to collapse (passed to np.sum). Default is all. return_weights : bool Whether to return sum of weights. return_weights_square: bool whether to return the sum of the squares of the weights. Default is False. """ out = mean_collapse( np.abs(arr) ** 2, weights=weights, axis=axis, return_weights=return_weights, return_weights_square=return_weights_square, ) if return_weights and return_weights_square: return np.sqrt(out[0]), out[1], out[2] elif return_weights or return_weights_square: return np.sqrt(out[0]), out[1] else: return np.sqrt(out) def or_collapse( arr, weights=None, axis=None, return_weights=False, return_weights_square=False ): """ Collapse using OR operation. Parameters ---------- arr : array Input array to process. weights: ndarray, optional NOT USED, but kept for symmetry with other collapsing functions. axis : int or tuple, optional Axis or axes to collapse (take OR over). Default is all. return_weights : bool Whether to return dummy weights array. NOTE: the dummy weights will simply be an array of ones return_weights_square: bool NOT USED, but kept for symmetry with other collapsing functions. """ if arr.dtype != np.bool_: raise ValueError("Input to or_collapse function must be boolean array") out = np.any(arr, axis=axis) if (weights is not None) and not np.all(weights == weights.reshape(-1)[0]): warnings.warn("Currently weights are not handled when OR-ing boolean arrays.") if return_weights: return out, np.ones_like(out, dtype=np.float64) else: return out def and_collapse( arr, weights=None, axis=None, return_weights=False, return_weights_square=False ): """ Collapse using AND operation. Parameters ---------- arr : array Input array to process. weights: ndarray, optional NOT USED, but kept for symmetry with other collapsing functions. axis : int or tuple, optional Axis or axes to collapse (take AND over). Default is all. return_weights : bool Whether to return dummy weights array. NOTE: the dummy weights will simply be an array of ones return_weights_square: bool NOT USED, but kept for symmetry with other collapsing functions. """ if arr.dtype != np.bool_: raise ValueError("Input to and_collapse function must be boolean array") out = np.all(arr, axis=axis) if (weights is not None) and not np.all(weights == weights.reshape(-1)[0]): warnings.warn("Currently weights are not handled when AND-ing boolean arrays.") if return_weights: return out, np.ones_like(out, dtype=np.float64) else: return out def collapse( arr, alg, weights=None, axis=None, return_weights=False, return_weights_square=False ): """ Parent function to collapse an array with a given algorithm. Parameters ---------- arr : array Input array to process. alg : str Algorithm to use. Must be defined in this function with corresponding subfunction above. weights: ndarray, optional weights for collapse operation (e.g. weighted mean). NOTE: Some subfunctions do not use the weights. See corresponding doc strings. axis : int or tuple, optional Axis or axes to collapse. Default is all. return_weights : bool Whether to return sum of weights. return_weights_square: bool Whether to return the sum of the squares of the weights. Default is False. """ collapse_dict = { "mean": mean_collapse, "absmean": absmean_collapse, "quadmean": quadmean_collapse, "or": or_collapse, "and": and_collapse, } try: out = collapse_dict[alg]( arr, weights=weights, axis=axis, return_weights=return_weights, return_weights_square=return_weights_square, ) except KeyError: raise ValueError( "Collapse algorithm must be one of: " + ", ".join(collapse_dict.keys()) + "." ) return out def uvcalibrate( uvdata, uvcal, inplace=True, prop_flags=True, Dterm_cal=False, flip_gain_conj=False, delay_convention="minus", undo=False, time_check=True, ant_check=True, ): """ Calibrate a UVData object with a UVCal object. Parameters ---------- uvdata : UVData object UVData object to calibrate. uvcal : UVCal object UVCal object containing the calibration. inplace : bool, optional if True edit uvdata in place, else return a calibrated copy prop_flags : bool, optional if True, propagate calibration flags to data flags and doesn't use flagged gains. Otherwise, uses flagged gains and does not propagate calibration flags to data flags. Dterm_cal : bool, optional Calibrate the off-diagonal terms in the Jones matrix if present in uvcal. Default is False. Currently not implemented. flip_gain_conj : bool, optional This function uses the UVData ant_1_array and ant_2_array to specify the antennas in the UVCal object. By default, the conjugation convention, which follows the UVData convention (i.e. ant2 - ant1), is that the applied gain = ant1_gain * conjugate(ant2_gain). If the other convention is required, set flip_gain_conj=True. delay_convention : str, optional Exponent sign to use in conversion of 'delay' to 'gain' cal_type if the input uvcal is not inherently 'gain' cal_type. Default to 'minus'. undo : bool, optional If True, undo the provided calibration. i.e. apply the calibration with flipped gain_convention. Flag propagation rules apply the same. time_check : bool Option to check that times match between the UVCal and UVData objects if UVCal has a single time or time range. Times are always checked if UVCal has multiple times. ant_check : bool Option to check that all antennas with data on the UVData object have calibration solutions in the UVCal object. If this option is set to False, uvcalibrate will proceed without erroring and data for antennas without calibrations will be flagged. Returns ------- UVData, optional Returns if not inplace """ if uvcal.cal_type == "gain" and uvcal.wide_band: raise ValueError( "uvcalibrate currently does not support wide-band calibrations" ) if uvcal.cal_type == "delay" and uvcal.Nspws > 1: # To fix this, need to make UVCal.convert_to_gain support multiple spws raise ValueError( "uvcalibrate currently does not support multi spectral window delay " "calibrations" ) if not inplace: uvdata = uvdata.copy() # check both objects uvdata.check() uvcal.check() # Check whether the UVData antennas *that have data associated with them* # have associated data in the UVCal object uvdata_unique_nums = np.unique(np.append(uvdata.ant_1_array, uvdata.ant_2_array)) uvdata.antenna_names = np.asarray(uvdata.antenna_names) uvdata_used_antnames = np.array( [ uvdata.antenna_names[np.where(uvdata.antenna_numbers == antnum)][0] for antnum in uvdata_unique_nums ] ) uvcal_unique_nums = np.unique(uvcal.ant_array) uvcal.antenna_names = np.asarray(uvcal.antenna_names) uvcal_used_antnames = np.array( [ uvcal.antenna_names[np.where(uvcal.antenna_numbers == antnum)][0] for antnum in uvcal_unique_nums ] ) ant_arr_match = uvcal_used_antnames.tolist() == uvdata_used_antnames.tolist() if not ant_arr_match: # check more carefully name_missing = [] for this_ant_name in uvdata_used_antnames: wh_ant_match = np.nonzero(uvcal_used_antnames == this_ant_name) if wh_ant_match[0].size == 0: name_missing.append(this_ant_name) if len(name_missing) > 0: if len(name_missing) == uvdata_used_antnames.size: # all antenna_names with data on UVData are missing on UVCal. if not ant_check: warnings.warn( "All antenna names with data on UVData are missing " "on UVCal. Since ant_check is False, calibration will " "proceed but all data will be flagged." ) else: raise ValueError( "All antenna names with data on UVData are missing " "on UVCal. To continue with calibration " "(and flag all the data), set ant_check=False." ) else: # Only some antenna_names with data on UVData are missing on UVCal if not ant_check: warnings.warn( f"Antennas {name_missing} have data on UVData but are missing " "on UVCal. Since ant_check is False, calibration will " "proceed and the data for these antennas will be flagged." ) else: raise ValueError( f"Antennas {name_missing} have data on UVData but " "are missing on UVCal. To continue calibration and " "flag the data from missing antennas, set ant_check=False." ) uvdata_times = np.unique(uvdata.time_array) downselect_cal_times = False if uvcal.Ntimes > 1: if uvcal.Ntimes < uvdata.Ntimes: raise ValueError( "The uvcal object has more than one time but fewer than the " "number of unique times on the uvdata object." ) uvcal_times = np.unique(uvcal.time_array) try: time_arr_match = np.allclose( uvcal_times, uvdata_times, atol=uvdata._time_array.tols[1], rtol=uvdata._time_array.tols[0], ) except ValueError: time_arr_match = False if not time_arr_match: # check more carefully uvcal_times_to_keep = [] for this_time in uvdata_times: wh_time_match = np.nonzero( np.isclose( uvcal.time_array - this_time, 0, atol=uvdata._time_array.tols[1], rtol=uvdata._time_array.tols[0], ) ) if wh_time_match[0].size > 0: uvcal_times_to_keep.append(uvcal.time_array[wh_time_match][0]) else: raise ValueError( f"Time {this_time} exists on UVData but not on UVCal." ) if len(uvcal_times_to_keep) < uvcal.Ntimes: downselect_cal_times = True elif uvcal.time_range is None: # only one UVCal time, no time_range. # This cannot match if UVData.Ntimes > 1. # If they are both NTimes = 1, then check if they're close. if uvdata.Ntimes > 1 or not np.isclose( uvdata_times, uvcal.time_array, atol=uvdata._time_array.tols[1], rtol=uvdata._time_array.tols[0], ): if not time_check: warnings.warn( "Times do not match between UVData and UVCal " "but time_check is False, so calibration " "will be applied anyway." ) else: raise ValueError( "Times do not match between UVData and UVCal. " "Set time_check=False to apply calibration anyway." ) else: # time_array is length 1 and time_range exists: check uvdata_times in time_range if ( np.min(uvdata_times) < uvcal.time_range[0] or np.max(uvdata_times) > uvcal.time_range[1] ): if not time_check: warnings.warn( "Times do not match between UVData and UVCal " "but time_check is False, so calibration " "will be applied anyway." ) else: raise ValueError( "Times do not match between UVData and UVCal. " "Set time_check=False to apply calibration anyway. " ) downselect_cal_freq = False if uvdata.future_array_shapes: uvdata_freq_arr_use = uvdata.freq_array else: uvdata_freq_arr_use = uvdata.freq_array[0, :] if uvcal.future_array_shapes: uvcal_freq_arr_use = uvcal.freq_array else: uvcal_freq_arr_use = uvcal.freq_array[0, :] try: freq_arr_match = np.allclose( np.sort(uvcal_freq_arr_use), np.sort(uvdata_freq_arr_use), atol=uvdata._freq_array.tols[1], rtol=uvdata._freq_array.tols[0], ) except ValueError: freq_arr_match = False if freq_arr_match is False: # check more carefully uvcal_freqs_to_keep = [] for this_freq in uvdata_freq_arr_use: wh_freq_match = np.nonzero( np.isclose( uvcal.freq_array - this_freq, 0, atol=uvdata._freq_array.tols[1], rtol=uvdata._freq_array.tols[0], ) ) if wh_freq_match[0].size > 0: uvcal_freqs_to_keep.append(uvcal.freq_array[wh_freq_match][0]) else: raise ValueError( f"Frequency {this_freq} exists on UVData but not on UVCal." ) if len(uvcal_freqs_to_keep) < uvcal.Nfreqs: downselect_cal_freq = True # check if uvdata.x_orientation isn't set (it's required for uvcal) uvd_x = uvdata.x_orientation if uvd_x is None: # use the uvcal x_orientation throughout uvd_x = uvcal.x_orientation warnings.warn( "UVData object does not have `x_orientation` specified but UVCal does. " "Matching based on `x` and `y` only " ) uvdata_pol_strs = polnum2str(uvdata.polarization_array, x_orientation=uvd_x) uvcal_pol_strs = jnum2str(uvcal.jones_array, x_orientation=uvcal.x_orientation) uvdata_feed_pols = { feed for pol in uvdata_pol_strs for feed in POL_TO_FEED_DICT[pol] } for feed in uvdata_feed_pols: # get diagonal jones str jones_str = parse_jpolstr(feed, x_orientation=uvcal.x_orientation) if jones_str not in uvcal_pol_strs: raise ValueError( f"Feed polarization {feed} exists on UVData but not on UVCal. " ) # downselect UVCal times, frequencies if downselect_cal_freq or downselect_cal_times: if not downselect_cal_times: uvcal_times_to_keep = None elif not downselect_cal_freq: uvcal_freqs_to_keep = None uvcal_use = uvcal.select( times=uvcal_times_to_keep, frequencies=uvcal_freqs_to_keep, inplace=False ) new_uvcal = True else: uvcal_use = uvcal new_uvcal = False # input checks if uvcal_use.cal_type == "delay": if not new_uvcal: # make a copy to convert to gain uvcal_use = uvcal_use.copy() new_uvcal = True if uvdata.future_array_shapes: freq_array_use = uvdata.freq_array else: freq_array_use = uvdata.freq_array[0, :] if uvcal.future_array_shapes == uvdata.future_array_shapes: channel_width = uvdata.channel_width elif uvcal.future_array_shapes: channel_width = np.zeros(uvdata.Nfreqs, dtype=float) + uvdata.channel_width else: channel_width = uvdata.channel_width[0] uvcal_use.convert_to_gain( delay_convention=delay_convention, freq_array=freq_array_use, channel_width=channel_width, ) # D-term calibration if Dterm_cal: # check for D-terms if -7 not in uvcal_use.jones_array and -8 not in uvcal_use.jones_array: raise ValueError( "Cannot apply D-term calibration without -7 or -8" "Jones polarization in uvcal object." ) raise NotImplementedError("D-term calibration is not yet implemented.") # No D-term calibration else: # key is number, value is name uvdata_ant_dict = dict(zip(uvdata.antenna_numbers, uvdata.antenna_names)) # opposite: key is name, value is number uvcal_ant_dict = dict(zip(uvcal.antenna_names, uvcal.antenna_numbers)) # iterate over keys for key in uvdata.get_antpairpols(): # get indices for this key blt_inds = uvdata.antpair2ind(key) pol_ind = np.argmin( np.abs(uvdata.polarization_array - polstr2num(key[2], uvd_x)) ) # try to get gains for each antenna ant1_num = key[0] ant2_num = key[1] feed1, feed2 = POL_TO_FEED_DICT[key[2]] try: uvcal_ant1_num = uvcal_ant_dict[uvdata_ant_dict[ant1_num]] except KeyError: uvcal_ant1_num = None try: uvcal_ant2_num = uvcal_ant_dict[uvdata_ant_dict[ant2_num]] except KeyError: uvcal_ant2_num = None uvcal_key1 = (uvcal_ant1_num, feed1) uvcal_key2 = (uvcal_ant2_num, feed2) if (uvcal_ant1_num is None or uvcal_ant2_num is None) or not ( uvcal_use._has_key(*uvcal_key1) and uvcal_use._has_key(*uvcal_key2) ): if uvdata.future_array_shapes: uvdata.flag_array[blt_inds, :, pol_ind] = True else: uvdata.flag_array[blt_inds, 0, :, pol_ind] = True continue if flip_gain_conj: gain = ( np.conj(uvcal_use.get_gains(uvcal_key1)) * uvcal_use.get_gains(uvcal_key2) ).T # tranpose to match uvdata shape else: gain = ( uvcal_use.get_gains(uvcal_key1) * np.conj(uvcal_use.get_gains(uvcal_key2)) ).T # tranpose to match uvdata shape flag = (uvcal_use.get_flags(uvcal_key1) | uvcal_use.get_flags(uvcal_key2)).T # propagate flags if prop_flags: mask = np.isclose(gain, 0.0) | flag gain[mask] = 1.0 if uvdata.future_array_shapes: uvdata.flag_array[blt_inds, :, pol_ind] += mask else: uvdata.flag_array[blt_inds, 0, :, pol_ind] += mask # apply to data mult_gains = uvcal_use.gain_convention == "multiply" if undo: mult_gains = not mult_gains if uvdata.future_array_shapes: if mult_gains: uvdata.data_array[blt_inds, :, pol_ind] *= gain else: uvdata.data_array[blt_inds, :, pol_ind] /= gain else: if mult_gains: uvdata.data_array[blt_inds, 0, :, pol_ind] *= gain else: uvdata.data_array[blt_inds, 0, :, pol_ind] /= gain # update attributes uvdata.history += "\nCalibrated with pyuvdata.utils.uvcalibrate." if undo: uvdata.vis_units = "uncalib" else: if uvcal_use.gain_scale is not None: uvdata.vis_units = uvcal_use.gain_scale if not inplace: return uvdata def apply_uvflag( uvd, uvf, inplace=True, unflag_first=False, flag_missing=True, force_pol=True ): """ Apply flags from a UVFlag to a UVData instantiation. Note that if uvf.Nfreqs or uvf.Ntimes is 1, it will broadcast flags across that axis. Parameters ---------- uvd : UVData object UVData object to add flags to. uvf : UVFlag object A UVFlag object in flag mode. inplace : bool If True overwrite flags in uvd, otherwise return new object unflag_first : bool If True, completely unflag the UVData before applying flags. Else, OR the inherent uvd flags with uvf flags. flag_missing : bool If input uvf is a baseline type and antpairs in uvd do not exist in uvf, flag them in uvd. Otherwise leave them untouched. force_pol : bool If True, broadcast flags to all polarizations if they do not match. Only works if uvf.Npols == 1. Returns ------- UVData If not inplace, returns new UVData object with flags applied """ # assertions if uvf.mode != "flag": raise ValueError("UVFlag must be flag mode") if not inplace: uvd = uvd.copy() # make a deepcopy by default b/c it is generally edited inplace downstream uvf = uvf.copy() # convert to baseline type if uvf.type != "baseline": # edits inplace uvf.to_baseline(uvd, force_pol=force_pol) else: # make sure polarizations match or force_pol uvd_pols, uvf_pols = ( uvd.polarization_array.tolist(), uvf.polarization_array.tolist(), ) if set(uvd_pols) != set(uvf_pols): if uvf.Npols == 1 and force_pol: # if uvf is 1pol we can make them match: also edits inplace uvf.polarization_array = uvd.polarization_array uvf.Npols = len(uvf.polarization_array) uvf_pols = uvf.polarization_array.tolist() else: raise ValueError("Input uvf and uvd polarizations do not match") # make sure polarization ordering is correct: also edits inplace uvf.polarization_array = uvf.polarization_array[ [uvd_pols.index(pol) for pol in uvf_pols] ] # check time and freq shapes match: if Ntimes or Nfreqs is 1, allow # implicit broadcasting if uvf.Ntimes == 1: mismatch_times = False elif uvf.Ntimes == uvd.Ntimes: tdiff = np.unique(uvf.time_array) - np.unique(uvd.time_array) mismatch_times = np.any(tdiff > np.max(np.abs(uvf._time_array.tols))) else: mismatch_times = True if mismatch_times: raise ValueError("UVFlag and UVData have mismatched time arrays.") if uvf.Nfreqs == 1: mismatch_freqs = False elif uvf.Nfreqs == uvd.Nfreqs: fdiff = np.unique(uvf.freq_array) - np.unique(uvd.freq_array) mismatch_freqs = np.any(fdiff > np.max(np.abs(uvf._freq_array.tols))) else: mismatch_freqs = True if mismatch_freqs: raise ValueError("UVFlag and UVData have mismatched frequency arrays.") # unflag if desired if unflag_first: uvd.flag_array[:] = False # iterate over antpairs and apply flags: TODO need to be able to handle # conjugated antpairs uvf_antpairs = uvf.get_antpairs() for ap in uvd.get_antpairs(): uvd_ap_inds = uvd.antpair2ind(ap) if ap not in uvf_antpairs: if flag_missing: uvd.flag_array[uvd_ap_inds] = True continue uvf_ap_inds = uvf.antpair2ind(*ap) # addition of boolean is OR if uvd.future_array_shapes: uvd.flag_array[uvd_ap_inds] += uvf.flag_array[uvf_ap_inds, 0, :, :] else: uvd.flag_array[uvd_ap_inds] += uvf.flag_array[uvf_ap_inds] uvd.history += "\nFlagged with pyuvdata.utils.apply_uvflags." if not inplace: return uvd def parse_ants(uv, ant_str, print_toggle=False, x_orientation=None): """ Get antpair and polarization from parsing an aipy-style ant string. Used to support the select function. Generates two lists of antenna pair tuples and polarization indices based on parsing of the string ant_str. If no valid polarizations (pseudo-Stokes params, or combinations of [lr] or [xy]) or antenna numbers are found in ant_str, ant_pairs_nums and polarizations are returned as None. Parameters ---------- uv : UVBase Object A UVBased object that supports the following functions and parameters: - get_ants - get_antpairs - get_pols These are used to construct the baseline ant_pair_nums and polarizations returned. ant_str : str String containing antenna information to parse. Can be 'all', 'auto', 'cross', or combinations of antenna numbers and polarization indicators 'l' and 'r' or 'x' and 'y'. Minus signs can also be used in front of an antenna number or baseline to exclude it from being output in ant_pairs_nums. If ant_str has a minus sign as the first character, 'all,' will be appended to the beginning of the string. See the tutorial for examples of valid strings and their behavior. print_toggle : bool Boolean for printing parsed baselines for a visual user check. x_orientation : str, optional Orientation of the physical dipole corresponding to what is labelled as the x polarization ("east" or "north") to allow for converting from E/N strings. If input uv object has an `x_orientation` parameter and the input to this function is `None`, the value from the object will be used. Any input given to this function will override the value on the uv object. See corresonding parameter on UVData for more details. Returns ------- ant_pairs_nums : list of tuples of int or None List of tuples containing the parsed pairs of antenna numbers, or None if ant_str is 'all' or a pseudo-Stokes polarizations. polarizations : list of int or None List of desired polarizations or None if ant_str does not contain a polarization specification. """ required_attrs = ["get_ants", "get_antpairs", "get_pols"] if not all(hasattr(uv, attr) for attr in required_attrs): raise ValueError( "UVBased objects must have all the following attributes in order " f"to call 'parse_ants': {required_attrs}." ) if x_orientation is None and ( hasattr(uv, "x_orientation") and uv.x_orientation is not None ): x_orientation = uv.x_orientation ant_re = r"(\(((-?\d+[lrxy]?,?)+)\)|-?\d+[lrxy]?)" bl_re = "(^(%s_%s|%s),?)" % (ant_re, ant_re, ant_re) str_pos = 0 ant_pairs_nums = [] polarizations = [] ants_data = uv.get_ants() ant_pairs_data = uv.get_antpairs() pols_data = uv.get_pols() warned_ants = [] warned_pols = [] if ant_str.startswith("-"): ant_str = "all," + ant_str while str_pos < len(ant_str): m = re.search(bl_re, ant_str[str_pos:]) if m is None: if ant_str[str_pos:].upper().startswith("ALL"): if len(ant_str[str_pos:].split(",")) > 1: ant_pairs_nums = uv.get_antpairs() elif ant_str[str_pos:].upper().startswith("AUTO"): for pair in ant_pairs_data: if pair[0] == pair[1] and pair not in ant_pairs_nums: ant_pairs_nums.append(pair) elif ant_str[str_pos:].upper().startswith("CROSS"): for pair in ant_pairs_data: if not (pair[0] == pair[1] or pair in ant_pairs_nums): ant_pairs_nums.append(pair) elif ant_str[str_pos:].upper().startswith("PI"): polarizations.append(polstr2num("pI")) elif ant_str[str_pos:].upper().startswith("PQ"): polarizations.append(polstr2num("pQ")) elif ant_str[str_pos:].upper().startswith("PU"): polarizations.append(polstr2num("pU")) elif ant_str[str_pos:].upper().startswith("PV"): polarizations.append(polstr2num("pV")) else: raise ValueError("Unparsible argument {s}".format(s=ant_str)) comma_cnt = ant_str[str_pos:].find(",") if comma_cnt >= 0: str_pos += comma_cnt + 1 else: str_pos = len(ant_str) else: m = m.groups() str_pos += len(m[0]) if m[2] is None: ant_i_list = [m[8]] ant_j_list = list(uv.get_ants()) else: if m[3] is None: ant_i_list = [m[2]] else: ant_i_list = m[3].split(",") if m[6] is None: ant_j_list = [m[5]] else: ant_j_list = m[6].split(",") for ant_i in ant_i_list: include_i = True if type(ant_i) == str and ant_i.startswith("-"): ant_i = ant_i[1:] # nibble the - off the string include_i = False for ant_j in ant_j_list: include_j = True if type(ant_j) == str and ant_j.startswith("-"): ant_j = ant_j[1:] include_j = False pols = None ant_i, ant_j = str(ant_i), str(ant_j) if not ant_i.isdigit(): ai = re.search(r"(\d+)([x,y,l,r])", ant_i).groups() if not ant_j.isdigit(): aj = re.search(r"(\d+)([x,y,l,r])", ant_j).groups() if ant_i.isdigit() and ant_j.isdigit(): ai = [ant_i, ""] aj = [ant_j, ""] elif ant_i.isdigit() and not ant_j.isdigit(): if "x" in ant_j or "y" in ant_j: pols = ["x" + aj[1], "y" + aj[1]] else: pols = ["l" + aj[1], "r" + aj[1]] ai = [ant_i, ""] elif not ant_i.isdigit() and ant_j.isdigit(): if "x" in ant_i or "y" in ant_i: pols = [ai[1] + "x", ai[1] + "y"] else: pols = [ai[1] + "l", ai[1] + "r"] aj = [ant_j, ""] elif not ant_i.isdigit() and not ant_j.isdigit(): pols = [ai[1] + aj[1]] ant_tuple = (abs(int(ai[0])), abs(int(aj[0]))) # Order tuple according to order in object if ant_tuple in ant_pairs_data: pass elif ant_tuple[::-1] in ant_pairs_data: ant_tuple = ant_tuple[::-1] else: if not ( ant_tuple[0] in ants_data or ant_tuple[0] in warned_ants ): warned_ants.append(ant_tuple[0]) if not ( ant_tuple[1] in ants_data or ant_tuple[1] in warned_ants ): warned_ants.append(ant_tuple[1]) if pols is not None: for pol in pols: if not (pol.lower() in pols_data or pol in warned_pols): warned_pols.append(pol) continue if include_i and include_j: if ant_tuple not in ant_pairs_nums: ant_pairs_nums.append(ant_tuple) if pols is not None: for pol in pols: if ( pol.lower() in pols_data and polstr2num(pol, x_orientation=x_orientation) not in polarizations ): polarizations.append( polstr2num(pol, x_orientation=x_orientation) ) elif not ( pol.lower() in pols_data or pol in warned_pols ): warned_pols.append(pol) else: if pols is not None: for pol in pols: if pol.lower() in pols_data: if uv.Npols == 1 and [pol.lower()] == pols_data: ant_pairs_nums.remove(ant_tuple) if ( polstr2num(pol, x_orientation=x_orientation) in polarizations ): polarizations.remove( polstr2num( pol, x_orientation=x_orientation, ) ) elif not ( pol.lower() in pols_data or pol in warned_pols ): warned_pols.append(pol) elif ant_tuple in ant_pairs_nums: ant_pairs_nums.remove(ant_tuple) if ant_str.upper() == "ALL": ant_pairs_nums = None elif len(ant_pairs_nums) == 0: if not ant_str.upper() in ["AUTO", "CROSS"]: ant_pairs_nums = None if len(polarizations) == 0: polarizations = None else: polarizations.sort(reverse=True) if print_toggle: print("\nParsed antenna pairs:") if ant_pairs_nums is not None: for pair in ant_pairs_nums: print(pair) print("\nParsed polarizations:") if polarizations is not None: for pol in polarizations: print(polnum2str(pol, x_orientation=x_orientation)) if len(warned_ants) > 0: warnings.warn( "Warning: Antenna number {a} passed, but not present " "in the ant_1_array or ant_2_array".format( a=(",").join(map(str, warned_ants)) ) ) if len(warned_pols) > 0: warnings.warn( "Warning: Polarization {p} is not present in " "the polarization_array".format(p=(",").join(warned_pols).upper()) ) return ant_pairs_nums, polarizations def _combine_filenames(filename1, filename2): """Combine the filename attribute from multiple UVBase objects. The 4 cases are: 1. `filename1` has been set, `filename2` has not 2. `filename1` has not been set, `filename2` has 3. `filename1` and `filename2` both have been set 4. `filename1` and `filename2` both have not been set In case (1), we do not want to update the attribute, because it is already set correctly. In case (2), we want to replace `filename1` with the value from `filename2. In case (3), we want to take the union of the sets of the filenames. In case (4), we want the filename attribute to still be `None`. Parameters ---------- filename1 : list of str or None The list of filenames for the first UVBase object. If it is not set, it should be `None`. filename2 : list of str or None The list of filenames for the second UVData object. If it is not set, it should be `None`. Returns ------- combined_filenames : list of str or None The combined list, with potentially duplicate entries removed. """ combined_filenames = filename1 if filename1 is not None: if filename2 is not None: combined_filenames = sorted(set(filename1).union(set(filename2))) elif filename2 is not None: combined_filenames = filename2 return combined_filenames def _get_dset_shape(dset, indices): """ Given a 3-tuple of indices, determine the indexed array shape. Parameters ---------- dset : numpy array or h5py dataset A numpy array or a reference to an HDF5 dataset on disk. Requires the `dset.shape` attribute exists and returns a tuple. indices : tuple A 3-tuple with the indices to extract along each dimension of dset. Each element should contain a list of indices, a slice element, or a list of slice elements that will be concatenated after slicing. For data arrays with 4 dimensions, the second dimension (the old spw axis) should not be included because it can only be length one. Returns ------- tuple a 3- or 4-tuple with the shape of the indexed array tuple a 3- or 4-tuple with indices used (will be different than input if dset has 4 dimensions) """ dset_shape = list(dset.shape) if len(dset_shape) == 4 and len(indices) == 3: indices = (indices[0], np.s_[:], indices[1], indices[2]) for i, inds in enumerate(indices): # check for integer if isinstance(inds, (int, np.integer)): dset_shape[i] = 1 # check for slice object if isinstance(inds, slice): dset_shape[i] = _get_slice_len(inds, dset_shape[i]) # check for list if isinstance(inds, list): # check for list of integers if isinstance(inds[0], (int, np.integer)): dset_shape[i] = len(inds) elif isinstance(inds[0], slice): dset_shape[i] = sum((_get_slice_len(s, dset_shape[i]) for s in inds)) return dset_shape, indices def _convert_to_slices(indices, max_nslice_frac=0.1): """ Convert list of indices to a list of slices. Parameters ---------- indices : list A 1D list of integers for array indexing. max_nslice_frac : float A float from 0 -- 1. If the number of slices needed to represent input 'indices' divided by len(indices) exceeds this fraction, then we determine that we cannot easily represent 'indices' with a list of slices. Returns ------- list list of slice objects used to represent indices bool If True, indices is easily represented by slices (max_nslice_frac condition met), otherwise False Notes ----- Example: if: indices = [1, 2, 3, 4, 10, 11, 12, 13, 14] then: slices = [slice(1, 5, 1), slice(11, 15, 1)] """ # check for integer index if isinstance(indices, (int, np.integer)): indices = [indices] # check for already a slice if isinstance(indices, slice): return [indices], True # assert indices is longer than 2, or return trivial solutions if len(indices) == 0: return [slice(0, 0, 0)], False elif len(indices) == 1: return [slice(indices[0], indices[0] + 1, 1)], True elif len(indices) == 2: return [slice(indices[0], indices[1] + 1, indices[1] - indices[0])], True # setup empty slices list Ninds = len(indices) slices = [] # iterate over indices for i, ind in enumerate(indices): if i == 0: # start the first slice object start = ind last_step = indices[i + 1] - ind continue # calculate step from previous index step = ind - indices[i - 1] # if step != last_step, this ends the slice if step != last_step: # append to list slices.append(slice(start, indices[i - 1] + 1, last_step)) # check if this is the last element if i == Ninds - 1: # append last element slices.append(slice(ind, ind + 1, 1)) continue # setup next step start = ind last_step = indices[i + 1] - ind # check if this is the last element elif i == Ninds - 1: # end slice and append slices.append(slice(start, ind + 1, step)) # determine whether slices are a reasonable representation Nslices = len(slices) passed = (float(Nslices) / len(indices)) < max_nslice_frac return slices, passed def _get_slice_len(s, axlen): """ Get length of a slice s into array of len axlen. Parameters ---------- s : slice object Slice object to index with axlen : int Length of axis s slices into Returns ------- int Length of slice object """ if s.start is None: start = 0 else: start = s.start if s.stop is None: stop = axlen else: stop = np.min([s.stop, axlen]) if s.step is None: step = 1 else: step = s.step return ((stop - 1 - start) // step) + 1 def _index_dset(dset, indices, input_array=None): """ Index a UVH5 data, flags or nsamples h5py dataset. Parameters ---------- dset : h5py dataset A reference to an HDF5 dataset on disk. indices : tuple A 3-tuple with the indices to extract along each dimension of dset. Each element should contain a list of indices, a slice element, or a list of slice elements that will be concatenated after slicing. Indices must be provided such that all dimensions can be indexed simultaneously. For data arrays with 4 dimensions, the second dimension (the old spw axis) should not be included because it can only be length one. Returns ------- ndarray The indexed dset Notes ----- This makes and fills an empty array with dset indices. For trivial indexing, (e.g. a trivial slice), constructing a new array and filling it is suboptimal over direct indexing, e.g. dset[indices]. This function specializes in repeated slices over the same axis, e.g. if indices is [[slice(0, 5), slice(10, 15), ...], ..., ] """ # get dset and arr shape dset_shape = dset.shape arr_shape, indices = _get_dset_shape(dset, indices) if input_array is None: # create empty array of dset dtype arr = np.empty(arr_shape, dtype=dset.dtype) else: arr = input_array # get arr and dset indices for each dimension in indices dset_indices = [] arr_indices = [] for i, dset_inds in enumerate(indices): if isinstance(dset_inds, (int, np.integer)): # this dimension is len 1, so slice is fine arr_indices.append([slice(None)]) dset_indices.append([[dset_inds]]) elif isinstance(dset_inds, slice): # this dimension is just a slice, so slice is fine arr_indices.append([slice(None)]) dset_indices.append([dset_inds]) elif isinstance(dset_inds, (list, np.ndarray)): if isinstance(dset_inds[0], (int, np.integer)): # this is a list of integers, append slice arr_indices.append([slice(None)]) dset_indices.append([dset_inds]) elif isinstance(dset_inds[0], slice): # this is a list of slices, need list of slice lens slens = [_get_slice_len(s, dset_shape[i]) for s in dset_inds] ssums = [sum(slens[:j]) for j in range(len(slens))] arr_inds = [slice(s, s + l) for s, l in zip(ssums, slens)] arr_indices.append(arr_inds) dset_indices.append(dset_inds) if len(dset_shape) == 3: freq_dim = 1 pol_dim = 2 else: freq_dim = 2 pol_dim = 3 # iterate over each of the 3 axes and fill the array for blt_arr, blt_dset in zip(arr_indices[0], dset_indices[0]): for freq_arr, freq_dset in zip(arr_indices[freq_dim], dset_indices[freq_dim]): for pol_arr, pol_dset in zip(arr_indices[pol_dim], dset_indices[pol_dim]): if input_array is None: # index dset and assign to arr if len(dset_shape) == 3: arr[blt_arr, freq_arr, pol_arr] = dset[ blt_dset, freq_dset, pol_dset ] else: arr[blt_arr, :, freq_arr, pol_arr] = dset[ blt_dset, :, freq_dset, pol_dset ] else: # index arr and assign to dset if len(dset_shape) == 3: dset[blt_dset, freq_dset, pol_dset] = arr[ blt_arr, freq_arr, pol_arr ] else: dset[blt_dset, :, freq_dset, pol_dset] = arr[ blt_arr, :, freq_arr, pol_arr ] if input_array is None: return arr else: return
{ "content_hash": "ae9c36d4fd32155258be796d4b976b57", "timestamp": "", "source": "github", "line_count": 5017, "max_line_length": 88, "avg_line_length": 38.117600159457844, "alnum_prop": 0.5972777092179297, "repo_name": "HERA-Team/pyuvdata", "id": "ae5059aa3bc85a041bf546e62f0d572798562433", "size": "191370", "binary": false, "copies": "1", "ref": "refs/heads/fix_uvh5_phase_info", "path": "pyuvdata/utils.py", "mode": "33188", "license": "bsd-2-clause", "language": [ { "name": "Batchfile", "bytes": "3599" }, { "name": "C", "bytes": "305381" }, { "name": "C++", "bytes": "33406" }, { "name": "IDL", "bytes": "6021" }, { "name": "Objective-C", "bytes": "1706" }, { "name": "PowerShell", "bytes": "2972" }, { "name": "Python", "bytes": "1140005" }, { "name": "Shell", "bytes": "391" }, { "name": "TeX", "bytes": "3991" } ], "symlink_target": "" }
from .recovery_services_backup_client import RecoveryServicesBackupClient from .version import VERSION __all__ = ['RecoveryServicesBackupClient'] __version__ = VERSION
{ "content_hash": "faecb662679f16192d385dcf54533948", "timestamp": "", "source": "github", "line_count": 7, "max_line_length": 73, "avg_line_length": 24.428571428571427, "alnum_prop": 0.7894736842105263, "repo_name": "v-iam/azure-sdk-for-python", "id": "dc0eafa315788627630e484ed7bc02cb7da77b02", "size": "645", "binary": false, "copies": "4", "ref": "refs/heads/master", "path": "azure-mgmt-recoveryservicesbackup/azure/mgmt/recoveryservicesbackup/__init__.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "19856874" } ], "symlink_target": "" }
"""module_builder Use configuration files to generate verilog modules """ """ Changes: 9/16/2013: -Initial Commit """ __author__ = 'dave.mccoy@cospandesign.com (Dave McCoy)' import os import sys import string import json import copy #Project Modules import utils import verilog_utils as vutils from verilog_utils import get_eol import ibuilder_error class ModuleBuilderError(ibuilder_error.IBuilderError): pass def dict_to_signal(name, d): if d["size"] == 1: return name return "%s[%d:%d]" % (name, d["max_val"], d["min_val"]) def generate_module_ports(module_name, port_dict, param_dict = {}, debug = False): """ Generates the name, parameters and port declarations of a verilog module Args: module_name (string): Name of the module to instantiate port_dict (dictionary): Port dictionary Format of port_dict: { "input":{ "clk":{ "size":1 }, "rst":{ "size":1 }, "stimulus":{ "size":1 }, "array":{ "size":1 "max_val":31, "min_val":0 }, "button":{ "size":1 "max_val":3, "min_val":0 } }, "output":{ "out1":{ "size":1 }, "led":{ "size":1 "max_val":3, "min_val":0 } }, "inout":{ "inout_test":{ "size":1 }, "inout":{ "size":1 "max_val":5, "min_val":1 } } } param_dict (dictionay): Parameter dictionary Format of param_dict { "PARAMETER1":"1", "PARAMETER2":"4" } Returns: (string): buffer instantiation Raises: ModuleBuilderError: -module_name is not a string -param dictionary incorrectly formatted -port dictionary incorrectly formatted """ if not isinstance(module_name, str): raise ModuleBuilderError("module_builder: module_name is not a string") buf = "" if len(param_dict.keys()) > 0: num_params = len(param_dict.keys()) param_count = 1 buf = "module %s #(\n" % module_name for param in param_dict: buf += "\t{0:10}{1:12}{2:10}{3}{4}\n".format("parameter", param, "=", param_dict[param], get_eol(num_params, param_count)) param_count += 1 buf += ")(\n" else: buf = "module %s(\n" % module_name input_ports =[] output_ports = [] inout_ports = [] port_count = 0 if "input" in port_dict: for port in port_dict["input"]: input_ports.append(port) if "output" in port_dict: for port in port_dict["output"]: output_ports.append(port) if "inout" in port_dict: for port in port_dict["inout"]: inout_ports.append(port) #Sort the signals input_ports = sorted(input_ports, cmp = vutils.port_cmp) output_ports = sorted(output_ports, cmp = vutils.port_cmp) inout_ports = sorted(inout_ports, cmp = vutils.port_cmp) num_ports = len(input_ports) + len(output_ports) + len(inout_ports) port_count = 1 port_name = "" if debug: print "input ports: %s" % str(input_ports) #This is a special case which should not handle an array if "clk" in input_ports: buf += "\t{0:22}{1}{2}\n".format("input", "clk", get_eol(num_ports, port_count)) port_count += 1 if "rst" in input_ports: buf += "\t{0:22}{1}{2}\n".format("input", "rst", get_eol(num_ports, port_count)) port_count += 1 if port_count != len(input_ports): buf += "\n" buf += "\t//inputs\n" for port in input_ports: if port == "clk": continue if port == "rst": continue port_sig = dict_to_signal(port, port_dict["input"][port]) if "[" in port_sig and ":" in port_sig: port_name = "\t{0:10}[{1:11}{2}{3}\n".format( "input", port_sig.partition("[")[2], port_sig.partition("[")[0], get_eol(num_ports, port_count)) port_count += 1 else: port_name = "\t{0:22}{1}{2}\n".format("input", port_sig, get_eol(num_ports, port_count)) port_count += 1 buf += port_name if len(output_ports) > 0: buf += "\n" buf += "\t//outputs\n" for port in output_ports: port_sig = dict_to_signal(port, port_dict["output"][port]) if "[" in port_sig and ":" in port_sig: port_name = "\t{0:10}[{1:11}{2}{3}\n".format( "output", port_sig.partition("[")[2], port_sig.partition("[")[0], get_eol(num_ports, port_count)) port_count += 1 else: port_name = "\t{0:22}{1}{2}\n".format("output", port_sig, get_eol(num_ports, port_count)) port_count += 1 buf += port_name if len(inout_ports) > 0: buf += "\n" buf += "\t//inouts\n" for port in inout_ports: port_sig = dict_to_signal(port, port_dict["inout"][port]) if "[" in port_sig and ":" in port_sig: port_name = "\t{0:10}[{1:11}{2}{3}\n".format( "inout", port_sig.partition("[")[2], port_sig.partition("[")[0], get_eol(num_ports, port_count)) port_count += 1 else: port_name = "\t{0:22}{1}{2}\n".format("inout", port_sig, get_eol(num_ports, port_count)) port_count += 1 buf += port_name buf += ");\n" return string.expandtabs(buf, 2) def generate_defines_buf(defines_dict): """ XXX: This function is not ready to be used, the defines need to be organized (DO NOT USE) Generate a buffer with the specified defines Args: defines_dict (dictionary): list of define values in the format: 'name':'value' Returns: (string): buffer with the defines specified Raises: Nothing """ if len(defines_dict) == 0: return "" buf = "\n" for define in defines_dict: buf += "`define %s %s\n" % (define, defines_dict[define]) buf += "\n" return buf def generate_timespec_buf(step = "1 ns", unit = "1 ps"): """ Generate a timespec buffer given the input, if left empty fills in the default of 1ns/1ps Args: step (string): Timespec step unit (string): Unit of time step Returns: (string): buffer with the given timespec Raises: Nothing """ buf = "\n" buf += "`timescale %s/%s\n" % (step, unit) buf += "\n" return buf class ModuleBuilder(object): """Class used to build a generic verilog module given a configuratiom file""" def __init__(self, tags = None): self.tags = tags self.wires = {} self.bindings = {} self.user_paths = [] self.submodule_buffers = [] def add_ports_to_wires(self): """Add all input and output wires to the ports""" if "input" in self.tags["ports"]: for port in self.tags["ports"]["input"]: #print "Adding %s to wires" % port self.wires[port] = self.tags["ports"]["input"][port] if "output" in self.tags["ports"]: for port in self.tags["ports"]["output"]: self.wires[port] = self.tags["ports"]["output"][port] def generate_module_wires(self, invert_reset): buf = "" if invert_reset: buf += vutils.create_wire_buf("rst_n", 1, 0, 0) return buf def generate_module(self, name, tags = None, invert_reset = False, debug = False): self.wires = {} self.bindings = {} self.submodule_buffers = [] if tags: self.tags = tags #Add the ports to wires self.add_ports_to_wires() #Generate the submodules if "submodules" in self.tags: for submodule in self.tags["submodules"]: sub = self.generate_sub_module(invert_reset, submodule, self.tags["submodules"][submodule], debug = False) self.submodule_buffers.append(sub) #Generate the bindings or assignments for the submodules assign_buf = vutils.generate_assigns_buffer(invert_reset, bindings = self.bindings, internal_bindings = {}, debug = False) wire_buf = self.generate_module_wires(invert_reset) buf = generate_timespec_buf() buf = "" param_dict = {} if "parameters" in self.tags: param_dict = self.tags["parameters"] buf += generate_module_ports(module_name = name, port_dict = self.tags["ports"], param_dict = param_dict, debug = False) buf += "\n" buf += "//local parameters\n" buf += "\n" buf += "//registers/wires\n" buf += wire_buf buf += "\n" buf += "//submodules\n" buf += "\n" for sub in self.submodule_buffers: buf += sub buf += "\n" buf += "\n" buf += assign_buf buf += "//asynchronous logic\n" buf += "//synchronous logic\n" buf += "\n" buf += "endmodule" return buf def generate_sub_module_wires(self, invert_reset, instance_name, module_tags): #Add all input and output wires to the ports buf = "" if "input" in module_tags["ports"]: buf += "//inputs\n" for port in module_tags["ports"]["input"]: if port == "clk": continue if port == "rst": continue pname = port if len(instance_name) > 0: pname = "%s_%s" % (instance_name, port) if self.in_wires(pname, module_tags["ports"]["input"][port]): continue buf += vutils.create_wire_buf_from_dict(pname, module_tags["ports"]["input"][port]) self.add_wire(pname, module_tags["ports"]["input"][port]) buf += "\n" if "output" in module_tags["ports"]: buf += "//outputs\n" for port in module_tags["ports"]["output"]: pname = port if len(instance_name) > 0: pname = "%s_%s" % (instance_name, port) if self.in_wires(pname, module_tags["ports"]["output"][port]): continue buf += vutils.create_wire_buf_from_dict(pname, module_tags["ports"]["output"][port]) self.add_wire(pname, module_tags["ports"]["output"][port]) buf += "\n" return buf def generate_sub_module(self, invert_reset, instance_name, config_tags, module_tags = None, enable_unique_ports = True, debug = False): if module_tags is None: filepath = utils.find_rtl_file_location(config_tags["filename"], self.user_paths) module_tags = vutils.get_module_tags(filepath, user_paths = self.user_paths) #if debug: #print "Module Tags:" #utils.pretty_print_dict(module_tags) buf = "//Module %s ( %s )\n" % (module_tags["module"], instance_name) buf += "\n" prename = "" if enable_unique_ports: prename = instance_name buf += self.generate_sub_module_wires(invert_reset, prename, module_tags) buf += vutils.generate_module_port_signals(invert_reset = invert_reset, name = instance_name, prename = prename, slave_tags = config_tags, module_tags = module_tags) #Add the bindings for this modules to the bind dictionary if "bind" in config_tags: for bind in config_tags["bind"]: bname = bind if len(prename) > 0: bname = "%s_%s" % (prename, bind) self.bindings[bname] = {} self.bindings[bname] = config_tags["bind"][bind] return buf def in_wires(self, signal_name, signal_dict): if signal_name not in self.wires.keys(): return False wire_dict = self.wires[signal_name] if signal_dict["size"] == wire_dict["size"]: return True if signal_dict["size"] < wire_dict["size"]: return True return False def add_wire(self, signal_name, signal_dict): if signal_name not in self.wires.keys(): self.wires[signal_name] = signal_dict return
{ "content_hash": "75032c29011629d7ebc681e58a9d9b9f", "timestamp": "", "source": "github", "line_count": 474, "max_line_length": 93, "avg_line_length": 33.426160337552744, "alnum_prop": 0.41788689724816963, "repo_name": "CospanDesign/nysa", "id": "e174e64acbaf4d870ba672064513127bc699ca8c", "size": "16623", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "nysa/ibuilder/sim/module_builder.py", "mode": "33188", "license": "mit", "language": [ { "name": "Coq", "bytes": "6020" }, { "name": "Makefile", "bytes": "1167" }, { "name": "Python", "bytes": "1238364" }, { "name": "Shell", "bytes": "186" }, { "name": "Verilog", "bytes": "235254" } ], "symlink_target": "" }
""" """ __docformat__ = 'restructuredtext' __version__ = '$Id: $' import sys from . import lib_pulseaudio as pa import pyglet _debug = pyglet.options['debug_media'] import mt_media def check(result): if result < 0: error = pa.pa_context_errno(context._context) raise MediaException(pa.pa_strerror(error)) return result def check_not_null(value): if not value: error = pa.pa_context_errno(context._context) raise MediaException(pa.pa_strerror(error)) return value class PulseAudioDriver(mt_media.AbstractAudioDriver): _context = None def __init__(self): self.threaded_mainloop = pa.pa_threaded_mainloop_new() self.mainloop = pa.pa_threaded_mainloop_get_api( self.threaded_mainloop) def create_audio_player(self, source_group, player): return PulseAudioPlayer(source_group, player) def connect(self, server=None): """Connect to pulseaudio server. :Parameters: `server` : str Server to connect to, or ``None`` for the default local server (which may be spawned as a daemon if no server is found). """ # TODO disconnect from old assert not self._context, 'Already connected' # Create context app_name = self.get_app_name() self._context = pa.pa_context_new(self.mainloop, app_name) # Context state callback self._state_cb_func = pa.pa_context_notify_cb_t(self._state_cb) pa.pa_context_set_state_callback(self._context, self._state_cb_func, None) # Connect check( pa.pa_context_connect(self._context, server, 0, None) ) self.lock() check( pa.pa_threaded_mainloop_start(self.threaded_mainloop) ) try: # Wait for context ready. self.wait() if pa.pa_context_get_state(self._context) != pa.PA_CONTEXT_READY: check(-1) finally: self.unlock() def _state_cb(self, context, userdata): if _debug: print('context state cb') state = pa.pa_context_get_state(self._context) if state in (pa.PA_CONTEXT_READY, pa.PA_CONTEXT_TERMINATED, pa.PA_CONTEXT_FAILED): self.signal() def lock(self): """Lock the threaded mainloop against events. Required for all calls into PA.""" pa.pa_threaded_mainloop_lock(self.threaded_mainloop) def unlock(self): """Unlock the mainloop thread.""" pa.pa_threaded_mainloop_unlock(self.threaded_mainloop) def signal(self): """Signal the mainloop thread to break from a wait.""" pa.pa_threaded_mainloop_signal(self.threaded_mainloop, 0) def wait(self): """Wait for a signal.""" pa.pa_threaded_mainloop_wait(self.threaded_mainloop) def sync_operation(self, op): """Wait for an operation to be done or cancelled, then release it. Uses a busy-loop -- make sure a callback is registered to signal this listener.""" while pa.pa_operation_get_state(op) == pa.PA_OPERATION_RUNNING: pa.pa_threaded_mainloop_wait(self.threaded_mainloop) pa.pa_operation_unref(op) def async_operation(self, op): """Release the operation immediately without waiting for it to complete.""" pa.pa_operation_unref(op) def get_app_name(self): """Get the application name as advertised to the pulseaudio server.""" # TODO move app name into pyglet.app (also useful for OS X menu bar?). import sys return sys.argv[0] def dump_debug_info(self): print('Client version: ', pa.pa_get_library_version()) print('Server: ', pa.pa_context_get_server(self._context)) print('Protocol: ', pa.pa_context_get_protocol_version( self._context)) print('Server protocol:', pa.pa_context_get_server_protocol_version( self._context)) print('Local context: ', ( pa.pa_context_is_local(self._context) and 'Yes' or 'No')) def delete(self): """Completely shut down pulseaudio client.""" self.lock() pa.pa_context_unref(self._context) self.unlock() pa.pa_threaded_mainloop_stop(self.threaded_mainloop) pa.pa_threaded_mainloop_free(self.threaded_mainloop) self.threaded_mainloop = None self.mainloop = None # Listener API def _set_volume(self, volume): # TODO pass def _set_position(self, position): # TODO pass def _set_forward_orientation(self, orientation): # TODO pass def _set_up_orientation(self, orientation): # TODO pass class PulseAudioPlayer(mt_media.AbstractAudioPlayer): def __init__(self, source_group, player): super().__init__(source_group, player) self._events = list() self._timestamps = list() # List of (ref_time, timestamp) self._write_index = 0 # Current write index (tracked manually) self._clear_write = False self._buffered_audio_data = None self._underflow_is_eos = False self._playing = False audio_format = source_group.audio_format assert audio_format # Create sample_spec sample_spec = pa.pa_sample_spec() if audio_format.sample_size == 8: sample_spec.format = pa.PA_SAMPLE_U8 elif audio_format.sample_size == 16: if sys.byteorder == 'little': sample_spec.format = pa.PA_SAMPLE_S16LE else: sample_spec.format = pa.PA_SAMPLE_S16BE else: raise MediaException('Unsupported sample size') sample_spec.rate = audio_format.sample_rate sample_spec.channels = audio_format.channels channel_map = None try: context.lock() # Create stream self.stream = pa.pa_stream_new(context._context, str(id(self)), sample_spec, channel_map) check_not_null(self.stream) # Callback trampoline for success operations self._success_cb_func = pa.pa_stream_success_cb_t(self._success_cb) # Callback for underflow (to detect EOS when expected pa_timestamp # does not get reached). self._underflow_cb_func = \ pa.pa_stream_notify_cb_t(self._underflow_cb) pa.pa_stream_set_underflow_callback(self.stream, self._underflow_cb_func, None) # Callback for data write self._write_cb_func = pa.pa_stream_request_cb_t(self._write_cb) pa.pa_stream_set_write_callback(self.stream, self._write_cb_func, None) # Connect to sink device = None buffer_attr = None flags = (pa.PA_STREAM_START_CORKED | pa.PA_STREAM_INTERPOLATE_TIMING) volume = None sync_stream = None # TODO use this check( pa.pa_stream_connect_playback(self.stream, device, buffer_attr, flags, volume, sync_stream) ) # Wait for stream readiness self._state_cb_func = pa.pa_stream_notify_cb_t(self._state_cb) pa.pa_stream_set_state_callback(self.stream, self._state_cb_func, None) while pa.pa_stream_get_state(self.stream) == pa.PA_STREAM_CREATING: context.wait() if pa.pa_stream_get_state(self.stream) != pa.PA_STREAM_READY: check(-1) finally: context.unlock() if _debug: print('stream ready') def _state_cb(self, stream, data): context.signal() def _success_cb(self, stream, success, data): context.signal() def _write_cb(self, stream, bytes, data): if _debug: print('write callback: %d bytes' % bytes) # Asynchronously update time if self._events: context.async_operation( pa.pa_stream_update_timing_info(self.stream, self._success_cb_func, None) ) # Grab next audio packet, or leftovers from last callback. if self._buffered_audio_data: audio_data = self._buffered_audio_data self._buffered_audio_data = None else: audio_data = self.source_group.get_audio_data(bytes) seek_flag = pa.PA_SEEK_RELATIVE if self._clear_write: if _debug: print('seek PA_SEEK_RELATIVE_ON_READ') seek_flag = pa.PA_SEEK_RELATIVE_ON_READ self._clear_write = False # Keep writing packets until `bytes` is depleted while audio_data and bytes > 0: if _debug: print('packet', audio_data.timestamp) if _debug and audio_data.events: print('events', audio_data.events) for event in audio_data.events: event_index = self._write_index + event.timestamp * \ self.source_group.audio_format.bytes_per_second self._events.append((event_index, event)) consumption = min(bytes, audio_data.length) check( pa.pa_stream_write(self.stream, audio_data.data, consumption, pa.pa_free_cb_t(0), # Data is copied 0, seek_flag) ) seek_flag = pa.PA_SEEK_RELATIVE self._timestamps.append((self._write_index, audio_data.timestamp)) self._write_index += consumption self._underflow_is_eos = False if _debug: print('write', consumption) if consumption < audio_data.length: audio_data.consume(consumption, self.source_group.audio_format) self._buffered_audio_data = audio_data break bytes -= consumption if bytes > 0: audio_data = self.source_group.get_audio_data( bytes) # TODO: name change if not audio_data: # Whole source group has been written. Any underflow encountered # after now is the EOS. self._underflow_is_eos = True # In case the source group wasn't long enough to prebuffer stream # to PA's satisfaction, trigger immediate playback (has no effect # if stream is already playing). if self._playing: context.async_operation( pa.pa_stream_trigger(self.stream, pa.pa_stream_success_cb_t(0), None) ) self._process_events() def _underflow_cb(self, stream, data): self._process_events() if self._underflow_is_eos: self._sync_dispatch_player_event('on_eos') self._sync_dispatch_player_event('on_source_group_eos') self._underflow_is_eos = False if _debug: print('eos') else: if _debug: print('underflow') # TODO: does PA automatically restart stream when buffered again? # TODO:: sometimes receive an underflow after EOS... need to # filter? def _process_events(self): if not self._events: return timing_info = pa.pa_stream_get_timing_info(self.stream) if not timing_info: if _debug: print('abort _process_events') return read_index = timing_info.contents.read_index while self._events and self._events[0][0] < read_index: _, event = self._events.pop(0) if _debug: print('dispatch event', event) event._sync_dispatch_to_player(self.player) def _sync_dispatch_player_event(self, event, *args): # TODO if EventLoop not being used, hook into # pyglet.media.dispatch_events. if pyglet.app.event_loop: pyglet.app.event_loop.post_event(self.player, event, *args) def __del__(self): try: self.delete() except: pass def delete(self): if _debug: print('delete') if not self.stream: return context.lock() pa.pa_stream_disconnect(self.stream) context.unlock() pa.pa_stream_unref(self.stream) self.stream = None def clear(self): if _debug: print('clear') self._clear_write = True self._write_index = self._get_read_index() self._timestamps = list() self._events = list() context.lock() context.sync_operation( pa.pa_stream_prebuf(self.stream, self._success_cb_func, None) ) context.unlock() def play(self): if _debug: print('play') context.lock() context.async_operation( pa.pa_stream_cork(self.stream, 0, pa.pa_stream_success_cb_t(0), None) ) # If whole stream has already been written, trigger immediate # playback. if self._underflow_is_eos: context.async_operation( pa.pa_stream_trigger(self.stream, pa.pa_stream_success_cb_t(0), None) ) context.unlock() self._playing = True def stop(self): if _debug: print('stop') context.lock() context.async_operation( pa.pa_stream_cork(self.stream, 1, pa.pa_stream_success_cb_t(0), None) ) context.unlock() self._playing = False def _get_read_index(self): time = pa.pa_usec_t() context.lock() context.sync_operation( pa.pa_stream_update_timing_info(self.stream, self._success_cb_func, None) ) context.unlock() timing_info = pa.pa_stream_get_timing_info(self.stream) if timing_info: read_index = timing_info.contents.read_index else: read_index = 0 if _debug: print('_get_read_index ->', read_index) return read_index def _get_write_index(self): timing_info = pa.pa_stream_get_timing_info(self.stream) if timing_info: write_index = timing_info.contents.write_index else: write_index = 0 if _debug: print('_get_write_index ->', write_index) return write_index def get_time(self, read_index=None): if read_index is None: read_index = self._get_read_index() write_index = 0 timestamp = 0.0 try: write_index, timestamp = self._timestamps[0] write_index, timestamp = self._timestamps[1] while read_index >= write_index: del self._timestamps[0] write_index, timestamp = self._timestamps[1] except IndexError: pass bytes_per_second = self.source_group.audio_format.bytes_per_second time = timestamp + (read_index - write_index) / float(bytes_per_second) if _debug: print('get_time ->', time) return time def set_volume(self, volume): # TODO: TODO pass def set_pitch(self, pitch): # TODO: TODO (pa_stream_update_sample_rate) pass def create_audio_driver(): global context context = PulseAudioDriver() context.connect() if _debug: context.dump_debug_info() return context
{ "content_hash": "7ddd7383f72b1368fd7bc10d294b0943", "timestamp": "", "source": "github", "line_count": 516, "max_line_length": 81, "avg_line_length": 32.05232558139535, "alnum_prop": 0.5340105205877018, "repo_name": "bitcraft/pyglet", "id": "e7480d57abb383b2a83e1aac8ca9552ab8f1de5b", "size": "16562", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "contrib/experimental/mt_media/drivers/pulse/__init__.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "1828" }, { "name": "HTML", "bytes": "1652" }, { "name": "JavaScript", "bytes": "6745" }, { "name": "PHP", "bytes": "2192" }, { "name": "Python", "bytes": "6201398" }, { "name": "Shell", "bytes": "251" } ], "symlink_target": "" }
import argparse import sys import os from PIL import Image focalLength = 525.0 centerX = 319.5 centerY = 239.5 scalingFactor = 5000.0 def generate_pointcloud(rgb_file,depth_file,ply_file): rgb = Image.open(rgb_file) depth = Image.open(depth_file) if rgb.size != depth.size: raise Exception("Color and depth image do not have the same resolution.") if rgb.mode != "RGB": raise Exception("Color image is not in RGB format") if depth.mode != "I": raise Exception("Depth image is not in intensity format") points = [] for v in range(rgb.size[1]): for u in range(rgb.size[0]): color = rgb.getpixel((u,v)) Z = depth.getpixel((u,v)) / scalingFactor if Z==0: continue X = (u - centerX) * Z / focalLength Y = (v - centerY) * Z / focalLength points.append("%f %f %f %d %d %d 0\n"%(X,Y,Z,color[0],color[1],color[2])) file = open(ply_file,"w") file.write('''ply format ascii 1.0 element vertex %d property float x property float y property float z property uchar red property uchar green property uchar blue property uchar alpha end_header %s '''%(len(points),"".join(points))) file.close() if __name__ == '__main__': parser = argparse.ArgumentParser(description=''' This script reads a registered pair of color and depth images and generates a colored 3D point cloud in the PLY format. ''') parser.add_argument('rgb_file', help='input color image (format: png)') parser.add_argument('depth_file', help='input depth image (format: png)') parser.add_argument('ply_file', help='output PLY file (format: ply)') args = parser.parse_args() generate_pointcloud(args.rgb_file,args.depth_file,args.ply_file)
{ "content_hash": "99b394205b402faba886c2c898a9fbb3", "timestamp": "", "source": "github", "line_count": 59, "max_line_length": 111, "avg_line_length": 30.305084745762713, "alnum_prop": 0.6375838926174496, "repo_name": "georgebrindeiro/hybridslam", "id": "d224f6c1943936de2bd85086993eba973671f6d7", "size": "1905", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "data/rgbd_dataset/tools/generate_pointcloud.py", "mode": "33188", "license": "mit", "language": [ { "name": "C++", "bytes": "27480" }, { "name": "Python", "bytes": "40201" }, { "name": "Shell", "bytes": "148" } ], "symlink_target": "" }
from django.core.management.base import BaseCommand from django_datawatch.datawatch import datawatch class Command(BaseCommand): def handle(self, *args, **options): for slug in datawatch.get_all_registered_check_slugs(): print(slug)
{ "content_hash": "d160f251e7564496526f231d8296810a", "timestamp": "", "source": "github", "line_count": 9, "max_line_length": 63, "avg_line_length": 28.88888888888889, "alnum_prop": 0.7230769230769231, "repo_name": "RegioHelden/django-datawatch", "id": "e425102ed5e1739575deb5636eaa1f8a660611bb", "size": "284", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "django_datawatch/management/commands/datawatch_list_checks.py", "mode": "33188", "license": "mit", "language": [ { "name": "Dockerfile", "bytes": "710" }, { "name": "HTML", "bytes": "17865" }, { "name": "Python", "bytes": "73346" } ], "symlink_target": "" }
import tests.model_control.test_ozone_custom_models_enabled as testmod testmod.build_model( ['Integration'] , ['Lag1Trend'] , ['BestCycle'] , ['NoAR'] );
{ "content_hash": "03927e3d2b1266f06ba1e4a520d4fe0b", "timestamp": "", "source": "github", "line_count": 4, "max_line_length": 82, "avg_line_length": 38.75, "alnum_prop": 0.7096774193548387, "repo_name": "antoinecarme/pyaf", "id": "75fbb759a74cf70cd55a28f8f79b3f587c79caad", "size": "155", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/model_control/detailed/transf_Integration/model_control_one_enabled_Integration_Lag1Trend_BestCycle_NoAR.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Makefile", "bytes": "6773299" }, { "name": "Procfile", "bytes": "24" }, { "name": "Python", "bytes": "54209093" }, { "name": "R", "bytes": "807" }, { "name": "Shell", "bytes": "3619" } ], "symlink_target": "" }
from __future__ import absolute_import, unicode_literals import babel from babel import dates def _get_locale(): return 'en_US' def _get_tz(): """ Factory that returns forms's timezone :return: """ return 'UTC' def locale_factory(factory): """ Decorator which defines a factory function which set forms locale. If not defined locale 'en_US' is used :param factory: function :return: str with locale """ global _get_locale _get_locale = factory return factory def tz_factory(factory): """ Decorator which defines a factory function which set forms timezone. If not defined tz 'UTC' is used :param factory: function :return: str: with timezone """ global _get_tz _get_tz = factory return factory def get_locale(): """ Build a ``babel.Locale`` based on locale factory :return: ``babel.Locale`` """ return babel.Locale.parse(_get_locale()) def get_timezone(): """ Build a ``babel.Timezone`` based on tz factory :return: ``babel.Timezone`` """ return dates.get_timezone(_get_tz())
{ "content_hash": "551b9ff74b599a13c3b13f41e43f31ca", "timestamp": "", "source": "github", "line_count": 58, "max_line_length": 59, "avg_line_length": 19.43103448275862, "alnum_prop": 0.6317657497781721, "repo_name": "renzon/gaeforms", "id": "b2f864e90827089d7da0ae7fbadf6da4b611b66e", "size": "1151", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "gaeforms/settings.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "90365" } ], "symlink_target": "" }
""" This module provides an interface to the Elastic Compute Cloud (EC2) Auto Scaling service. """ import boto from boto.connection import AWSQueryConnection from boto.ec2.regioninfo import RegionInfo from boto.ec2.autoscale.request import Request from boto.ec2.autoscale.trigger import Trigger from boto.ec2.autoscale.launchconfig import LaunchConfiguration from boto.ec2.autoscale.group import AutoScalingGroup from boto.ec2.autoscale.activity import Activity class AutoScaleConnection(AWSQueryConnection): APIVersion = boto.config.get('Boto', 'autoscale_version', '2009-05-15') Endpoint = boto.config.get('Boto', 'autoscale_endpoint', 'autoscaling.amazonaws.com') DefaultRegionName = 'us-east-1' DefaultRegionEndpoint = 'autoscaling.amazonaws.com' SignatureVersion = '2' def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, is_secure=True, port=None, proxy=None, proxy_port=None, proxy_user=None, proxy_pass=None, debug=1, https_connection_factory=None, region=None, path='/'): """ Init method to create a new connection to the AutoScaling service. B{Note:} The host argument is overridden by the host specified in the boto configuration file. """ if not region: region = RegionInfo(self, self.DefaultRegionName, self.DefaultRegionEndpoint, AutoScaleConnection) self.region = region AWSQueryConnection.__init__(self, aws_access_key_id, aws_secret_access_key, is_secure, port, proxy, proxy_port, proxy_user, proxy_pass, self.region.endpoint, debug, https_connection_factory, path=path) def build_list_params(self, params, items, label): """ items is a list of dictionaries or strings: [{'Protocol' : 'HTTP', 'LoadBalancerPort' : '80', 'InstancePort' : '80'},..] etc. or ['us-east-1b',...] """ # different from EC2 list params for i in xrange(1, len(items)+1): if isinstance(items[i-1], dict): for k, v in items[i-1].iteritems(): params['%s.member.%d.%s' % (label, i, k)] = v elif isinstance(items[i-1], basestring): params['%s.member.%d' % (label, i)] = items[i-1] def _update_group(self, op, as_group): params = { 'AutoScalingGroupName' : as_group.name, 'Cooldown' : as_group.cooldown, 'LaunchConfigurationName' : as_group.launch_config_name, 'MinSize' : as_group.min_size, 'MaxSize' : as_group.max_size, } if op.startswith('Create'): if as_group.availability_zones: zones = as_group.availability_zones else: zones = [as_group.availability_zone] self.build_list_params(params, as_group.load_balancers, 'LoadBalancerNames') self.build_list_params(params, zones, 'AvailabilityZones') return self.get_object(op, params, Request) def create_auto_scaling_group(self, as_group): """ Create auto scaling group. """ return self._update_group('CreateAutoScalingGroup', as_group) def create_launch_configuration(self, launch_config): """ Creates a new Launch Configuration. :type launch_config: boto.ec2.autoscale.launchconfig.LaunchConfiguration :param launch_config: LaunchConfiguraiton object. """ params = { 'ImageId' : launch_config.image_id, 'KeyName' : launch_config.key_name, 'LaunchConfigurationName' : launch_config.name, 'InstanceType' : launch_config.instance_type, } if launch_config.user_data: params['UserData'] = launch_config.user_data if launch_config.kernel_id: params['KernelId'] = launch_config.kernel_id if launch_config.ramdisk_id: params['RamdiskId'] = launch_config.ramdisk_id if launch_config.block_device_mappings: self.build_list_params(params, launch_config.block_device_mappings, 'BlockDeviceMappings') self.build_list_params(params, launch_config.security_groups, 'SecurityGroups') return self.get_object('CreateLaunchConfiguration', params, Request) def create_trigger(self, trigger): """ """ params = {'TriggerName' : trigger.name, 'AutoScalingGroupName' : trigger.autoscale_group.name, 'MeasureName' : trigger.measure_name, 'Statistic' : trigger.statistic, 'Period' : trigger.period, 'Unit' : trigger.unit, 'LowerThreshold' : trigger.lower_threshold, 'LowerBreachScaleIncrement' : trigger.lower_breach_scale_increment, 'UpperThreshold' : trigger.upper_threshold, 'UpperBreachScaleIncrement' : trigger.upper_breach_scale_increment, 'BreachDuration' : trigger.breach_duration} # dimensions should be a list of tuples dimensions = [] for dim in trigger.dimensions: name, value = dim dimensions.append(dict(Name=name, Value=value)) self.build_list_params(params, dimensions, 'Dimensions') req = self.get_object('CreateOrUpdateScalingTrigger', params, Request) return req def get_all_groups(self, names=None): """ """ params = {} if names: self.build_list_params(params, names, 'AutoScalingGroupNames') return self.get_list('DescribeAutoScalingGroups', params, [('member', AutoScalingGroup)]) def get_all_launch_configurations(self, names=None): """ """ params = {} if names: self.build_list_params(params, names, 'LaunchConfigurationNames') return self.get_list('DescribeLaunchConfigurations', params, [('member', LaunchConfiguration)]) def get_all_activities(self, autoscale_group, activity_ids=None, max_records=100): """ Get all activities for the given autoscaling group. :type autoscale_group: str or AutoScalingGroup object :param autoscale_group: The auto scaling group to get activities on. @max_records: int :param max_records: Maximum amount of activities to return. """ name = autoscale_group if isinstance(autoscale_group, AutoScalingGroup): name = autoscale_group.name params = {'AutoScalingGroupName' : name} if activity_ids: self.build_list_params(params, activity_ids, 'ActivityIds') return self.get_list('DescribeScalingActivities', params, [('member', Activity)]) def get_all_triggers(self, autoscale_group): params = {'AutoScalingGroupName' : autoscale_group} return self.get_list('DescribeTriggers', params, [('member', Trigger)]) def terminate_instance(self, instance_id, decrement_capacity=True): params = { 'InstanceId' : instance_id, 'ShouldDecrementDesiredCapacity' : decrement_capacity } return self.get_object('TerminateInstanceInAutoScalingGroup', params, Activity)
{ "content_hash": "632b2d8ed2e20d860e70f11ad90b71b3", "timestamp": "", "source": "github", "line_count": 192, "max_line_length": 87, "avg_line_length": 43.34375, "alnum_prop": 0.5471040615236722, "repo_name": "charlescearl/VirtualMesos", "id": "58b1bb06ec768d2196b33575f83d48bc911c5ba8", "size": "9422", "binary": false, "copies": "5", "ref": "refs/heads/mesos-vm", "path": "third_party/boto-2.0b2/boto/ec2/autoscale/__init__.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "C", "bytes": "2047732" }, { "name": "C++", "bytes": "44919805" }, { "name": "D", "bytes": "3341703" }, { "name": "Emacs Lisp", "bytes": "7798" }, { "name": "Java", "bytes": "14984708" }, { "name": "JavaScript", "bytes": "39087" }, { "name": "Objective-C", "bytes": "118273" }, { "name": "PHP", "bytes": "152555" }, { "name": "Perl", "bytes": "623347" }, { "name": "Python", "bytes": "3910489" }, { "name": "Ruby", "bytes": "67470" }, { "name": "Shell", "bytes": "15673503" }, { "name": "Smalltalk", "bytes": "56562" }, { "name": "VimL", "bytes": "3774" } ], "symlink_target": "" }
from django import template register = template.Library() @register.simple_tag(takes_context = True) def getLoggedUser(context): request = context['request'] if 'user' in request.session: return request.session['user'] else: return ''
{ "content_hash": "012424cafb486063da3c9c8810e4d9a9", "timestamp": "", "source": "github", "line_count": 11, "max_line_length": 42, "avg_line_length": 22.181818181818183, "alnum_prop": 0.7377049180327869, "repo_name": "luiscarlosgph/nas", "id": "104267355465012e7a6397b481a192a4edb55393", "size": "244", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "authentication/templatetags/authentication_extras.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "157451" }, { "name": "JavaScript", "bytes": "736429" }, { "name": "PHP", "bytes": "1838012" }, { "name": "Perl", "bytes": "874" }, { "name": "Python", "bytes": "5868555" }, { "name": "Shell", "bytes": "3887" } ], "symlink_target": "" }
from __future__ import unicode_literals import json from mock import Mock import pytest from boxsdk.network.default_network import DefaultNetworkResponse @pytest.fixture(scope='session') def generic_successful_response(): mock_network_response = Mock(DefaultNetworkResponse) mock_network_response.content = b'{"message": "success"}' mock_network_response.status_code = 200 mock_network_response.ok = True mock_network_response.raw = Mock() return mock_network_response @pytest.fixture(scope='session') def successful_token_json_response(access_token, refresh_token): # pylint:disable=redefined-outer-name return { 'access_token': access_token, 'expires_in': 3600, 'restricted_to': [], 'token_type': 'bearer', 'refresh_token': refresh_token, } @pytest.fixture(scope='session') def successful_token_response(successful_token_mock, successful_token_json_response): # pylint:disable=redefined-outer-name successful_token_mock.json = Mock(return_value=successful_token_json_response) successful_token_mock.ok = True successful_token_mock.content = json.dumps(successful_token_json_response) successful_token_mock.status_code = 200 return successful_token_mock @pytest.fixture(scope='session') def successful_token_mock(): return Mock(DefaultNetworkResponse) @pytest.fixture(scope='session') def unauthorized_response(): res = Mock(DefaultNetworkResponse) res.content = b'' res.status_code = 401 res.ok = False return res @pytest.fixture(scope='session') def non_json_response(): mock_network_response = Mock(DefaultNetworkResponse) mock_network_response.content = b'' mock_network_response.ok = True mock_network_response.status_code = 200 mock_network_response.json.side_effect = ValueError('No JSON object could be decoded') return mock_network_response @pytest.fixture(scope='session', params=[202, 429]) def retry_after_response(request): mock_network_response = Mock(DefaultNetworkResponse) mock_network_response.status_code = int(request.param) mock_network_response.headers = {'Retry-After': '1'} return mock_network_response @pytest.fixture(scope='session', params=[502, 503]) def server_error_response(request): mock_network_response = Mock(DefaultNetworkResponse) mock_network_response.status_code = int(request.param) return mock_network_response @pytest.fixture(scope='session') def bad_network_response(): mock_network_response = Mock(DefaultNetworkResponse) mock_network_response.status_code = 404 mock_network_response.json.return_value = {'code': 404, 'message': 'Not Found'} mock_network_response.ok = False return mock_network_response @pytest.fixture(scope='session') def failed_non_json_response(): mock_network_response = Mock(DefaultNetworkResponse) mock_network_response.status_code = 404 mock_network_response.json.side_effect = ValueError('No JSON object could be decoded') mock_network_response.ok = False return mock_network_response @pytest.fixture(scope='session') def access_token(): return 'T9cE5asGnuyYCCqIZFoWjFHvNbvVqHjl' @pytest.fixture(scope='session') def refresh_token(): return 'J7rxTiWOHMoSC1isKZKBZWizoRXjkQzig5C6jFgCVJ9bUnsUfGMinKBDLZWP9BgRb' @pytest.fixture(scope='session') def test_url(): return 'https://box.com/test/url' @pytest.fixture(scope='session') def client_id(): return 'fake_client_id' @pytest.fixture(scope='session') def client_secret(): return 'fake_client_secret' @pytest.fixture(scope='session') def auth_code(): return 'fake_auth_code' @pytest.fixture(params=[ b'Hello', 'Goodbye', '42', ]) def test_file_content(request): return request.param @pytest.fixture() def update_file_content(test_file_content): # pylint:disable=redefined-outer-name return test_file_content @pytest.fixture() def test_file_path(): return 'path/to/file'
{ "content_hash": "e0ea7a261cdf86b2f4b4e2881610e910", "timestamp": "", "source": "github", "line_count": 144, "max_line_length": 90, "avg_line_length": 27.73611111111111, "alnum_prop": 0.7243365047571357, "repo_name": "jwkozel/demobx", "id": "72ca5a8d2310fb760c8ed12eb88b578c92fc391a", "size": "4011", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "test/conftest.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "309050" }, { "name": "Smarty", "bytes": "527" } ], "symlink_target": "" }
""" Support for Blink system camera control. For more details about this platform, please refer to the documentation at https://home-assistant.io/components/binary_sensor.blink/ """ from homeassistant.components.blink import DOMAIN from homeassistant.components.binary_sensor import BinarySensorDevice DEPENDENCIES = ['blink'] def setup_platform(hass, config, add_devices, discovery_info=None): """Setup the blink binary sensors.""" if discovery_info is None: return data = hass.data[DOMAIN].blink devs = list() for name in data.cameras: devs.append(BlinkCameraMotionSensor(name, data)) devs.append(BlinkSystemSensor(data)) add_devices(devs, True) class BlinkCameraMotionSensor(BinarySensorDevice): """A representation of a Blink binary sensor.""" def __init__(self, name, data): """Initialize the sensor.""" self._name = 'blink_' + name + '_motion_enabled' self._camera_name = name self.data = data self._state = self.data.cameras[self._camera_name].armed @property def name(self): """Return the name of the blink sensor.""" return self._name @property def is_on(self): """Return the status of the sensor.""" return self._state def update(self): """Update sensor state.""" self.data.refresh() self._state = self.data.cameras[self._camera_name].armed class BlinkSystemSensor(BinarySensorDevice): """A representation of a Blink system sensor.""" def __init__(self, data): """Initialize the sensor.""" self._name = 'blink armed status' self.data = data self._state = self.data.arm @property def name(self): """Return the name of the blink sensor.""" return self._name.replace(" ", "_") @property def is_on(self): """Return the status of the sensor.""" return self._state def update(self): """Update sensor state.""" self.data.refresh() self._state = self.data.arm
{ "content_hash": "d2c1f5a87be6e454d071f45a1d58c235", "timestamp": "", "source": "github", "line_count": 74, "max_line_length": 74, "avg_line_length": 27.87837837837838, "alnum_prop": 0.6325739214735822, "repo_name": "miniconfig/home-assistant", "id": "8d84ffb9c90ee903d99859231da4c34450d02147", "size": "2063", "binary": false, "copies": "3", "ref": "refs/heads/dev", "path": "homeassistant/components/binary_sensor/blink.py", "mode": "33188", "license": "mit", "language": [ { "name": "HTML", "bytes": "1601137" }, { "name": "Python", "bytes": "5622178" }, { "name": "Ruby", "bytes": "517" }, { "name": "Shell", "bytes": "15016" } ], "symlink_target": "" }
from nova import test from nova.tests import fixtures as nova_fixtures from nova.tests.functional import fixtures as func_fixtures from nova.tests.functional import integrated_helpers import nova.tests.unit.image.fake from nova.tests.unit import policy_fixture from nova.virt import fake class TestResizeWithNoAllocationScheduler( test.TestCase, integrated_helpers.InstanceHelperMixin): """Regression tests for bug #1741307 Some scheduler drivers, like the old CachingScheduler driver, do not use Placement to make claims (allocations) against compute nodes during scheduling like the FilterScheduler does. During a cold migrate / resize, the FilterScheduler will "double up" the instance allocations so the instance has resource allocations made against both the source node and the chosen destination node. Conductor will then attempt to "swap" the source node allocation to the migration record. If using a non-Placement driver, there are no allocations for the instance on the source node and conductor fails. Note that if the compute running the instance was running Ocata code or older, then the compute itself would create the allocations in Placement via the ResourceTracker, but once all computes are upgraded to Pike or newer, the compute no longer creates allocations in Placement because it assumes the scheduler is doing that, which is not the case with these outlier scheduler drivers. This is a regression test to show the failure before it's fixed and then can be used to confirm the fix. """ microversion = 'latest' def setUp(self): super(TestResizeWithNoAllocationScheduler, self).setUp() self.useFixture(policy_fixture.RealPolicyFixture()) self.useFixture(nova_fixtures.NeutronFixture(self)) self.useFixture(func_fixtures.PlacementFixture()) api_fixture = self.useFixture(nova_fixtures.OSAPIFixture( api_version='v2.1')) self.api = api_fixture.admin_api self.api.microversion = self.microversion nova.tests.unit.image.fake.stub_out_image_service(self) self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset) self.start_service('conductor') # Create two compute nodes/services. for host in ('host1', 'host2'): fake.set_nodes([host]) self.addCleanup(fake.restore_nodes) self.start_service('compute', host=host) scheduler_service = self.start_service('scheduler') # We need to mock the FilterScheduler to not use Placement so that # allocations won't be created during scheduling. scheduler_service.manager.driver.USES_ALLOCATION_CANDIDATES = False flavors = self.api.get_flavors() self.old_flavor = flavors[0] self.new_flavor = flavors[1] def test_resize(self): # Create our server without networking just to keep things simple. server_req = self._build_minimal_create_server_request( self.api, 'test-resize', flavor_id=self.old_flavor['id'], image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6', networks='none') server = self.api.post_server({'server': server_req}) server = self._wait_for_state_change(self.api, server, 'ACTIVE') original_host = server['OS-EXT-SRV-ATTR:host'] target_host = 'host1' if original_host == 'host2' else 'host2' # Issue the resize request. post = { 'resize': { 'flavorRef': self.new_flavor['id'] } } self.api.post_server_action(server['id'], post) # Poll the server until the resize is done. server = self._wait_for_state_change( self.api, server, 'VERIFY_RESIZE') # Assert that the server was migrated to the other host. self.assertEqual(target_host, server['OS-EXT-SRV-ATTR:host']) # Confirm the resize. post = {'confirmResize': None} self.api.post_server_action(server['id'], post, check_response_status=[204])
{ "content_hash": "dae0fb29b03054c6de528ee2125c3fda", "timestamp": "", "source": "github", "line_count": 96, "max_line_length": 78, "avg_line_length": 43.125, "alnum_prop": 0.6797101449275362, "repo_name": "mikalstill/nova", "id": "c8824f23178e4d93779bc7cdd7ed93d111aedb0f", "size": "4684", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "nova/tests/functional/regressions/test_bug_1741307.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "PHP", "bytes": "3325" }, { "name": "Python", "bytes": "22797282" }, { "name": "Shell", "bytes": "32969" }, { "name": "Smarty", "bytes": "418399" } ], "symlink_target": "" }
import ldap import copy import constants # Local module # Helper function def mergeDicts(toDict, fromDict): #Copy items from 'fromDict' to 'toDict' if not (isinstance(fromDict, dict) and isinstance(toDict,dict)): return None for key in fromDict: keyCopy = copy.copy(key) valueCopy = copy.copy(fromDict[key]) toDict[keyCopy] = valueCopy def getLDAPInfo(searchParamDict): # Return results of a query on an ldap directory, given queryParameters # and/or searchFilters eg # To get users' phone numbers and emails whose # username is 'konradOno' # results = getLDAPInfo( # { # constants.SEARCH_KEY_WORD:'(uid=*konradOno)', # constants.SEARCH_FILTERS_KEY:[constants.LDAP_MAIL_KEY, # constants.LDAP_PHONE_KEY] # }) #The results with have a meta section and a data section dataArray = list() metaDict = dict() resultsDict = dict() resultsDict[constants.DATA_KEY] = dataArray resultsDict[constants.META_KEY] = metaDict #metaDict[constants.SEARCH_PARAMS_KEY] = None #metaDict[constants.SEARCH_FILTERS_KEY] = None metaDict[constants.SUCCESS_STATUS_KEY] = False searchKeyWord = searchParamDict.get(constants.SEARCH_KEYWORD_KEY) print(searchParamDict, constants.SEARCH_KEYWORD_KEY) if hasattr(searchKeyWord, '__iter__'): searchKeyWord = "&".join(filter(lambda s: s, searchKeyWord)) if not (isinstance(searchParamDict, dict) and searchKeyWord): return resultsDict print("srPD ", searchParamDict) # !!! Once you are ready to deploy and have made your certificate signed # and recognized by the UOFA, set the last argument to True !! ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, False) uofa_ldapObject = ldap.initialize(constants.UOFA_LDAP_URI) defaultQueryParams = [constants.LDAP_SURNAME_KEY, constants.LDAP_PHONE_KEY, constants.LDAP_GIVEN_NAME_KEY, constants.LDAP_UID_KEY] givenFilters = searchParamDict.get(constants.SEARCH_FILTERS_KEY, defaultQueryParams) searchFilters = list() for aFilter in givenFilters: if ',' in aFilter: delimitedFilters = aFilter.split(',') searchFilters += map(lambda s: str(s), filter(lambda a:a, delimitedFilters)) else: searchFilters.append(str(aFilter)) print("searchFs ", searchFilters) #metaDict[constants.SEARCH_PARAMS_KEY] = searchKeyWord #metaDict[constants.SEARCH_FILTERS_KEY] = searchFilters try: searchResults = uofa_ldapObject.search_s( constants.UALBERTA_PEOPLE_TREE_SEARCH, ldap.SCOPE_SUBTREE,searchKeyWord,searchFilters ) except Exception: #An unhandled exception occured here, implement handling later print("Exception here") else: metaDict[constants.SUCCESS_STATUS_KEY] = True resultsLen = len(searchResults) metaDict[constants.COUNT_KEY] = resultsLen dataArray = map(lambda tup: tup[1], searchResults) resultsDict[constants.DATA_KEY] = dataArray return resultsDict def main(): results = getLDAPInfo( { constants.SEARCH_KEYWORD_KEY:'(uid=*klind*)', constants.SEARCH_FILTERS_KEY : [ constants.LDAP_EMAIL_KEY, constants.LDAP_PHONE_KEY ] } ) print(results) if __name__ == '__main__': main()
{ "content_hash": "ea3b591d31b9786d5af187f1d9084c7f", "timestamp": "", "source": "github", "line_count": 93, "max_line_length": 90, "avg_line_length": 34.61290322580645, "alnum_prop": 0.7039453246349798, "repo_name": "odeke-em/ldappy", "id": "0af6765a852ab40a5a722e8b3593a796bdd28c3b", "size": "3374", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "ldapUsage/ldapQuerying.py", "mode": "33261", "license": "mit", "language": [ { "name": "Python", "bytes": "9641" } ], "symlink_target": "" }
import copy from direct.directnotify import DirectNotifyGlobal from direct.interval.IntervalGlobal import * from direct.showbase import AppRunnerGlobal from direct.showbase import DirectObject from direct.showbase import PythonUtil import os from pandac.PandaModules import * import re import sys import token import tokenize from StringIO import StringIO import BlinkingArrows from otp.speedchat import SpeedChatGlobals from toontown.ai import DistributedBlackCatMgr from toontown.char import Char from toontown.char import CharDNA from toontown.chat.ChatGlobals import * from toontown.suit import Suit from toontown.suit import SuitDNA from toontown.toon import ToonHeadFrame from toontown.toonbase import TTLocalizer from toontown.toonbase import ToontownBattleGlobals from toontown.quest import QuestScripts notify = DirectNotifyGlobal.directNotify.newCategory('QuestParser') lineDict = {} globalVarDict = {} curId = None FLOAT = re.compile(r'[+-]?\d+[.]\d*([e][+-]\d+)?') def init(): globalVarDict.update({'render': render, 'camera': camera, 'hidden': hidden, 'aspect2d': aspect2d, 'localToon': base.localAvatar, 'laffMeter': base.localAvatar.laffMeter, 'inventory': base.localAvatar.inventory, 'bFriendsList': base.localAvatar.bFriendsList, 'book': base.localAvatar.book, 'bookPrevArrow': base.localAvatar.book.prevArrow, 'bookNextArrow': base.localAvatar.book.nextArrow, 'bookOpenButton': base.localAvatar.book.bookOpenButton, 'bookCloseButton': base.localAvatar.book.bookCloseButton, 'chatNormalButton': base.localAvatar.chatMgr.normalButton, 'chatScButton': base.localAvatar.chatMgr.scButton, 'arrows': BlinkingArrows.BlinkingArrows()}) def clear(): globalVarDict.clear() def readFile(): global curId script = StringIO(QuestScripts.SCRIPT) def readLine(): return script.readline().replace('\r', '') gen = tokenize.generate_tokens(readLine) line = getLineOfTokens(gen) while line is not None: if line == []: line = getLineOfTokens(gen) continue if line[0] == 'ID': parseId(line) elif curId is None: notify.error('Every script must begin with an ID') else: lineDict[curId].append(line) line = getLineOfTokens(gen) script.close() def getLineOfTokens(gen): tokens = [] nextNeg = 0 try: token = gen.next() except StopIteration: return None if token[0] == tokenize.ENDMARKER: return None while token[0] != tokenize.NEWLINE and token[0] != tokenize.NL: if token[0] == tokenize.COMMENT: pass elif token[0] == tokenize.OP and token[1] == '-': nextNeg = 1 elif token[0] == tokenize.NUMBER: if re.match(FLOAT, token[1]): number = float(token[1]) else: number = int(token[1]) if nextNeg: tokens.append(-number) nextNeg = 0 else: tokens.append(number) elif token[0] == tokenize.STRING: tokens.append(eval(token[1])) elif token[0] == tokenize.NAME: tokens.append(token[1]) else: notify.warning('Ignored token type: %s on line: %s' % (tokenize.tok_name[token[0]], token[2][0])) try: token = gen.next() except StopIteration: break return tokens def parseId(line): global curId curId = line[1] notify.debug('Setting current scriptId to: %s' % curId) if questDefined(curId): notify.error('Already defined scriptId: %s' % curId) else: lineDict[curId] = [] def questDefined(scriptId): return scriptId in lineDict class NPCMoviePlayer(DirectObject.DirectObject): def __init__(self, scriptId, toon, npc): DirectObject.DirectObject.__init__(self) self.scriptId = scriptId self.toon = toon self.isLocalToon = self.toon == base.localAvatar self.npc = npc self.privateVarDict = {} self.toonHeads = {} self.chars = [] self.uniqueId = 'scriptMovie_' + str(self.scriptId) + '_' + str(toon.getDoId()) + '_' + str(npc.getDoId()) self.setVar('toon', self.toon) self.setVar('npc', self.npc) self.chapterDict = {} self.timeoutTrack = None self.currentTrack = None return def getVar(self, varName): if varName in self.privateVarDict: return self.privateVarDict[varName] elif varName in globalVarDict: return globalVarDict[varName] elif varName.find('tomDialogue') > -1 or varName.find('harryDialogue') > -1: notify.warning('%s getting referenced. Tutorial Ack: %d Place: %s' % (varName, base.localAvatar.tutorialAck, base.cr.playGame.hood)) return None else: notify.error('Variable not defined: %s' % varName) return None def delVar(self, varName): if varName in self.privateVarDict: del self.privateVarDict[varName] elif varName in globalVarDict: del globalVarDict[varName] else: notify.warning('Variable not defined: %s' % varName) def setVar(self, varName, var): self.privateVarDict[varName] = var def cleanup(self): if self.currentTrack: self.currentTrack.pause() self.currentTrack = None self.ignoreAll() taskMgr.remove(self.uniqueId) for toonHeadFrame in self.toonHeads.values(): toonHeadFrame.destroy() while self.chars: self.__unloadChar(self.chars[0]) del self.toonHeads del self.privateVarDict del self.chapterDict del self.toon del self.npc del self.timeoutTrack return def __unloadChar(self, char): char.removeActive() if char.style.name == 'mk' or char.style.name == 'mn': char.stopEarTask() char.delete() self.chars.remove(char) def timeout(self, fFinish = 0): if self.timeoutTrack: if fFinish: self.timeoutTrack.finish() else: self.timeoutTrack.start() def finishMovie(self): self.npc.finishMovie(self.toon, self.isLocalToon, 0.0) def playNextChapter(self, eventName, timeStamp = 0.0): trackList = self.chapterDict[eventName] if trackList: self.currentTrack = trackList.pop(0) self.currentTrack.start() else: notify.debug('Movie ended waiting for an event (%s)' % eventName) def play(self): lineNum = 0 self.currentEvent = 'start' lines = lineDict.get(self.scriptId) if lines is None: notify.error('No movie defined for scriptId: %s' % self.scriptId) chapterList = [] timeoutList = [] for line in lines: lineNum += 1 command = line[0] if command == 'UPON_TIMEOUT': uponTimeout = 1 iList = timeoutList line = line[1:] command = line[0] else: uponTimeout = 0 iList = chapterList if command == 'CALL': if uponTimeout: self.notify.error('CALL not allowed in an UPON_TIMEOUT') iList.append(self.parseCall(line)) continue elif command == 'DEBUG': iList.append(self.parseDebug(line)) continue elif command == 'WAIT': if uponTimeout: self.notify.error('WAIT not allowed in an UPON_TIMEOUT') iList.append(self.parseWait(line)) continue elif command == 'CHAT': iList.append(self.parseChat(line)) continue elif command == 'CLEAR_CHAT': iList.append(self.parseClearChat(line)) continue elif command == 'FINISH_QUEST_MOVIE': chapterList.append(Func(self.finishMovie)) continue elif command == 'CHAT_CONFIRM': if uponTimeout: self.notify.error('CHAT_CONFIRM not allowed in an UPON_TIMEOUT') avatarName = line[1] avatar = self.getVar(avatarName) nextEvent = avatar.uniqueName('doneChatPage') iList.append(Func(self.acceptOnce, nextEvent, self.playNextChapter, [nextEvent])) iList.append(self.parseChatConfirm(line)) self.closePreviousChapter(iList) chapterList = [] self.currentEvent = nextEvent continue elif command == 'LOCAL_CHAT_CONFIRM': if uponTimeout: self.notify.error('LOCAL_CHAT_CONFIRM not allowed in an UPON_TIMEOUT') avatarName = line[1] avatar = self.getVar(avatarName) nextEvent = avatar.uniqueName('doneChatPage') iList.append(Func(self.acceptOnce, nextEvent, self.playNextChapter, [nextEvent])) iList.append(self.parseLocalChatConfirm(line)) self.closePreviousChapter(iList) chapterList = [] self.currentEvent = nextEvent continue elif command == 'LOCAL_CHAT_PERSIST': iList.append(self.parseLocalChatPersist(line)) continue elif command == 'LOCAL_CHAT_TO_CONFIRM': if uponTimeout: self.notify.error('LOCAL_CHAT_TO_CONFIRM not allowed in an UPON_TIMEOUT') avatarName = line[1] avatar = self.getVar(avatarName) nextEvent = avatar.uniqueName('doneChatPage') iList.append(Func(self.acceptOnce, nextEvent, self.playNextChapter, [nextEvent])) iList.append(self.parseLocalChatToConfirm(line)) self.closePreviousChapter(iList) chapterList = [] self.currentEvent = nextEvent continue elif command == 'CC_CHAT_CONFIRM': if uponTimeout: self.notify.error('CC_CHAT_CONFIRM not allowed in an UPON_TIMEOUT') avatarName = line[1] avatar = self.getVar(avatarName) nextEvent = avatar.uniqueName('doneChatPage') iList.append(Func(self.acceptOnce, nextEvent, self.playNextChapter, [nextEvent])) iList.append(self.parseCCChatConfirm(line)) self.closePreviousChapter(iList) chapterList = [] self.currentEvent = nextEvent continue elif command == 'CC_CHAT_TO_CONFIRM': if uponTimeout: self.notify.error('CC_CHAT_TO_CONFIRM not allowed in an UPON_TIMEOUT') avatarName = line[1] avatar = self.getVar(avatarName) nextEvent = avatar.uniqueName('doneChatPage') iList.append(Func(self.acceptOnce, nextEvent, self.playNextChapter, [nextEvent])) iList.append(self.parseCCChatToConfirm(line)) self.closePreviousChapter(iList) chapterList = [] self.currentEvent = nextEvent continue if self.isLocalToon: if command == 'LOAD': self.parseLoad(line) elif command == 'LOAD_SFX': self.parseLoadSfx(line) elif command == 'LOAD_DIALOGUE': self.parseLoadDialogue(line) elif command == 'LOAD_CC_DIALOGUE': self.parseLoadCCDialogue(line) elif command == 'LOAD_CHAR': self.parseLoadChar(line) elif command == 'LOAD_CLASSIC_CHAR': self.parseLoadClassicChar(line) elif command == 'UNLOAD_CHAR': iList.append(self.parseUnloadChar(line)) elif command == 'LOAD_SUIT': self.parseLoadSuit(line) elif command == 'SET': self.parseSet(line) elif command == 'LOCK_LOCALTOON': iList.append(self.parseLockLocalToon(line)) elif command == 'FREE_LOCALTOON': iList.append(self.parseFreeLocalToon(line)) elif command == 'REPARENTTO': iList.append(self.parseReparent(line)) elif command == 'WRTREPARENTTO': iList.append(self.parseWrtReparent(line)) elif command == 'SHOW': iList.append(self.parseShow(line)) elif command == 'HIDE': iList.append(self.parseHide(line)) elif command == 'POS': iList.append(self.parsePos(line)) elif command == 'HPR': iList.append(self.parseHpr(line)) elif command == 'SCALE': iList.append(self.parseScale(line)) elif command == 'POSHPRSCALE': iList.append(self.parsePosHprScale(line)) elif command == 'COLOR': iList.append(self.parseColor(line)) elif command == 'COLOR_SCALE': iList.append(self.parseColorScale(line)) elif command == 'ADD_LAFFMETER': iList.append(self.parseAddLaffMeter(line)) elif command == 'LAFFMETER': iList.append(self.parseLaffMeter(line)) elif command == 'OBSCURE_LAFFMETER': iList.append(self.parseObscureLaffMeter(line)) elif command == 'ARROWS_ON': iList.append(self.parseArrowsOn(line)) elif command == 'ARROWS_OFF': iList.append(self.parseArrowsOff(line)) elif command == 'START_THROB': iList.append(self.parseStartThrob(line)) elif command == 'STOP_THROB': iList.append(self.parseStopThrob(line)) elif command == 'SHOW_FRIENDS_LIST': iList.append(self.parseShowFriendsList(line)) elif command == 'HIDE_FRIENDS_LIST': iList.append(self.parseHideFriendsList(line)) elif command == 'SHOW_BOOK': iList.append(self.parseShowBook(line)) elif command == 'HIDE_BOOK': iList.append(self.parseHideBook(line)) elif command == 'ENABLE_CLOSE_BOOK': iList.append(self.parseEnableCloseBook(line)) elif command == 'OBSCURE_BOOK': iList.append(self.parseObscureBook(line)) elif command == 'OBSCURE_CHAT': iList.append(self.parseObscureChat(line)) elif command == 'ADD_INVENTORY': iList.append(self.parseAddInventory(line)) elif command == 'SET_INVENTORY': iList.append(self.parseSetInventory(line)) elif command == 'SET_INVENTORY_YPOS': iList.append(self.parseSetInventoryYPos(line)) elif command == 'SET_INVENTORY_DETAIL': iList.append(self.parseSetInventoryDetail(line)) elif command == 'PLAY_SFX': iList.append(self.parsePlaySfx(line)) elif command == 'STOP_SFX': iList.append(self.parseStopSfx(line)) elif command == 'PLAY_ANIM': iList.append(self.parsePlayAnim(line)) elif command == 'LOOP_ANIM': iList.append(self.parseLoopAnim(line)) elif command == 'LERP_POS': iList.append(self.parseLerpPos(line)) elif command == 'LERP_HPR': iList.append(self.parseLerpHpr(line)) elif command == 'LERP_SCALE': iList.append(self.parseLerpScale(line)) elif command == 'LERP_POSHPRSCALE': iList.append(self.parseLerpPosHprScale(line)) elif command == 'LERP_COLOR': iList.append(self.parseLerpColor(line)) elif command == 'LERP_COLOR_SCALE': iList.append(self.parseLerpColorScale(line)) elif command == 'DEPTH_WRITE_ON': iList.append(self.parseDepthWriteOn(line)) elif command == 'DEPTH_WRITE_OFF': iList.append(self.parseDepthWriteOff(line)) elif command == 'DEPTH_TEST_ON': iList.append(self.parseDepthTestOn(line)) elif command == 'DEPTH_TEST_OFF': iList.append(self.parseDepthTestOff(line)) elif command == 'SET_BIN': iList.append(self.parseSetBin(line)) elif command == 'CLEAR_BIN': iList.append(self.parseClearBin(line)) elif command == 'TOON_HEAD': iList.append(self.parseToonHead(line)) elif command == 'SEND_EVENT': iList.append(self.parseSendEvent(line)) elif command == 'FUNCTION': iList.append(self.parseFunction(line)) elif command == 'BLACK_CAT_LISTEN': iList.append(self.parseBlackCatListen(line)) elif command == 'SHOW_THROW_SQUIRT_PREVIEW': if uponTimeout: self.notify.error('SHOW_THROW_SQUIRT_PREVIEW not allowed in an UPON_TIMEOUT') nextEvent = 'doneThrowSquirtPreview' iList.append(Func(self.acceptOnce, nextEvent, self.playNextChapter, [nextEvent])) iList.append(self.parseThrowSquirtPreview(line)) self.closePreviousChapter(iList) chapterList = [] self.currentEvent = nextEvent elif command == 'WAIT_EVENT': if uponTimeout: self.notify.error('WAIT_EVENT not allowed in an UPON_TIMEOUT') nextEvent = self.parseWaitEvent(line) def proceed(self = self, nextEvent = nextEvent): self.playNextChapter(nextEvent) def handleEvent(*args): proceed = args[0] proceed() iList.append(Func(self.acceptOnce, nextEvent, handleEvent, [proceed])) self.closePreviousChapter(iList) chapterList = [] self.currentEvent = nextEvent elif command == 'SET_MUSIC_VOLUME': iList.append(self.parseSetMusicVolume(line)) else: notify.warning('Unknown command token: %s for scriptId: %s on line: %s' % (command, self.scriptId, lineNum)) self.closePreviousChapter(chapterList) if timeoutList: self.timeoutTrack = Sequence(*timeoutList) self.playNextChapter('start') return def closePreviousChapter(self, iList): trackList = self.chapterDict.setdefault(self.currentEvent, []) trackList.append(Sequence(*iList)) def parseLoad(self, line): if len(line) == 3: token, varName, modelPath = line node = loader.loadModel(modelPath.replace('"', '')) elif len(line) == 4: token, varName, modelPath, subNodeName = line node = loader.loadModel(modelPath.replace('"', '')).find('**/' + subNodeName) else: notify.error('invalid parseLoad command') self.setVar(varName, node) def parseLoadSfx(self, line): token, varName, fileName = line sfx = base.loadSfx(fileName) self.setVar(varName, sfx) def parseLoadDialogue(self, line): token, varName, fileName = line if varName == 'tomDialogue_01': notify.debug('VarName tomDialogue getting added. Tutorial Ack: %d' % base.localAvatar.tutorialAck) if base.config.GetString('language', 'english') == 'japanese': dialogue = base.loadSfx(fileName) else: dialogue = None self.setVar(varName, dialogue) return def parseLoadCCDialogue(self, line): token, varName, filenameTemplate = line if self.toon.getStyle().gender == 'm': classicChar = 'mickey' else: classicChar = 'minnie' filename = filenameTemplate % classicChar if base.config.GetString('language', 'english') == 'japanese': dialogue = base.loadSfx(filename) else: dialogue = None self.setVar(varName, dialogue) return def parseLoadChar(self, line): token, name, charType = line char = Char.Char() dna = CharDNA.CharDNA() dna.newChar(charType) char.setDNA(dna) if charType == 'mk' or charType == 'mn': char.startEarTask() char.nametag.manage(base.marginManager) char.addActive() char.hideName() self.setVar(name, char) def parseLoadClassicChar(self, line): token, name = line char = Char.Char() dna = CharDNA.CharDNA() if self.toon.getStyle().gender == 'm': charType = 'mk' else: charType = 'mn' dna.newChar(charType) char.setDNA(dna) char.startEarTask() char.nametag.manage(base.marginManager) char.addActive() char.hideName() self.setVar(name, char) self.chars.append(char) def parseUnloadChar(self, line): token, name = line char = self.getVar(name) track = Sequence() track.append(Func(self.__unloadChar, char)) track.append(Func(self.delVar, name)) return track def parseLoadSuit(self, line): token, name, suitType = line suit = Suit.Suit() dna = SuitDNA.SuitDNA() dna.newSuit(suitType) suit.setDNA(dna) self.setVar(name, suit) def parseSet(self, line): token, varName, value = line self.setVar(varName, value) def parseCall(self, line): token, scriptId = line nmp = NPCMoviePlayer(scriptId, self.toon, self.npc) return Func(nmp.play) def parseLockLocalToon(self, line): return Sequence(Func(self.toon.detachCamera), Func(self.toon.collisionsOff), Func(self.toon.disableAvatarControls), Func(self.toon.stopTrackAnimToSpeed), Func(self.toon.stopUpdateSmartCamera)) def parseFreeLocalToon(self, line): return Sequence(Func(self.toon.attachCamera), Func(self.toon.startTrackAnimToSpeed), Func(self.toon.collisionsOn), Func(self.toon.enableAvatarControls), Func(self.toon.startUpdateSmartCamera)) def parseDebug(self, line): token, str = line return Func(notify.debug, str) def parseReparent(self, line): if len(line) == 3: token, childNodeName, parentNodeName = line subNodeName = None elif len(line) == 4: token, childNodeName, parentNodeName, subNodeName = line childNode = self.getVar(childNodeName) if subNodeName: parentNode = self.getVar(parentNodeName).find(subNodeName) else: parentNode = self.getVar(parentNodeName) return ParentInterval(childNode, parentNode) def parseWrtReparent(self, line): if len(line) == 3: token, childNodeName, parentNodeName = line subNodeName = None elif len(line) == 4: token, childNodeName, parentNodeName, subNodeName = line childNode = self.getVar(childNodeName) if subNodeName: parentNode = self.getVar(parentNodeName).find(subNodeName) else: parentNode = self.getVar(parentNodeName) return WrtParentInterval(childNode, parentNode) def parseShow(self, line): token, nodeName = line node = self.getVar(nodeName) return Func(node.show) def parseHide(self, line): token, nodeName = line node = self.getVar(nodeName) return Func(node.hide) def parsePos(self, line): token, nodeName, x, y, z = line node = self.getVar(nodeName) return Func(node.setPos, x, y, z) def parseHpr(self, line): token, nodeName, h, p, r = line node = self.getVar(nodeName) return Func(node.setHpr, h, p, r) def parseScale(self, line): token, nodeName, x, y, z = line node = self.getVar(nodeName) return Func(node.setScale, x, y, z) def parsePosHprScale(self, line): token, nodeName, x, y, z, h, p, r, sx, sy, sz = line node = self.getVar(nodeName) return Func(node.setPosHprScale, x, y, z, h, p, r, sx, sy, sz) def parseColor(self, line): token, nodeName, r, g, b, a = line node = self.getVar(nodeName) return Func(node.setColor, r, g, b, a) def parseColorScale(self, line): token, nodeName, r, g, b, a = line node = self.getVar(nodeName) return Func(node.setColorScale, r, g, b, a) def parseWait(self, line): token, waitTime = line return Wait(waitTime) def parseChat(self, line): toonId = self.toon.getDoId() avatarName = line[1] avatar = self.getVar(avatarName) chatString = eval('TTLocalizer.' + line[2]) chatFlags = CFSpeech | CFTimeout quitButton, extraChatFlags, dialogueList = self.parseExtraChatArgs(line[3:]) if extraChatFlags: chatFlags |= extraChatFlags if len(dialogueList) > 0: dialogue = dialogueList[0] else: dialogue = None return Func(avatar.setChatAbsolute, chatString, chatFlags, dialogue) def parseClearChat(self, line): toonId = self.toon.getDoId() avatarName = line[1] avatar = self.getVar(avatarName) chatFlags = CFSpeech | CFTimeout return Func(avatar.setChatAbsolute, '', chatFlags) def parseExtraChatArgs(self, args): quitButton = 0 extraChatFlags = None dialogueList = [] for arg in args: if type(arg) == type(0): quitButton = arg elif type(arg) == type(''): if len(arg) > 2 and arg[:2] == 'CF': extraChatFlags = eval(arg) else: dialogueList.append(self.getVar(arg)) else: pass #notify.error('invalid argument type') return (quitButton, extraChatFlags, dialogueList) def parseChatConfirm(self, line): lineLength = len(line) toonId = self.toon.getDoId() avatarName = line[1] avatar = self.getVar(avatarName) chatString = eval('TTLocalizer.' + line[2]) quitButton, extraChatFlags, dialogueList = self.parseExtraChatArgs(line[3:]) return Func(avatar.setPageChat, toonId, 0, chatString, quitButton, extraChatFlags, dialogueList) def parseLocalChatConfirm(self, line): lineLength = len(line) avatarName = line[1] avatar = self.getVar(avatarName) chatString = eval('TTLocalizer.' + line[2]) quitButton, extraChatFlags, dialogueList = self.parseExtraChatArgs(line[3:]) return Func(avatar.setLocalPageChat, chatString, quitButton, extraChatFlags, dialogueList) def parseLocalChatPersist(self, line): lineLength = len(line) avatarName = line[1] avatar = self.getVar(avatarName) chatString = eval('TTLocalizer.' + line[2]) quitButton, extraChatFlags, dialogueList = self.parseExtraChatArgs(line[3:]) if len(dialogueList) > 0: dialogue = dialogueList[0] else: dialogue = None return Func(avatar.setChatAbsolute, chatString, CFSpeech, dialogue) def parseLocalChatToConfirm(self, line): lineLength = len(line) avatarKey = line[1] avatar = self.getVar(avatarKey) toAvatarKey = line[2] toAvatar = self.getVar(toAvatarKey) localizerAvatarName = toAvatar.getName().capitalize() toAvatarName = eval('TTLocalizer.' + localizerAvatarName) chatString = eval('TTLocalizer.' + line[3]) chatString = chatString.replace('%s', toAvatarName) quitButton, extraChatFlags, dialogueList = self.parseExtraChatArgs(line[4:]) return Func(avatar.setLocalPageChat, chatString, quitButton, extraChatFlags, dialogueList) def parseCCChatConfirm(self, line): lineLength = len(line) avatarName = line[1] avatar = self.getVar(avatarName) if self.toon.getStyle().gender == 'm': chatString = eval('TTLocalizer.' + line[2] % 'Mickey') else: chatString = eval('TTLocalizer.' + line[2] % 'Minnie') quitButton, extraChatFlags, dialogueList = self.parseExtraChatArgs(line[3:]) return Func(avatar.setLocalPageChat, chatString, quitButton, extraChatFlags, dialogueList) def parseCCChatToConfirm(self, line): lineLength = len(line) avatarKey = line[1] avatar = self.getVar(avatarKey) toAvatarKey = line[2] toAvatar = self.getVar(toAvatarKey) localizerAvatarName = toAvatar.getName().capitalize() toAvatarName = eval('TTLocalizer.' + localizerAvatarName) if self.toon.getStyle().gender == 'm': chatString = eval('TTLocalizer.' + line[3] % 'Mickey') else: chatString = eval('TTLocalizer.' + line[3] % 'Minnie') chatString = chatString.replace('%s', toAvatarName) quitButton, extraChatFlags, dialogueList = self.parseExtraChatArgs(line[4:]) return Func(avatar.setLocalPageChat, chatString, quitButton, extraChatFlags, dialogueList) def parsePlaySfx(self, line): if len(line) == 2: token, sfxName = line looping = 0 elif len(line) == 3: token, sfxName, looping = line else: notify.error('invalid number of arguments') sfx = self.getVar(sfxName) return Func(base.playSfx, sfx, looping) def parseStopSfx(self, line): token, sfxName = line sfx = self.getVar(sfxName) return Func(sfx.stop) def parsePlayAnim(self, line): if len(line) == 3: token, actorName, animName = line playRate = 1.0 elif len(line) == 4: token, actorName, animName, playRate = line else: notify.error('invalid number of arguments') actor = self.getVar(actorName) return Sequence(Func(actor.setPlayRate, playRate, animName), Func(actor.play, animName)) def parseLoopAnim(self, line): if len(line) == 3: token, actorName, animName = line playRate = 1.0 elif len(line) == 4: token, actorName, animName, playRate = line else: notify.error('invalid number of arguments') actor = self.getVar(actorName) return Sequence(Func(actor.setPlayRate, playRate, animName), Func(actor.loop, animName)) def parseLerpPos(self, line): token, nodeName, x, y, z, t = line node = self.getVar(nodeName) return Sequence(LerpPosInterval(node, t, Point3(x, y, z), blendType='easeInOut'), duration=0.0) def parseLerpHpr(self, line): token, nodeName, h, p, r, t = line node = self.getVar(nodeName) return Sequence(LerpHprInterval(node, t, VBase3(h, p, r), blendType='easeInOut'), duration=0.0) def parseLerpScale(self, line): token, nodeName, x, y, z, t = line node = self.getVar(nodeName) return Sequence(LerpScaleInterval(node, t, VBase3(x, y, z), blendType='easeInOut'), duration=0.0) def parseLerpPosHprScale(self, line): token, nodeName, x, y, z, h, p, r, sx, sy, sz, t = line node = self.getVar(nodeName) return Sequence(LerpPosHprScaleInterval(node, t, VBase3(x, y, z), VBase3(h, p, r), VBase3(sx, sy, sz), blendType='easeInOut'), duration=0.0) def parseLerpColor(self, line): token, nodeName, sr, sg, sb, sa, er, eg, eb, ea, t = line node = self.getVar(nodeName) return Sequence(LerpColorInterval(node, t, VBase4(er, eg, eb, ea), startColorScale=VBase4(sr, sg, sb, sa), blendType='easeInOut'), duration=0.0) def parseLerpColorScale(self, line): token, nodeName, sr, sg, sb, sa, er, eg, eb, ea, t = line node = self.getVar(nodeName) return Sequence(LerpColorScaleInterval(node, t, VBase4(er, eg, eb, ea), startColorScale=VBase4(sr, sg, sb, sa), blendType='easeInOut'), duration=0.0) def parseDepthWriteOn(self, line): token, nodeName, depthWrite = line node = self.getVar(nodeName) return Sequence(Func(node.setDepthWrite, depthWrite)) def parseDepthWriteOff(self, line): token, nodeName = line node = self.getVar(nodeName) return Sequence(Func(node.clearDepthWrite)) def parseDepthTestOn(self, line): token, nodeName, depthTest = line node = self.getVar(nodeName) return Sequence(Func(node.setDepthTest, depthTest)) def parseDepthTestOff(self, line): token, nodeName = line node = self.getVar(nodeName) return Sequence(Func(node.clearDepthTest)) def parseSetBin(self, line): if len(line) == 3: token, nodeName, binName = line sortOrder = 0 else: token, nodeName, binName, sortOrder = line node = self.getVar(nodeName) return Sequence(Func(node.setBin, binName, sortOrder)) def parseClearBin(self, line): token, nodeName = line node = self.getVar(nodeName) return Sequence(Func(node.clearBin)) def parseWaitEvent(self, line): token, eventName = line return eventName def parseSendEvent(self, line): token, eventName = line return Func(messenger.send, eventName) def parseFunction(self, line): token, objectName, functionName = line object = self.getVar(objectName) cfunc = compile('object' + '.' + functionName, '<string>', 'eval') return Func(eval(cfunc)) def parseAddLaffMeter(self, line): token, maxHpDelta = line newMaxHp = maxHpDelta + self.toon.getMaxHp() newHp = newMaxHp laffMeter = self.getVar('laffMeter') return Func(laffMeter.adjustFace, newHp, newMaxHp) def parseLaffMeter(self, line): token, newHp, newMaxHp = line laffMeter = self.getVar('laffMeter') return Func(laffMeter.adjustFace, newHp, newMaxHp) def parseObscureLaffMeter(self, line): token, val = line return Func(self.toon.laffMeter.obscure, val) def parseAddInventory(self, line): token, track, level, number = line inventory = self.getVar('inventory') countSound = base.loadSfx('phase_3.5/audio/sfx/tick_counter.ogg') return Sequence(Func(base.playSfx, countSound), Func(inventory.buttonBoing, track, level), Func(inventory.addItems, track, level, number), Func(inventory.updateGUI, track, level)) def parseSetInventory(self, line): token, track, level, number = line inventory = self.getVar('inventory') return Sequence(Func(inventory.setItem, track, level, number), Func(inventory.updateGUI, track, level)) def parseSetInventoryYPos(self, line): token, track, level, yPos = line inventory = self.getVar('inventory') button = inventory.buttons[track][level].stateNodePath[0] text = button.find('**/+TextNode') return Sequence(Func(text.setY, yPos)) def parseSetInventoryDetail(self, line): if len(line) == 2: token, val = line elif len(line) == 4: token, val, track, level = line else: notify.error('invalid line for parseSetInventoryDetail: %s' % line) inventory = self.getVar('inventory') if val == -1: return Func(inventory.noDetail) elif val == 0: return Func(inventory.hideDetail) elif val == 1: return Func(inventory.showDetail, track, level) else: notify.error('invalid inventory detail level: %s' % val) def parseShowFriendsList(self, line): from toontown.friends import FriendsListPanel return Func(FriendsListPanel.showFriendsListTutorial) def parseHideFriendsList(self, line): from toontown.friends import FriendsListPanel return Func(FriendsListPanel.hideFriendsListTutorial) def parseShowBook(self, line): return Sequence(Func(self.toon.book.setPage, self.toon.mapPage), Func(self.toon.book.enter), Func(self.toon.book.disableBookCloseButton)) def parseEnableCloseBook(self, line): return Sequence(Func(self.toon.book.enableBookCloseButton)) def parseHideBook(self, line): return Func(self.toon.book.exit) def parseObscureBook(self, line): token, val = line return Func(self.toon.book.obscureButton, val) def parseObscureChat(self, line): token, val0, val1 = line return Func(self.toon.chatMgr.obscure, val0, val1) def parseArrowsOn(self, line): arrows = self.getVar('arrows') token, x1, y1, h1, x2, y2, h2 = line return Func(arrows.arrowsOn, x1, y1, h1, x2, y2, h2) def parseArrowsOff(self, line): arrows = self.getVar('arrows') return Func(arrows.arrowsOff) def parseStartThrob(self, line): token, nodeName, r, g, b, a, r2, g2, b2, a2, t = line node = self.getVar(nodeName) startCScale = Point4(r, g, b, a) destCScale = Point4(r2, g2, b2, a2) self.throbIval = Sequence(LerpColorScaleInterval(node, t / 2.0, destCScale, startColorScale=startCScale, blendType='easeInOut'), LerpColorScaleInterval(node, t / 2.0, startCScale, startColorScale=destCScale, blendType='easeInOut')) return Func(self.throbIval.loop) def parseStopThrob(self, line): return Func(self.throbIval.finish) def parseToonHead(self, line): if len(line) == 5: token, toonName, x, z, toggle = line scale = 1.0 else: token, toonName, x, z, toggle, scale = line toon = self.getVar(toonName) toonId = toon.getDoId() toonHeadFrame = self.toonHeads.get(toonId) if not toonHeadFrame: toonHeadFrame = ToonHeadFrame.ToonHeadFrame(toon) toonHeadFrame.tag1Node toonHeadFrame.hide() self.toonHeads[toonId] = toonHeadFrame self.setVar('%sToonHead' % toonName, toonHeadFrame) if toggle: return Sequence(Func(toonHeadFrame.setPos, x, 0, z), Func(toonHeadFrame.setScale, scale), Func(toonHeadFrame.show)) else: return Func(toonHeadFrame.hide) def parseToonHeadScale(self, line): token, toonName, scale = line toon = self.getVar(toonName) toonId = toon.getDoId() toonHeadFrame = self.toonHeads.get(toonId) return Func(toonHeadFrame.setScale, scale) def parseBlackCatListen(self, line): token, enable = line if enable: def phraseSaid(phraseId): toontastic = 315 if phraseId == toontastic: messenger.send('DistributedBlackCatMgr-activate') def enableBlackCatListen(): self.acceptOnce(SpeedChatGlobals.SCStaticTextMsgEvent, phraseSaid) return Func(enableBlackCatListen) else: def disableBlackCatListen(): self.ignore(SpeedChatGlobals.SCStaticTextMsgEvent) return Func(disableBlackCatListen) def parseThrowSquirtPreview(self, line): oldTrackAccess = [None] def grabCurTrackAccess(oldTrackAccess = oldTrackAccess): oldTrackAccess[0] = copy.deepcopy(base.localAvatar.getTrackAccess()) def restoreTrackAccess(oldTrackAccess = oldTrackAccess): base.localAvatar.setTrackAccess(oldTrackAccess[0]) minGagLevel = ToontownBattleGlobals.MIN_LEVEL_INDEX + 1 maxGagLevel = ToontownBattleGlobals.MAX_LEVEL_INDEX + 1 curGagLevel = minGagLevel track1 = base.localAvatar.getFirstTrackPicked() track2 = base.localAvatar.getSecondTrackPicked() def updateGagLevel(t, curGagLevel = curGagLevel): newGagLevel = int(round(t)) if newGagLevel == curGagLevel: return curGagLevel = newGagLevel tempTracks = base.localAvatar.getTrackAccess() tempTracks[track1] = curGagLevel tempTracks[track2] = curGagLevel base.localAvatar.setTrackAccess(tempTracks) return Sequence(Func(grabCurTrackAccess), LerpFunctionInterval(updateGagLevel, fromData=1, toData=7, duration=0.3), WaitInterval(3.5), LerpFunctionInterval(updateGagLevel, fromData=7, toData=1, duration=0.3), Func(restoreTrackAccess), Func(messenger.send, 'doneThrowSquirtPreview')) def parseSetMusicVolume(self, line): if base.config.GetString('language', 'english') == 'japanese': try: loader = base.cr.playGame.place.loader type = 'music' duration = 0 fromLevel = 1.0 if len(line) == 2: token, level = line elif len(line) == 3: token, level, type = line elif len(line) == 4: token, level, type, duration = line elif len(line) == 5: token, level, type, duration, fromLevel = line if type == 'battleMusic': music = loader.battleMusic elif type == 'activityMusic': music = loader.activityMusic else: music = loader.music if duration == 0: return Func(music.setVolume, level) else: def setVolume(level): music.setVolume(level) return LerpFunctionInterval(setVolume, fromData=fromLevel, toData=level, duration=duration) except AttributeError: pass else: return Wait(0.0) readFile()
{ "content_hash": "7d6872a4bf11481c32c6b280681e76b4", "timestamp": "", "source": "github", "line_count": 1095, "max_line_length": 290, "avg_line_length": 39.4027397260274, "alnum_prop": 0.5848050804246049, "repo_name": "Spiderlover/Toontown", "id": "480435fc346f74c03dfb7b74152e39bf05b1d94b", "size": "43146", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "toontown/quest/QuestParser.py", "mode": "33188", "license": "mit", "language": [ { "name": "Batchfile", "bytes": "7774" }, { "name": "Python", "bytes": "17241353" }, { "name": "Shell", "bytes": "7699" } ], "symlink_target": "" }
import random import sys import time from oslo_log import log from oslo_config import cfg import oslo_messaging as messaging opts = [ cfg.StrOpt('server_id', help='A string uniquely identifying current instance. Used' 'by server to distinguish instances.'), ] CONF = cfg.CONF CONF.register_cli_opts(opts) log.register_options(CONF) rnd = random.Random() logger = None class RpcEndpoint(object): def example_method(self, ctxt, param1): logger.info('Somebody is calling example_method with param1=%s' % param1) sec = rnd.randint(30, 180) logger.info('Sleeping for %s seconds' % sec) time.sleep(sec) logger.info('Finished sleeping, returning result') return param1 * 4 def setup(): global logger CONF(sys.argv[1:], project='example_rpc_server') log.setup(CONF, 'example_rpc_server') logger = log.getLogger(__name__) def main(): setup() logger.info('Running example_rpc_server from main()') transport = messaging.get_transport(cfg.CONF) target = messaging.Target(topic='example_rpc', version='1.0', server=CONF.server_id) server = messaging.get_rpc_server(transport, target, endpoints=[RpcEndpoint()]) server.start() server.wait() if __name__ == '__main__': main()
{ "content_hash": "00fecda1f31f44b05f454f587ca75e9a", "timestamp": "", "source": "github", "line_count": 63, "max_line_length": 74, "avg_line_length": 22.365079365079364, "alnum_prop": 0.6139105748757985, "repo_name": "dmitrymex/example-oslo-messaging", "id": "f4ce44e2a5adae307909b7f41857f09f3b0dc827", "size": "2010", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "example_rpc_server.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "4001" } ], "symlink_target": "" }
import argparse import pygame from pygame.locals import * from localizations import initLocalization import engine import global_vars as g from constants import * parser = argparse.ArgumentParser(prog='PyORPG') parser.add_argument('-ip', help='connect to server on ip') parser.add_argument('-p', help='connect to server on port') parser.add_argument('--no-sound', help='disables sound', action='store_false') args = vars(parser.parse_args()) if args['ip'] != None: print "ip todo" pygame.display.set_caption(GAME_NAME) g.screenSurface = pygame.display.set_mode((g.SCREEN_WIDTH, g.SCREEN_HEIGHT)) if __name__ == "__main__": # initialize localisation initLocalization() # start game g.gameEngine = engine.Engine() g.gameEngine.init()
{ "content_hash": "29f00785d9bcb0b83b7ee26e8f2512e6", "timestamp": "", "source": "github", "line_count": 28, "max_line_length": 78, "avg_line_length": 27.178571428571427, "alnum_prop": 0.7201051248357424, "repo_name": "marcusmoller/pyorpg-client", "id": "191a3a8c0a901cc0f8ba130bd895d459680f41b4", "size": "808", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "src/pyorpg.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "500429" }, { "name": "Shell", "bytes": "60" } ], "symlink_target": "" }
"""gsutil exceptions. The exceptions in this module are for use across multiple different classes. """ from __future__ import absolute_import from __future__ import print_function from __future__ import division from __future__ import unicode_literals import six NO_URLS_MATCHED_PREFIX = 'No URLs matched' NO_URLS_MATCHED_GENERIC = (NO_URLS_MATCHED_PREFIX + '. Do the files you\'re operating on exist?') NO_URLS_MATCHED_TARGET = NO_URLS_MATCHED_PREFIX + ': %s' if six.PY3: # StandardError was removed, so use the base exception type instead StandardError = Exception class AbortException(StandardError): """Exception raised when a user aborts a command that needs to do cleanup.""" def __init__(self, reason): StandardError.__init__(self) self.reason = reason def __repr__(self): return 'AbortException: %s' % self.reason def __str__(self): return 'AbortException: %s' % self.reason class CommandException(StandardError): """Exception raised when a problem is encountered running a gsutil command. This exception should be used to signal user errors or system failures (like timeouts), not bugs (like an incorrect param value). For the latter you should raise Exception so we can see where/how it happened via gsutil -D (which will include a stack trace for raised Exceptions). """ def __init__(self, reason, informational=False): """Instantiate a CommandException. Args: reason: Text describing the problem. informational: Indicates reason should be printed as FYI, not a failure. """ StandardError.__init__(self) self.reason = reason self.informational = informational def __repr__(self): return str(self) def __str__(self): return 'CommandException: %s' % self.reason class ControlCException(Exception): """Exception to report to analytics when the user exits via ctrl-C. This exception is never actually raised, but is used by analytics collection to provide a more descriptive name for user exit. """ pass class GcloudStorageTranslationError(Exception): """Exception raised when a gsutil command can't be translated to gcloud.""" pass class HashMismatchException(Exception): """Exception raised when data integrity validation fails.""" pass class IamChOnResourceWithConditionsException(Exception): """Raised when trying to use "iam ch" on an IAM policy with conditions. Because the syntax for conditions is fairly complex, it doesn't make sense to specify them on the command line using a colon-delimited set of values in the same way you'd specify simple bindings - it would be a complex and potentially surprising interface, which isn't what you want when dealing with permissions. Additionally, providing partial functionality -- e.g. if a policy contains bindings with conditions, still allow users to interact with bindings that don't contain conditions -- might sound tempting, but results in a bad user experience. Bindings can be thought of as a mapping from (role, condition) -> [members]. Thus, a user might think they're editing the binding for (role1, condition1), but they'd really be editing the binding for (role1, None). Thus, we just raise an error if we encounter a binding with conditions present, and encourage users to use "iam {get,set}" instead. """ def __init__(self, message): Exception.__init__(self, message) self.message = message def __repr__(self): return str(self) def __str__(self): return 'IamChOnResourceWithConditionsException: %s' % self.message class InvalidUrlError(Exception): """Exception raised when URL is invalid.""" def __init__(self, message): Exception.__init__(self, message) self.message = message def __repr__(self): return str(self) def __str__(self): return 'InvalidUrlError: %s' % self.message class ExternalBinaryError(Exception): """Exception raised when gsutil runs an external binary, and it fails.""" def __init__(self, message): Exception.__init__(self, message) self.message = message def __repr__(self): return 'ExternalBinaryError: %s' % self.message
{ "content_hash": "89bbdb22d217d7160ef427a3c263a83b", "timestamp": "", "source": "github", "line_count": 134, "max_line_length": 80, "avg_line_length": 31.223880597014926, "alnum_prop": 0.7148661567877629, "repo_name": "GoogleCloudPlatform/gsutil", "id": "0590fd108fe978b8ef1263c7d0664db6b891d46d", "size": "5301", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "gslib/exception.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Batchfile", "bytes": "2175" }, { "name": "PowerShell", "bytes": "10051" }, { "name": "Python", "bytes": "3952149" }, { "name": "Shell", "bytes": "7031" } ], "symlink_target": "" }
import matplotlib.pyplot as plt from sklearn_evaluation.plot.util import requires_properties from sklearn_evaluation.report.serialize import EvaluatorHTMLSerializer from sklearn_evaluation.report.report import Report from .util import estimator_type, class_name from . import plot class ClassifierEvaluator(object): """ Encapsuates results from an estimator on a testing set to provide a simplified API from other modules. All parameters are optional, just fill the ones you need for your analysis. Parameters ---------- estimator : sklearn estimator Must have a ``feature_importances_`` attribute. y_true : array-like Target predicted classes (estimator predictions). y_pred : array-like Correct target values (ground truth). y_score : array-like Target scores (estimador predictions). feature_names : array-like Feature names. target_names : list List containing the names of the target classes estimator_name : str Identifier for the model. This can be later used to idenfity the estimator when generaing reports. """ TEMPLATE_NAME = 'classifier.md' def __init__(self, estimator=None, y_true=None, y_pred=None, y_score=None, feature_names=None, target_names=None, estimator_name=None, X=None): self._estimator = estimator self._y_true = y_true self._y_pred = y_pred self._y_score = y_score self._feature_names = feature_names self._target_names = target_names self._estimator_name = estimator_name self._X = X # TODO: perform basic logic checking, # raise Exception if necessary @property def estimator_type(self): """Estimator name (e.g. RandomForestClassifier) """ return estimator_type(self.estimator) @property def estimator_class(self): """Estimator class (e.g. sklearn.ensemble.RandomForestClassifier) """ return class_name(self.estimator) @property def estimator(self): return self._estimator @property def X(self): return self._X @property def y_true(self): return self._y_true @property def y_pred(self): # get predictions if possible if (self._y_pred is None and self.estimator is not None and self.X is not None): self._y_pred = self.estimator.predict(self.X) return self._y_pred @property def y_score(self): # get scores if possible if (self._y_score is None and self.estimator is not None and self.X is not None): self._y_score = self.estimator.predict_proba(self.X) return self._y_score @property def feature_names(self): return self._feature_names @property def target_names(self): return self._target_names @property def estimator_name(self): return self._estimator_name @requires_properties(('y_true', 'y_pred')) def confusion_matrix(self): """Confusion matrix plot """ return plot.confusion_matrix(self.y_true, self.y_pred, self.target_names, ax=_gen_ax()) @requires_properties(('y_true', 'y_score')) def roc(self): """ROC plot """ return plot.roc(self.y_true, self.y_score, ax=_gen_ax()) @requires_properties(('y_true', 'y_score')) def precision_recall(self): """Precision-recall plot """ return plot.precision_recall(self.y_true, self.y_score, ax=_gen_ax()) @requires_properties(('estimator',)) def feature_importances(self): """Feature importances plot """ return plot.feature_importances(self.estimator, feature_names=self.feature_names, ax=_gen_ax()) @requires_properties(('estimator',)) def feature_importances_table(self): """Feature importances table """ from . import table return table.feature_importances(self.estimator, feature_names=self.feature_names) @requires_properties(('y_true', 'y_score')) def precision_at_proportions(self): """Precision at proportions plot """ return plot.precision_at_proportions(self.y_true, self.y_score, ax=_gen_ax()) def html_serializable(self): """ Returns a EvaluatorHTMLSerializer instance, which is an object with the same methods and properties than a ClassifierEvaluator, but it returns HTML serialized versions of each (i.e. evaluator.feature_importances_table() returns a string with the table in HTML format, evaluator.confusion_matrix() returns a HTML image element with the image content encoded in base64), useful for generating reports using some template system """ return EvaluatorHTMLSerializer(self) def make_report(self, template=None): """ Make HTML report Parameters ---------- template: str, or pathlib.Path, optional HTML or Markdown template with jinja2 format. If a pathlib.Path object is passed, the content of the file is read. Within the template, the evaluator is passed as "e", so you can use things like {{e.confusion_matrix()}} or any other attribute/method. If None, a default template is used style: str Path to a css file to apply style to the report. If None, no style will be applied Returns ------- Report Returns the contents of the report if path is None. """ return Report(self.html_serializable(), template) def _gen_ax(): fig = plt.figure() ax = fig.add_subplot(111) return ax
{ "content_hash": "501597e71eb784dd4283dd8b27991b27", "timestamp": "", "source": "github", "line_count": 186, "max_line_length": 79, "avg_line_length": 32.38172043010753, "alnum_prop": 0.6055122032209862, "repo_name": "edublancas/sklearn-model-evaluation", "id": "345aa6a2cee38b389d03283274573f4b5184572e", "size": "6023", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "src/sklearn_evaluation/evaluator.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "8484" }, { "name": "HTML", "bytes": "364346" }, { "name": "Python", "bytes": "48203" } ], "symlink_target": "" }
import sys from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload from azure.core.async_paging import AsyncItemPaged, AsyncList from azure.core.exceptions import ( ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, ResourceNotModifiedError, map_error, ) from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import AsyncHttpResponse from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod from azure.core.rest import HttpRequest from azure.core.tracing.decorator import distributed_trace from azure.core.tracing.decorator_async import distributed_trace_async from azure.core.utils import case_insensitive_dict from azure.mgmt.core.exceptions import ARMErrorFormat from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling from ... import models as _models from ..._vendor import _convert_request from ...operations._inputs_operations import ( build_create_or_replace_request, build_delete_request, build_get_request, build_list_by_streaming_job_request, build_test_request, build_update_request, ) if sys.version_info >= (3, 8): from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports else: from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports T = TypeVar("T") ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] class InputsOperations: """ .. warning:: **DO NOT** instantiate this class directly. Instead, you should access the following operations through :class:`~azure.mgmt.streamanalytics.aio.StreamAnalyticsManagementClient`'s :attr:`inputs` attribute. """ models = _models def __init__(self, *args, **kwargs) -> None: input_args = list(args) self._client = input_args.pop(0) if input_args else kwargs.pop("client") self._config = input_args.pop(0) if input_args else kwargs.pop("config") self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") @overload async def create_or_replace( self, resource_group_name: str, job_name: str, input_name: str, input: _models.Input, if_match: Optional[str] = None, if_none_match: Optional[str] = None, *, content_type: str = "application/json", **kwargs: Any ) -> _models.Input: """Creates an input or replaces an already existing input under an existing streaming job. :param resource_group_name: The name of the resource group. The name is case insensitive. Required. :type resource_group_name: str :param job_name: The name of the streaming job. Required. :type job_name: str :param input_name: The name of the input. Required. :type input_name: str :param input: The definition of the input that will be used to create a new input or replace the existing one under the streaming job. Required. :type input: ~azure.mgmt.streamanalytics.models.Input :param if_match: The ETag of the input. Omit this value to always overwrite the current input. Specify the last-seen ETag value to prevent accidentally overwriting concurrent changes. Default value is None. :type if_match: str :param if_none_match: Set to '*' to allow a new input to be created, but to prevent updating an existing input. Other values will result in a 412 Pre-condition Failed response. Default value is None. :type if_none_match: str :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str :keyword callable cls: A custom type or function that will be passed the direct response :return: Input or the result of cls(response) :rtype: ~azure.mgmt.streamanalytics.models.Input :raises ~azure.core.exceptions.HttpResponseError: """ @overload async def create_or_replace( self, resource_group_name: str, job_name: str, input_name: str, input: IO, if_match: Optional[str] = None, if_none_match: Optional[str] = None, *, content_type: str = "application/json", **kwargs: Any ) -> _models.Input: """Creates an input or replaces an already existing input under an existing streaming job. :param resource_group_name: The name of the resource group. The name is case insensitive. Required. :type resource_group_name: str :param job_name: The name of the streaming job. Required. :type job_name: str :param input_name: The name of the input. Required. :type input_name: str :param input: The definition of the input that will be used to create a new input or replace the existing one under the streaming job. Required. :type input: IO :param if_match: The ETag of the input. Omit this value to always overwrite the current input. Specify the last-seen ETag value to prevent accidentally overwriting concurrent changes. Default value is None. :type if_match: str :param if_none_match: Set to '*' to allow a new input to be created, but to prevent updating an existing input. Other values will result in a 412 Pre-condition Failed response. Default value is None. :type if_none_match: str :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str :keyword callable cls: A custom type or function that will be passed the direct response :return: Input or the result of cls(response) :rtype: ~azure.mgmt.streamanalytics.models.Input :raises ~azure.core.exceptions.HttpResponseError: """ @distributed_trace_async async def create_or_replace( self, resource_group_name: str, job_name: str, input_name: str, input: Union[_models.Input, IO], if_match: Optional[str] = None, if_none_match: Optional[str] = None, **kwargs: Any ) -> _models.Input: """Creates an input or replaces an already existing input under an existing streaming job. :param resource_group_name: The name of the resource group. The name is case insensitive. Required. :type resource_group_name: str :param job_name: The name of the streaming job. Required. :type job_name: str :param input_name: The name of the input. Required. :type input_name: str :param input: The definition of the input that will be used to create a new input or replace the existing one under the streaming job. Is either a model type or a IO type. Required. :type input: ~azure.mgmt.streamanalytics.models.Input or IO :param if_match: The ETag of the input. Omit this value to always overwrite the current input. Specify the last-seen ETag value to prevent accidentally overwriting concurrent changes. Default value is None. :type if_match: str :param if_none_match: Set to '*' to allow a new input to be created, but to prevent updating an existing input. Other values will result in a 412 Pre-condition Failed response. Default value is None. :type if_none_match: str :keyword content_type: Body Parameter content-type. Known values are: 'application/json'. Default value is None. :paramtype content_type: str :keyword callable cls: A custom type or function that will be passed the direct response :return: Input or the result of cls(response) :rtype: ~azure.mgmt.streamanalytics.models.Input :raises ~azure.core.exceptions.HttpResponseError: """ error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, 304: ResourceNotModifiedError, } error_map.update(kwargs.pop("error_map", {}) or {}) _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) api_version = kwargs.pop( "api_version", _params.pop("api-version", "2021-10-01-preview") ) # type: Literal["2021-10-01-preview"] content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str] cls = kwargs.pop("cls", None) # type: ClsType[_models.Input] content_type = content_type or "application/json" _json = None _content = None if isinstance(input, (IO, bytes)): _content = input else: _json = self._serialize.body(input, "Input") request = build_create_or_replace_request( resource_group_name=resource_group_name, job_name=job_name, input_name=input_name, subscription_id=self._config.subscription_id, if_match=if_match, if_none_match=if_none_match, api_version=api_version, content_type=content_type, json=_json, content=_content, template_url=self.create_or_replace.metadata["url"], headers=_headers, params=_params, ) request = _convert_request(request) request.url = self._client.format_url(request.url) # type: ignore pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access request, stream=False, **kwargs ) response = pipeline_response.http_response if response.status_code not in [200, 201]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.Error, pipeline_response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) response_headers = {} if response.status_code == 200: response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) deserialized = self._deserialize("Input", pipeline_response) if response.status_code == 201: response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) deserialized = self._deserialize("Input", pipeline_response) if cls: return cls(pipeline_response, deserialized, response_headers) return deserialized create_or_replace.metadata = {"url": "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StreamAnalytics/streamingjobs/{jobName}/inputs/{inputName}"} # type: ignore @overload async def update( self, resource_group_name: str, job_name: str, input_name: str, input: _models.Input, if_match: Optional[str] = None, *, content_type: str = "application/json", **kwargs: Any ) -> _models.Input: """Updates an existing input under an existing streaming job. This can be used to partially update (ie. update one or two properties) an input without affecting the rest the job or input definition. :param resource_group_name: The name of the resource group. The name is case insensitive. Required. :type resource_group_name: str :param job_name: The name of the streaming job. Required. :type job_name: str :param input_name: The name of the input. Required. :type input_name: str :param input: An Input object. The properties specified here will overwrite the corresponding properties in the existing input (ie. Those properties will be updated). Any properties that are set to null here will mean that the corresponding property in the existing input will remain the same and not change as a result of this PATCH operation. Required. :type input: ~azure.mgmt.streamanalytics.models.Input :param if_match: The ETag of the input. Omit this value to always overwrite the current input. Specify the last-seen ETag value to prevent accidentally overwriting concurrent changes. Default value is None. :type if_match: str :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str :keyword callable cls: A custom type or function that will be passed the direct response :return: Input or the result of cls(response) :rtype: ~azure.mgmt.streamanalytics.models.Input :raises ~azure.core.exceptions.HttpResponseError: """ @overload async def update( self, resource_group_name: str, job_name: str, input_name: str, input: IO, if_match: Optional[str] = None, *, content_type: str = "application/json", **kwargs: Any ) -> _models.Input: """Updates an existing input under an existing streaming job. This can be used to partially update (ie. update one or two properties) an input without affecting the rest the job or input definition. :param resource_group_name: The name of the resource group. The name is case insensitive. Required. :type resource_group_name: str :param job_name: The name of the streaming job. Required. :type job_name: str :param input_name: The name of the input. Required. :type input_name: str :param input: An Input object. The properties specified here will overwrite the corresponding properties in the existing input (ie. Those properties will be updated). Any properties that are set to null here will mean that the corresponding property in the existing input will remain the same and not change as a result of this PATCH operation. Required. :type input: IO :param if_match: The ETag of the input. Omit this value to always overwrite the current input. Specify the last-seen ETag value to prevent accidentally overwriting concurrent changes. Default value is None. :type if_match: str :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str :keyword callable cls: A custom type or function that will be passed the direct response :return: Input or the result of cls(response) :rtype: ~azure.mgmt.streamanalytics.models.Input :raises ~azure.core.exceptions.HttpResponseError: """ @distributed_trace_async async def update( self, resource_group_name: str, job_name: str, input_name: str, input: Union[_models.Input, IO], if_match: Optional[str] = None, **kwargs: Any ) -> _models.Input: """Updates an existing input under an existing streaming job. This can be used to partially update (ie. update one or two properties) an input without affecting the rest the job or input definition. :param resource_group_name: The name of the resource group. The name is case insensitive. Required. :type resource_group_name: str :param job_name: The name of the streaming job. Required. :type job_name: str :param input_name: The name of the input. Required. :type input_name: str :param input: An Input object. The properties specified here will overwrite the corresponding properties in the existing input (ie. Those properties will be updated). Any properties that are set to null here will mean that the corresponding property in the existing input will remain the same and not change as a result of this PATCH operation. Is either a model type or a IO type. Required. :type input: ~azure.mgmt.streamanalytics.models.Input or IO :param if_match: The ETag of the input. Omit this value to always overwrite the current input. Specify the last-seen ETag value to prevent accidentally overwriting concurrent changes. Default value is None. :type if_match: str :keyword content_type: Body Parameter content-type. Known values are: 'application/json'. Default value is None. :paramtype content_type: str :keyword callable cls: A custom type or function that will be passed the direct response :return: Input or the result of cls(response) :rtype: ~azure.mgmt.streamanalytics.models.Input :raises ~azure.core.exceptions.HttpResponseError: """ error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, 304: ResourceNotModifiedError, } error_map.update(kwargs.pop("error_map", {}) or {}) _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) api_version = kwargs.pop( "api_version", _params.pop("api-version", "2021-10-01-preview") ) # type: Literal["2021-10-01-preview"] content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str] cls = kwargs.pop("cls", None) # type: ClsType[_models.Input] content_type = content_type or "application/json" _json = None _content = None if isinstance(input, (IO, bytes)): _content = input else: _json = self._serialize.body(input, "Input") request = build_update_request( resource_group_name=resource_group_name, job_name=job_name, input_name=input_name, subscription_id=self._config.subscription_id, if_match=if_match, api_version=api_version, content_type=content_type, json=_json, content=_content, template_url=self.update.metadata["url"], headers=_headers, params=_params, ) request = _convert_request(request) request.url = self._client.format_url(request.url) # type: ignore pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access request, stream=False, **kwargs ) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.Error, pipeline_response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) response_headers = {} response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) deserialized = self._deserialize("Input", pipeline_response) if cls: return cls(pipeline_response, deserialized, response_headers) return deserialized update.metadata = {"url": "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StreamAnalytics/streamingjobs/{jobName}/inputs/{inputName}"} # type: ignore @distributed_trace_async async def delete( # pylint: disable=inconsistent-return-statements self, resource_group_name: str, job_name: str, input_name: str, **kwargs: Any ) -> None: """Deletes an input from the streaming job. :param resource_group_name: The name of the resource group. The name is case insensitive. Required. :type resource_group_name: str :param job_name: The name of the streaming job. Required. :type job_name: str :param input_name: The name of the input. Required. :type input_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: None or the result of cls(response) :rtype: None :raises ~azure.core.exceptions.HttpResponseError: """ error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, 304: ResourceNotModifiedError, } error_map.update(kwargs.pop("error_map", {}) or {}) _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) api_version = kwargs.pop( "api_version", _params.pop("api-version", "2021-10-01-preview") ) # type: Literal["2021-10-01-preview"] cls = kwargs.pop("cls", None) # type: ClsType[None] request = build_delete_request( resource_group_name=resource_group_name, job_name=job_name, input_name=input_name, subscription_id=self._config.subscription_id, api_version=api_version, template_url=self.delete.metadata["url"], headers=_headers, params=_params, ) request = _convert_request(request) request.url = self._client.format_url(request.url) # type: ignore pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access request, stream=False, **kwargs ) response = pipeline_response.http_response if response.status_code not in [200, 204]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.Error, pipeline_response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {}) delete.metadata = {"url": "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StreamAnalytics/streamingjobs/{jobName}/inputs/{inputName}"} # type: ignore @distributed_trace_async async def get(self, resource_group_name: str, job_name: str, input_name: str, **kwargs: Any) -> _models.Input: """Gets details about the specified input. :param resource_group_name: The name of the resource group. The name is case insensitive. Required. :type resource_group_name: str :param job_name: The name of the streaming job. Required. :type job_name: str :param input_name: The name of the input. Required. :type input_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: Input or the result of cls(response) :rtype: ~azure.mgmt.streamanalytics.models.Input :raises ~azure.core.exceptions.HttpResponseError: """ error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, 304: ResourceNotModifiedError, } error_map.update(kwargs.pop("error_map", {}) or {}) _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) api_version = kwargs.pop( "api_version", _params.pop("api-version", "2021-10-01-preview") ) # type: Literal["2021-10-01-preview"] cls = kwargs.pop("cls", None) # type: ClsType[_models.Input] request = build_get_request( resource_group_name=resource_group_name, job_name=job_name, input_name=input_name, subscription_id=self._config.subscription_id, api_version=api_version, template_url=self.get.metadata["url"], headers=_headers, params=_params, ) request = _convert_request(request) request.url = self._client.format_url(request.url) # type: ignore pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access request, stream=False, **kwargs ) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.Error, pipeline_response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) response_headers = {} response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) deserialized = self._deserialize("Input", pipeline_response) if cls: return cls(pipeline_response, deserialized, response_headers) return deserialized get.metadata = {"url": "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StreamAnalytics/streamingjobs/{jobName}/inputs/{inputName}"} # type: ignore @distributed_trace def list_by_streaming_job( self, resource_group_name: str, job_name: str, select: Optional[str] = None, **kwargs: Any ) -> AsyncIterable["_models.Input"]: """Lists all of the inputs under the specified streaming job. :param resource_group_name: The name of the resource group. The name is case insensitive. Required. :type resource_group_name: str :param job_name: The name of the streaming job. Required. :type job_name: str :param select: The $select OData query parameter. This is a comma-separated list of structural properties to include in the response, or "\ *" to include all properties. By default, all properties are returned except diagnostics. Currently only accepts '*\ ' as a valid value. Default value is None. :type select: str :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either Input or the result of cls(response) :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.streamanalytics.models.Input] :raises ~azure.core.exceptions.HttpResponseError: """ _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) api_version = kwargs.pop( "api_version", _params.pop("api-version", "2021-10-01-preview") ) # type: Literal["2021-10-01-preview"] cls = kwargs.pop("cls", None) # type: ClsType[_models.InputListResult] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, 304: ResourceNotModifiedError, } error_map.update(kwargs.pop("error_map", {}) or {}) def prepare_request(next_link=None): if not next_link: request = build_list_by_streaming_job_request( resource_group_name=resource_group_name, job_name=job_name, subscription_id=self._config.subscription_id, select=select, api_version=api_version, template_url=self.list_by_streaming_job.metadata["url"], headers=_headers, params=_params, ) request = _convert_request(request) request.url = self._client.format_url(request.url) # type: ignore else: request = HttpRequest("GET", next_link) request = _convert_request(request) request.url = self._client.format_url(request.url) # type: ignore request.method = "GET" return request async def extract_data(pipeline_response): deserialized = self._deserialize("InputListResult", pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return deserialized.next_link or None, AsyncList(list_of_elem) async def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access request, stream=False, **kwargs ) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.Error, pipeline_response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) return pipeline_response return AsyncItemPaged(get_next, extract_data) list_by_streaming_job.metadata = {"url": "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StreamAnalytics/streamingjobs/{jobName}/inputs"} # type: ignore async def _test_initial( self, resource_group_name: str, job_name: str, input_name: str, input: Optional[Union[_models.Input, IO]] = None, **kwargs: Any ) -> Optional[_models.ResourceTestStatus]: error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, 304: ResourceNotModifiedError, } error_map.update(kwargs.pop("error_map", {}) or {}) _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) api_version = kwargs.pop( "api_version", _params.pop("api-version", "2021-10-01-preview") ) # type: Literal["2021-10-01-preview"] content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str] cls = kwargs.pop("cls", None) # type: ClsType[Optional[_models.ResourceTestStatus]] content_type = content_type or "application/json" _json = None _content = None if isinstance(input, (IO, bytes)): _content = input else: if input is not None: _json = self._serialize.body(input, "Input") else: _json = None request = build_test_request( resource_group_name=resource_group_name, job_name=job_name, input_name=input_name, subscription_id=self._config.subscription_id, api_version=api_version, content_type=content_type, json=_json, content=_content, template_url=self._test_initial.metadata["url"], headers=_headers, params=_params, ) request = _convert_request(request) request.url = self._client.format_url(request.url) # type: ignore pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access request, stream=False, **kwargs ) response = pipeline_response.http_response if response.status_code not in [200, 202]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.Error, pipeline_response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = None if response.status_code == 200: deserialized = self._deserialize("ResourceTestStatus", pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized _test_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StreamAnalytics/streamingjobs/{jobName}/inputs/{inputName}/test"} # type: ignore @overload async def begin_test( self, resource_group_name: str, job_name: str, input_name: str, input: Optional[_models.Input] = None, *, content_type: str = "application/json", **kwargs: Any ) -> AsyncLROPoller[_models.ResourceTestStatus]: """Tests whether an input’s datasource is reachable and usable by the Azure Stream Analytics service. :param resource_group_name: The name of the resource group. The name is case insensitive. Required. :type resource_group_name: str :param job_name: The name of the streaming job. Required. :type job_name: str :param input_name: The name of the input. Required. :type input_name: str :param input: If the input specified does not already exist, this parameter must contain the full input definition intended to be tested. If the input specified already exists, this parameter can be left null to test the existing input as is or if specified, the properties specified will overwrite the corresponding properties in the existing input (exactly like a PATCH operation) and the resulting input will be tested. Default value is None. :type input: ~azure.mgmt.streamanalytics.models.Input :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. :return: An instance of AsyncLROPoller that returns either ResourceTestStatus or the result of cls(response) :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.streamanalytics.models.ResourceTestStatus] :raises ~azure.core.exceptions.HttpResponseError: """ @overload async def begin_test( self, resource_group_name: str, job_name: str, input_name: str, input: Optional[IO] = None, *, content_type: str = "application/json", **kwargs: Any ) -> AsyncLROPoller[_models.ResourceTestStatus]: """Tests whether an input’s datasource is reachable and usable by the Azure Stream Analytics service. :param resource_group_name: The name of the resource group. The name is case insensitive. Required. :type resource_group_name: str :param job_name: The name of the streaming job. Required. :type job_name: str :param input_name: The name of the input. Required. :type input_name: str :param input: If the input specified does not already exist, this parameter must contain the full input definition intended to be tested. If the input specified already exists, this parameter can be left null to test the existing input as is or if specified, the properties specified will overwrite the corresponding properties in the existing input (exactly like a PATCH operation) and the resulting input will be tested. Default value is None. :type input: IO :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. :return: An instance of AsyncLROPoller that returns either ResourceTestStatus or the result of cls(response) :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.streamanalytics.models.ResourceTestStatus] :raises ~azure.core.exceptions.HttpResponseError: """ @distributed_trace_async async def begin_test( self, resource_group_name: str, job_name: str, input_name: str, input: Optional[Union[_models.Input, IO]] = None, **kwargs: Any ) -> AsyncLROPoller[_models.ResourceTestStatus]: """Tests whether an input’s datasource is reachable and usable by the Azure Stream Analytics service. :param resource_group_name: The name of the resource group. The name is case insensitive. Required. :type resource_group_name: str :param job_name: The name of the streaming job. Required. :type job_name: str :param input_name: The name of the input. Required. :type input_name: str :param input: If the input specified does not already exist, this parameter must contain the full input definition intended to be tested. If the input specified already exists, this parameter can be left null to test the existing input as is or if specified, the properties specified will overwrite the corresponding properties in the existing input (exactly like a PATCH operation) and the resulting input will be tested. Is either a model type or a IO type. Default value is None. :type input: ~azure.mgmt.streamanalytics.models.Input or IO :keyword content_type: Body Parameter content-type. Known values are: 'application/json'. Default value is None. :paramtype content_type: str :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. :return: An instance of AsyncLROPoller that returns either ResourceTestStatus or the result of cls(response) :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.streamanalytics.models.ResourceTestStatus] :raises ~azure.core.exceptions.HttpResponseError: """ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) api_version = kwargs.pop( "api_version", _params.pop("api-version", "2021-10-01-preview") ) # type: Literal["2021-10-01-preview"] content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str] cls = kwargs.pop("cls", None) # type: ClsType[_models.ResourceTestStatus] polling = kwargs.pop("polling", True) # type: Union[bool, AsyncPollingMethod] lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) cont_token = kwargs.pop("continuation_token", None) # type: Optional[str] if cont_token is None: raw_result = await self._test_initial( # type: ignore resource_group_name=resource_group_name, job_name=job_name, input_name=input_name, input=input, api_version=api_version, content_type=content_type, cls=lambda x, y, z: x, headers=_headers, params=_params, **kwargs ) kwargs.pop("error_map", None) def get_long_running_output(pipeline_response): deserialized = self._deserialize("ResourceTestStatus", pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized if polling is True: polling_method = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs)) # type: AsyncPollingMethod elif polling is False: polling_method = cast(AsyncPollingMethod, AsyncNoPolling()) else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output, ) return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_test.metadata = {"url": "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StreamAnalytics/streamingjobs/{jobName}/inputs/{inputName}/test"} # type: ignore
{ "content_hash": "a6e8ffdf884c72424b55514a938db6d1", "timestamp": "", "source": "github", "line_count": 913, "max_line_length": 206, "avg_line_length": 46.68346111719606, "alnum_prop": 0.6443386044765614, "repo_name": "Azure/azure-sdk-for-python", "id": "52df56456794250102436bfeae5f6ab1e46e313c", "size": "43128", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "sdk/streamanalytics/azure-mgmt-streamanalytics/azure/mgmt/streamanalytics/aio/operations/_inputs_operations.py", "mode": "33188", "license": "mit", "language": [ { "name": "Batchfile", "bytes": "1224" }, { "name": "Bicep", "bytes": "24196" }, { "name": "CSS", "bytes": "6089" }, { "name": "Dockerfile", "bytes": "4892" }, { "name": "HTML", "bytes": "12058" }, { "name": "JavaScript", "bytes": "8137" }, { "name": "Jinja", "bytes": "10377" }, { "name": "Jupyter Notebook", "bytes": "272022" }, { "name": "PowerShell", "bytes": "518535" }, { "name": "Python", "bytes": "715484989" }, { "name": "Shell", "bytes": "3631" } ], "symlink_target": "" }
import urlparse from google.appengine.ext import ndb from consts.district_type import DistrictType from models.district import District from models.district_team import DistrictTeam from models.team import Team from models.robot import Robot class FMSAPITeamDetailsParser(object): def __init__(self, year): self.year = year def parse(self, response): """ Parse team info from FMSAPI Returns a tuple of: list of models (Team, DistrictTeam, Robot), and a Boolean indicating if there are more pages to be fetched """ # Get team json # don't need to null check, if error, HTTP code != 200, so we wont' get here current_page = response['pageCurrent'] total_pages = response['pageTotal'] teams = response['teams'] ret_models = [] for teamData in teams: # Fix issue where FIRST's API returns dummy website for all teams if teamData['website'] is not None and 'www.firstinspires.org' in teamData['website']: website = None else: raw_website = teamData.get('website', None) website = urlparse.urlparse(raw_website, 'http').geturl() if raw_website else None # Fix oddity with urlparse having three slashes after the scheme (#1635) website = website.replace('///', '//') if website else None team = Team( id="frc{}".format(teamData['teamNumber']), team_number=teamData['teamNumber'], name=teamData['nameFull'], nickname=teamData['nameShort'], school_name=teamData.get('schoolName'), home_cmp=teamData.get('homeCMP').lower() if teamData.get('homeCMP') else None, city=teamData['city'], state_prov=teamData['stateProv'], country=teamData['country'], website=website, rookie_year=teamData['rookieYear'] ) districtTeam = None if teamData['districtCode']: districtAbbrev = DistrictType.abbrevs[teamData['districtCode'].lower()] districtTeam = DistrictTeam( id=DistrictTeam.renderKeyName(self.year, districtAbbrev, team.key_name), team=ndb.Key(Team, team.key_name), year=self.year, district=districtAbbrev, district_key=ndb.Key(District, District.renderKeyName(self.year, teamData['districtCode'].lower())), ) robot = None if teamData['robotName']: robot = Robot( id=Robot.renderKeyName(team.key_name, self.year), team=ndb.Key(Team, team.key_name), year=self.year, robot_name=teamData['robotName'].strip() ) ret_models.append((team, districtTeam, robot)) return (ret_models, (current_page < total_pages))
{ "content_hash": "7b6a3356f288e787a46f798620e4e69e", "timestamp": "", "source": "github", "line_count": 77, "max_line_length": 120, "avg_line_length": 39.701298701298704, "alnum_prop": 0.5682041216879293, "repo_name": "nwalters512/the-blue-alliance", "id": "72c49a08f6ce8d07a7f5f97a9c46e96c4d6dc4ca", "size": "3057", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "datafeeds/parsers/fms_api/fms_api_team_details_parser.py", "mode": "33188", "license": "mit", "language": [ { "name": "ApacheConf", "bytes": "101" }, { "name": "CSS", "bytes": "374878" }, { "name": "HTML", "bytes": "715987" }, { "name": "JavaScript", "bytes": "402170" }, { "name": "PHP", "bytes": "10727" }, { "name": "Python", "bytes": "2080239" }, { "name": "Ruby", "bytes": "3494" }, { "name": "Shell", "bytes": "45" } ], "symlink_target": "" }
"""Razor2 check plugin.""" import threading import subprocess import oa.plugins.base def kill_process(process, log): log.debug("Razor timed out") process.kill() class Razor2Plugin(oa.plugins.base.BasePlugin): eval_rules = ("check_razor2", "check_razor2_range") options = {"use_razor2": ("bool", True), "razor_timeout": ("int", 5), "razor_config": ("str", "") } def check_razor2_range(self, msg, engine, min, max, target=None): """ Not implemented. Use pyzor in order to check range conditions. :param msg: :param engine: :param min: :param max: :param target: :return: """ pass def check_razor2(self, msg, full="", target=None): """ Checks a mail against the distributed Razor Catalogue by communicating with a Razor Catalogue Server. If we have returncode = 1 => it's not a spam If we have returncode = 0 => it's a spam :param msg: Message to be check :param full: Not used :param target: "None" by default :return:True if the message is listed on Rayzor """ if not self["use_razor2"]: return 0 try: return self.get_local(msg, "razor2_result") except KeyError: pass self.set_local(msg, "razor2_result", 0) if not self["razor_config"]: args = ["razor-check"] else: conf_arg = "-conf=" + self["razor_config"] args = ["razor-check", conf_arg] try: proc = subprocess.Popen(args, stderr=subprocess.PIPE, stdin=subprocess.PIPE, stdout=subprocess.PIPE) except OSError as e: self.ctxt.log.error("Unable to run razor-check: %s", e) return my_timer = threading.Timer(self["razor_timeout"], kill_process, [proc, self.ctxt.log]) try: my_timer.start() proc.communicate(input=str.encode(msg.raw_msg)) finally: my_timer.cancel() if proc.returncode in (1, 0): self.set_local(msg, "razor2_result", proc.returncode) self.ctxt.log.debug(proc.returncode) return not proc.returncode def launch_subprocess(self, msg, name): if not self["razor_config"]: args = [name] else: conf_arg = "-conf=" + self["razor_config"] args = [name, conf_arg] try: proc = subprocess.Popen(args, stderr=subprocess.PIPE, stdin=subprocess.PIPE, stdout=subprocess.PIPE) except OSError: self.ctxt.log.warning("Unable to run " + name) return my_timer = threading.Timer(self["razor_timeout"], kill_process, [proc, self.ctxt.log]) my_timer.start() try: proc.communicate(input=str.encode(msg.raw_msg)) return proc.returncode except (IOError, OSError): self.ctxt.log.warning("Unable to communicate to " + name) finally: my_timer.cancel() return False def plugin_report(self, msg): """Report the message to razor server as spam.""" self.launch_subprocess(msg, "razor-report") def plugin_revoke(self, msg): """Report the message to razor server as ham.""" self.launch_subprocess(msg, "razor-revoke")
{ "content_hash": "a21b5ac5ca6020f576d16bf6db408511", "timestamp": "", "source": "github", "line_count": 116, "max_line_length": 71, "avg_line_length": 31.387931034482758, "alnum_prop": 0.5292502059873662, "repo_name": "SpamExperts/OrangeAssassin", "id": "4c8827a8402be0939fb678e378c318c0130f84bf", "size": "3641", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "oa/plugins/razor2.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "1485966" } ], "symlink_target": "" }
""" Command-line utility for administrative tasks. """ import os import sys if __name__ == "__main__": os.environ.setdefault( "DJANGO_SETTINGS_MODULE", "DjangoUnityTutorial.settings" ) from django.core.management import execute_from_command_line execute_from_command_line(sys.argv)
{ "content_hash": "1c33331554dda945c3492658079ba656", "timestamp": "", "source": "github", "line_count": 16, "max_line_length": 64, "avg_line_length": 19.875, "alnum_prop": 0.6729559748427673, "repo_name": "eamonwoortman/django-unity3d-example", "id": "e9bcdf0c8805076e389c4d25d9044939e0521bfd", "size": "340", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "Django/DjangoUnityTutorial/manage.py", "mode": "33188", "license": "mit", "language": [ { "name": "C#", "bytes": "2471911" }, { "name": "HTML", "bytes": "435" }, { "name": "Python", "bytes": "23776" } ], "symlink_target": "" }
from ..PulsePrimitives import * from ..Compiler import compile_to_hardware from ..PulseSequencePlotter import plot_pulse_files from .helpers import create_cal_seqs from itertools import product import operator from ..ControlFlow import * from ..TdmInstructions import * from functools import reduce from typing import Iterable, Union, Tuple @qfunction def qreset(qubits: Channels.LogicalChannel, signVec: Tuple[bool], measDelay: Union[int,float], buf: Union[int,float], reg_size: int = None, TDM_map: Iterable[Union[int,bool]] = None) -> list: """ For each qubit, build the set of feedback actions to perform when receiving a zero or one in the comparison register Parameters ---------- qubits : Channels.LogicalChannel tuple A hashable (immutable) tuple of qubits to reset signVec : boolean tuple A hashable (immutable) tuple of binary values from the compairison register indicating the measured state of each qubit in the register before reset. measDelay : int/float Delay after measurement before performing the LOADCMP comparison with value in the register (seconds) buf : int/float Wait time between (seconds) reg_size : int, optional Size of the register in number of qubits, including those not reset. Default value is set to len(qubits). TDM_map : bit mask, optional Map each qubit to a TDM digital input. If True, arguments reset a subset of the qubit register (see Reset). Default: np.array(qN, qN-1, ..., q1) from MSB to LSB. Returns ------- seq : QGL.ControlFlow.Call QGL sequence with the qreset calls Examples -------- >>> qreset((q1, q2), (0,1), 2e-6, 2e-6); CALL(H:) """ if not reg_size: reg_size = len(qubits) TDM_map = np.arange(reg_size,0,-1) FbGates = [] for ct, q in enumerate(qubits): if signVec[ct] == 0: FbGates.append([gate(q) for gate in [Id, X]]) else: # inverted logic FbGates.append([gate(q) for gate in [X, Id]]) FbSeq = [reduce(operator.mul, x) for x in product(*FbGates)] # load register seq = [Id(qubits[0], measDelay), qwait(kind='CMP'), Id(qubits[0], buf)] # create a branch for each possible comparison value for ct in range(2**reg_size): # duplicate branches for the irrelevant results # if reg_size > len(TDM_map) meas_result = [(ct & TDM_bit)>0 for TDM_bit in 2**(np.array(TDM_map)-1)] branch_idx = sum([t*2**(len(qubits)-ind-1) for ind,t in enumerate((meas_result))]) seq += qif(ct, [FbSeq[branch_idx]]) return seq def Reset(qubits: Iterable[Channels.LogicalChannel], measDelay: Union[int,float]=1e-6, signVec: Tuple[bool] = None, doubleRound: bool = True, buf: Union[int,float] = 20e-9, showPlot: bool = False, measChans: Channels.LogicalChannel = None, add_cals: bool = True, calRepeats: int = 2, reg_size: int = None, TDM_map: Iterable[Union[int,bool]]=None) -> str: """ Preparation, simultanoeus reset, and measurement of an arbitrary number of qubits Parameters ---------- qubits : Channels.LogicalChannel tuple A hashable (immutable) tuple of qubits to reset measDelay : int/float, optional Delay after measurement before performing the LOADCMP compairison with value in the register (seconds) signVec : boolean tuple, optional conditions for feedback. Tuple of 0 (flip if signal is above threshold) and 1 (flip if below) for each qubit. Default = 0 for all qubits doubleRound : boolean, optional If true, do two rounds of feedback showPlot : boolean, optional Whether to plot measChans : LogicalChannel tuple, optional A hashable (immutable) tuple of qubits to measured. add_cals : boolean, optional Whether to append calibration pulses to the end of the sequence calRepeats : int, optional How many times to repeat calibration scalings (default 2) reg_size : int, optional Size of the register in number of qubits, including those not reset. Default value is set to len(qubits). TDM_map : bit mask, optional Map each qubit to a TDM digital input. If True, arguments reset a subset of the qubit register (see Reset). Default: np.array(qN, qN-1, ..., q1) from MSB to LSB. Returns ------- metafile : string Path to a json metafile with details about the sequences and paths to compiled machine files Examples -------- >>> Reset((q1, q2)); Compiled 12 sequences. >>> mf '/path/to/exp/exp-meta.json' """ if measChans is None: measChans = qubits if signVec == None: signVec = (0, ) * len(qubits) seqs = [prep + [qreset(qubits, signVec, measDelay, buf, reg_size=reg_size, TDM_map=TDM_map)] for prep in create_cal_seqs(qubits, 1)] measBlock = reduce(operator.mul, [MEAS(q) for q in qubits]) if doubleRound: for seq in seqs: seq += [measBlock] seq.append(qreset(qubits, signVec, measDelay, buf, reg_size=reg_size, TDM_map=TDM_map)) # add final measurement for seq in seqs: seq += [measBlock, Id(qubits[0], measDelay), qwait(kind='CMP')] if add_cals: seqs += create_cal_seqs(qubits, calRepeats, measChans=measChans, waitcmp=True) metafile = compile_to_hardware(seqs, 'Reset/Reset') if showPlot: plot_pulse_files(metafile) return metafile # do not make it a subroutine for now def BitFlip3(data_qs: Iterable[Channels.LogicalChannel], ancilla_qs: Iterable[Channels.LogicalChannel], theta: Union[int,float] = None, phi: Union[int,float] = None, nrounds: int = 1, meas_delay: Union[int,float] = 1e-6, add_cals: bool = False, calRepeats: int = 2) -> str: """ Encoding on 3-qubit bit-flip code, followed by n rounds of syndrome detection, and final correction using the n results. Parameters ---------- data_qs : Channels.LogicalChannel tuple A hashable (immutable) tuple of qubits of the 3 code qubits ancilla_qs : Channels.LogicalChannel tuple A hashable (immutable) tuple of qubits of the 2 syndrome qubits theta : int/float, optional Longitudinal rotation angle for the encoded state (radians). Default = None. phi : int/float, optional Azimuthal rotation angle for the encoded state (radians). Default = None. nrounds: int, optional Number of consecutive measurements measDelay : int/float, optional Delay between syndrome check rounds (seconds) add_cals : boolean, optional Whether to append calibration pulses to the end of the sequence calRepeats : int, optional How many times to repeat calibration scalings (default 2) Returns ------- metafile : string Path to a json metafile with details about the sequences and paths to compiled machine files Examples -------- >>> mf = BitFlip3((q1, q2, q3), (q4, q5)); Compiled 12 sequences. >>> mf '/path/to/exp/exp-meta.json' """ if len(data_qs) != 3 or len(ancilla_qs) != 2: raise Exception("Wrong number of qubits") seqs = [ DecodeSetRounds(1,0,nrounds), Invalidate(10, 2*nrounds), Invalidate(11, 0x1)] # encode single-qubit state into 3 qubits if theta and phi: seqs+=[Utheta(data_qs[1], theta, phi), CNOT(data_qs[1], data_qs[0]), CNOT(data_qs[1], data_qs[2])] # multiple rounds of syndrome measurements for n in range(nrounds): seqs+= [CNOT(data_qs[0],ancilla_qs[0])*CNOT(data_qs[1],ancilla_qs[1])], seqs+= [CNOT(data_qs[1], ancilla_qs[0])*CNOT(data_qs[2],ancilla_qs[1])], seqs+= [MEASA(ancilla_qs[0], maddr=(10, 2*n))* MEASA(ancilla_qs[1], maddr=(10, 2*n+1)), Id(ancilla_qs[0], meas_delay), MEAS(data_qs[0], amp=0)* MEAS(data_qs[1], amp=0)* MEAS(data_qs[2], amp=0)] # virtual msmt's just to keep the number of segments # uniform across digitizer channels seqs+=Decode(10, 11, 2*nrounds) seqs+=qwait("RAM",11) seqs+=[MEAS(data_qs[0])* MEAS(data_qs[1])* MEAS(data_qs[2])* MEAS(ancilla_qs[0], amp=0)* MEAS(ancilla_qs[1], amp=0)] # virtual msmt's # apply corrective pulses depending on the decoder result FbGates = [] for q in data_qs: FbGates.append([gate(q) for gate in [Id, X]]) FbSeq = [reduce(operator.mul, x) for x in product(*FbGates)] for k in range(8): seqs += qif(k, [FbSeq[k]]) if add_cals: seqs += create_cal_seqs(qubits, calRepeats) metafile = compile_to_hardware(seqs, 'BitFlip/BitFlip', tdm_seq=True) return metafile def MajorityVoteN(qubits: Iterable[Channels.LogicalChannel], nrounds: int, prep: Iterable[bool] = [], meas_delay: float = 1e-6, add_cals: bool = False, calRepeats: int = 2) -> str: """ Majority vote across multiple measurement results (same or different qubits) Parameters ---------- qubits : Channels.LogicalChannel tuple A hashable (immutable) tuple of qubits for majority vote nrounds: int Number of consecutive measurements prep : boolean iterable, optional Array of binary values mapping X(q) pulses to the list of qubits proivided. Ex: (q1,q2), prep=(1,0) -> would apply a pi pulse to q1 before the majority vote measurement. Default = [] measDelay : int/float, optional Delay between syndrome check rounds (seconds) add_cals : boolean, optional Whether to append calibration pulses to the end of the sequence calRepeats : int, optional How many times to repeat calibration scalings (default 2) Returns ------- metafile : string Path to a json metafile with details about the sequences and paths to compiled machine files Examples -------- >>> mf = MajorityVoteN((q1, q2, q3), 10); Compiled 1 sequences. o INVALIDATE(channel=None, addr=0x1, mask=0x0) o WRITEADDR(channel=None, addr=0x1, value=0xfffff) MAJORITYMASK(in_addr=1, out_addr=0) o INVALIDATE(channel=None, addr=0xa, mask=0xfffff) o INVALIDATE(channel=None, addr=0xb, mask=0x1) MAJORITY(in_addr=a, out_addr=b) >>> mf '/path/to/exp/exp-meta.json' """ nqubits = len(qubits) seqs = [MajorityMask(1, 0, nrounds*nqubits), Invalidate(10, nrounds*nqubits), Invalidate(11, 1)] if prep: seqs += [reduce(operator.mul, [X(q) for n,q in enumerate(qubits) if prep[n]])] for n in range(nrounds): seqs += [reduce(operator.mul, [MEASA(q, (10, nqubits*n+m)) for m,q in enumerate(qubits)]), Id(qubits[0],meas_delay)] seqs+=MajorityVote(10,11, nrounds*nqubits) seqs+=qwait("RAM", 11) seqs+=[Id(qubits[0],100e-9)] seqs+=qif(1,[X(qubits[0])]) # placeholder for any conditional operation seqs=[seqs] if add_cals: seqs += create_cal_seqs(qubits, calRepeats) metafile = compile_to_hardware(seqs, 'MajorityVote/MajorityVote', tdm_seq=True) return metafile
{ "content_hash": "341bedf557dfea5b83718ce7aefc5651", "timestamp": "", "source": "github", "line_count": 337, "max_line_length": 144, "avg_line_length": 36.311572700296736, "alnum_prop": 0.5885429435319114, "repo_name": "BBN-Q/QGL", "id": "a70820df7762780e80889a5e3a31d2fd1a43d80d", "size": "12237", "binary": false, "copies": "1", "ref": "refs/heads/develop", "path": "QGL/BasicSequences/Feedback.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "553405" }, { "name": "Shell", "bytes": "4146" } ], "symlink_target": "" }
"""Solve an arbitrary system""" __docformat__ = "restructuredtext en" import numpy as np import scipy as sp from scipy.sparse import isspmatrix_csr, isspmatrix_bsr, csr_matrix from pyamg import smoothed_aggregation_solver from pyamg.util.linalg import ishermitian, norm __all__ = ['solve', 'solver', 'solver_configuration'] def make_csr(A): """ Convert A to CSR, if A is not a CSR or BSR matrix already. Parameters ---------- A : {array, matrix, sparse matrix} (n x n) matrix to convert to CSR Returns ------- A : {csr_matrix, bsr_matrix} If A is csr_matrix or bsr_matrix, then do nothing and return A. Else, convert A to CSR if possible and return. Examples -------- >>> from pyamg.gallery import poisson >>> from pyamg.blackbox import make_csr >>> A = poisson((40,40),format='csc') >>> Acsr = make_csr(A) """ ## # Convert to CSR or BSR if necessary if not (isspmatrix_csr(A) or isspmatrix_bsr(A)): try: A = csr_matrix(A) print 'Implicit conversion of A to CSR in pyamg.blackbox.make_csr' except: raise TypeError('Argument A must have type csr_matrix or\ bsr_matrix, or be convertible to csr_matrix') # if A.shape[0] != A.shape[1]: raise TypeError('Argument A must be a square') # A = A.asfptype() return A def solver_configuration(A, B=None, verb=True): """ Given an arbitrary matrix A, generate a dictionary of parameters with which to generate a smoothed_aggregation_solver. Parameters ---------- A : {array, matrix, csr_matrix, bsr_matrix} (n x n) matrix to invert, CSR or BSR format preferred for efficiency B : {None, array} Near null-space modes used to construct the smoothed aggregation solver If None, the constant vector is used If (n x m) array, then B is passed to smoothed_aggregation_solver verb : {bool} If True, print verbose output during runtime Returns ------- config : {dict} A dictionary of solver configuration parameters that one uses to generate a smoothed aggregation solver Notes ----- The config dictionary contains the following parameter entries: symmetry, smooth, presmoother, postsmoother, B, strength, max_levels, max_coarse, coarse_solver, aggregate, keep See smoothed_aggregtion_solver for each parameter's description. Examples -------- >>> from pyamg.gallery import poisson >>> from pyamg import solver_configuration >>> A = poisson((40,40),format='csr') >>> solver_config = solver_configuration(A,verb=False) """ # Ensure acceptable format of A A = make_csr(A) config = {} # Detect symmetry if ishermitian(A, fast_check=True): config['symmetry'] = 'hermitian' if verb: print " Detected a Hermitian matrix" else: config['symmetry'] = 'nonsymmetric' if verb: print " Detected a non-Hermitian matrix" # Symmetry dependent parameters if config['symmetry'] == 'hermitian': config['smooth'] = ('energy', {'krylov': 'cg', 'maxiter': 3, 'degree': 2, 'weighting': 'local'}) config['presmoother'] = ('block_gauss_seidel', {'sweep': 'symmetric', 'iterations': 1}) config['postsmoother'] = ('block_gauss_seidel', {'sweep': 'symmetric', 'iterations': 1}) else: config['smooth'] = ('energy', {'krylov': 'gmres', 'maxiter': 3, 'degree': 2, 'weighting': 'local'}) config['presmoother'] = ('gauss_seidel_nr', {'sweep': 'symmetric', 'iterations': 2}) config['postsmoother'] = ('gauss_seidel_nr', {'sweep': 'symmetric', 'iterations': 2}) # Determine near null-space modes B if B is None: # B is the constant for each variable in a node if isspmatrix_bsr(A) and A.blocksize[0] > 1: bsize = A.blocksize[0] config['B'] = np.kron(np.ones((A.shape[0] / bsize, 1), dtype=A.dtype), np.eye(bsize)) else: config['B'] = np.ones((A.shape[0], 1), dtype=A.dtype) elif (type(B) == type(np.zeros((1,)))) or\ (type(B) == type(sp.mat(np.zeros((1,))))): if len(B.shape) == 1: B = B.reshape(-1, 1) if (B.shape[0] != A.shape[0]) or (B.shape[1] == 0): raise TypeError('Invalid dimensions of B, B.shape[0] must equal \ A.shape[0]') else: config['B'] = np.array(B, dtype=A.dtype) else: raise TypeError('Invalid B') if config['symmetry'] == 'hermitian': config['BH'] = None else: config['BH'] = config['B'].copy() # Set non-symmetry related parameters config['strength'] = ('evolution', {'k': 2, 'proj_type': 'l2', 'epsilon': 3.0}) config['max_levels'] = 15 config['max_coarse'] = 500 config['coarse_solver'] = 'pinv' config['aggregate'] = 'standard' config['keep'] = False return config def solver(A, config): """ Given a matrix A and a solver configuration dictionary, generate a smoothed_aggregation_solver Parameters ---------- A : {array, matrix, csr_matrix, bsr_matrix} Matrix to invert, CSR or BSR format preferred for efficiency config : {dict} A dictionary of solver configuration parameters that is used to generate a smoothed aggregation solver Returns ------- ml : {smoothed_aggregation_solver} smoothed aggregation hierarchy Notes ----- config must contain the following parameter entries for smoothed_aggregation_solver: symmetry, smooth, presmoother, postsmoother, B, strength, max_levels, max_coarse, coarse_solver, aggregate, keep Examples -------- >>> from pyamg.gallery import poisson >>> from pyamg import solver_configuration,solver >>> A = poisson((40,40),format='csr') >>> config = solver_configuration(A,verb=False) >>> ml = solver(A,config) """ # Convert A to acceptable format A = make_csr(A) # Generate smoothed aggregation solver try: return \ smoothed_aggregation_solver(A, B=config['B'], BH=config['BH'], smooth=config['smooth'], strength=config['strength'], max_levels=config['max_levels'], max_coarse=config['max_coarse'], coarse_solver=config['coarse_solver'], symmetry=config['symmetry'], aggregate=config['aggregate'], presmoother=config['presmoother'], postsmoother=config['postsmoother'], keep=config['keep']) except: raise TypeError('Failed generating smoothed_aggregation_solver') def solve(A, b, x0=None, tol=1e-5, maxiter=400, return_solver=False, existing_solver=None, verb=True): """ Solve the arbitrary system Ax=b with the best out-of-the box choice for a solver. The matrix A can be non-Hermitian, indefinite, Hermitian positive-definite, complex, etc... Generic and robust settings for smoothed_aggregation_solver(..) are used to invert A. Parameters ---------- A : {array, matrix, csr_matrix, bsr_matrix} Matrix to invert, CSR or BSR format preferred for efficiency b : {array} Right hand side. x0 : {array} : default random vector Initial guess tol : {float} : default 1e-5 Stopping criteria: relative residual r[k]/r[0] tolerance maxiter : {int} : default 400 Stopping criteria: maximum number of allowable iterations return_solver : {bool} : default False True: return the solver generated existing_solver : {smoothed_aggregation_solver} : default None If instance of a multilevel solver, then existing_solver is used to invert A, thus saving time on setup cost. verb : {bool} If True, print verbose output during runtime Returns ------- x : {array} Solution to Ax = b ml : multilevel_solver Optional return of the multilevel structure used for the solve Notes ----- If calling solve(...) multiple times for the same matrix, A, solver reuse is easy and efficient. Set "return_solver=True", and the return value will be a tuple, (x,ml), where ml is the solver used to invert A, and x is the solution to Ax=b. Then, the next time solve(...) is called, set "existing_solver=ml". Examples -------- >>> from numpy import arange, array >>> from pyamg import solve >>> from pyamg.gallery import poisson >>> from pyamg.util.linalg import norm >>> A = poisson((40,40),format='csr') >>> b = array(arange(A.shape[0]), dtype=float) >>> x = solve(A,b,verb=False) >>> print "%1.2e"%(norm(b - A*x)/norm(b)) 6.28e-06 """ # Convert A to acceptable CSR/BSR format A = make_csr(A) # Generate solver if necessary if existing_solver is None: # Parameter dictionary for smoothed_aggregation_solver config = solver_configuration(A, B=None, verb=verb) # Generate solver existing_solver = solver(A, config) else: if existing_solver.levels[0].A.shape[0] != A.shape[0]: raise TypeError('Argument existing_solver must have level 0 matrix\ of same size as A') # Krylov acceleration depends on symmetry of A if existing_solver.levels[0].A.symmetry == 'hermitian': accel = 'cg' else: accel = 'gmres' ## # Initial guess if x0 is None: x0 = np.array(sp.rand(A.shape[0],), dtype=A.dtype) ## # Callback function to print iteration number if verb: iteration = np.zeros((1,)) print " maxiter = %d" % maxiter def callback(x, iteration): iteration[0] = iteration[0] + 1 print " iteration %d" % iteration[0] callback2 = lambda x: callback(x, iteration) else: callback2 = None ## # Solve with accelerated Krylov method x = existing_solver.solve(b, x0=x0, accel=accel, tol=tol, maxiter=maxiter, callback=callback2) if verb: r0 = norm(np.ravel(b) - np.ravel(A * x0)) rk = norm(np.ravel(b) - np.ravel(A * x)) if r0 != 0.0: print " Residual reduction ||r_k||/||r_0|| = %1.2e" % (rk / r0) else: print " Residuals ||r_k||, ||r_0|| = %1.2e, %1.2e" % (rk, r0) if return_solver: return (x.reshape(b.shape), existing_solver) else: return x.reshape(b.shape)
{ "content_hash": "763b00bf4d4b8b86e215f6dec8e6753f", "timestamp": "", "source": "github", "line_count": 331, "max_line_length": 79, "avg_line_length": 34.05740181268882, "alnum_prop": 0.564978266654839, "repo_name": "pombreda/pyamg", "id": "477d9513e33e13ec49dbdecf1e956e95b6c8bfe2", "size": "11273", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "pyamg/blackbox.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "C++", "bytes": "1112880" }, { "name": "CSS", "bytes": "9832" }, { "name": "Makefile", "bytes": "3249" }, { "name": "Matlab", "bytes": "2742" }, { "name": "Python", "bytes": "1215339" }, { "name": "Shell", "bytes": "558" }, { "name": "TeX", "bytes": "232" } ], "symlink_target": "" }
from django.db import models from django.db.models import Avg from django.contrib.auth.models import User from django.core.validators import MaxValueValidator, MinValueValidator, RegexValidator import json from rest_framework.authtoken.models import Token class Cuisine(models.Model): name = models.CharField(max_length=32) def __unicode__(self): return self.name class LocationType(models.Model): name = models.CharField(max_length=32) def __unicode__(self): return self.name class Place(models.Model): name = models.CharField(max_length=128) address = models.TextField() phone_number1 = models.TextField( null = True, blank = False, validators = [RegexValidator(regex=r'^\+?1?\d{9,15}$', message="Phone number must be entered in the format: '+999999999'")] ) phone_number2 = models.TextField( null = True, blank = False, validators = [RegexValidator(regex=r'^\+?1?\d{9,15}$', message="Phone number must be entered in the format: '+999999999'")] ) image_url = models.URLField() location_types = models.ManyToManyField(LocationType) cuisines = models.ManyToManyField(Cuisine) location_lat = models.FloatField() location_lon = models.FloatField() @property def average_stars(self): return Rating.objects.filter(place=self.id).aggregate(Avg('stars')).values()[0] def __unicode__(self): return "[{}, {}, {}, {}, {}, {}, {}]".format(self.name, str(self.address), str(self.phone_number1), str(self.phone_number2), str(self.image_url), str(self.location_lat), str(self.location_lon)) class RecommandationHistory(models.Model): user = models.ForeignKey(User, null=True) time = models.DateTimeField(auto_now_add=True) location_types = models.ManyToManyField(LocationType) cuisines = models.ManyToManyField(Cuisine) location_lat = models.FloatField(null=True) location_lon = models.FloatField(null=True) radius = models.FloatField(null=True) class Rating(models.Model): user = models.ForeignKey(User, null=True,blank=True) place = models.ForeignKey(Place) time = models.DateTimeField(auto_now_add=True) stars = models.SmallIntegerField( validators=[MinValueValidator(1), MaxValueValidator(5)] ) commentary = models.TextField() class Meta: unique_together = (("user", "place"),) def __unicode__(self): return "[{}, {}]".format(unicode(self.user), unicode(self.place))
{ "content_hash": "89fb435d5ef5944c974b3b8655091e45", "timestamp": "", "source": "github", "line_count": 78, "max_line_length": 201, "avg_line_length": 32.243589743589745, "alnum_prop": 0.6783300198807157, "repo_name": "FMI-B20/Yarr", "id": "3d862e544d2109a92b78402c33158dd9169143e2", "size": "2515", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "main/models.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "8523" }, { "name": "HTML", "bytes": "19399" }, { "name": "JavaScript", "bytes": "13999" }, { "name": "Python", "bytes": "40641" } ], "symlink_target": "" }
from scapy import * import __builtin__ try: from cert import * CERT=1 except: CERT=0 ############################################################################# # Helpers ## ############################################################################# def get_cls(name, fallback_cls): return __builtin__.__dict__.get(name, fallback_cls) def strand(x,y): return "".join(map(lambda x,y:chr(ord(x) & ord(y)),x,y)) ############################################################################# ## Constants ## ############################################################################# ETH_P_IPV6 = 0x86dd OPENBSD=sys.platform.startswith("openbsd") FREEBSD=sys.platform.startswith("freebsd") NETBSD = sys.platform.startswith("netbsd") DARWIN=sys.platform.startswith("darwin") WINDOWS = sys.platform.startswith("win") if OPENBSD or FREEBSD or NETBSD or DARWIN: loname = "lo0" else: loname = "lo" # From net/ipv6.h on Linux (+ Additions) IPV6_ADDR_UNICAST = 0x01 IPV6_ADDR_MULTICAST = 0x02 IPV6_ADDR_CAST_MASK = 0x0F IPV6_ADDR_LOOPBACK = 0x10 IPV6_ADDR_GLOBAL = 0x00 IPV6_ADDR_LINKLOCAL = 0x20 IPV6_ADDR_SITELOCAL = 0x40 # deprecated since Sept. 2004 by RFC 3879 IPV6_ADDR_SCOPE_MASK = 0xF0 #IPV6_ADDR_COMPATv4 = 0x80 # deprecated; i.e. ::/96 #IPV6_ADDR_MAPPED = 0x1000 # i.e.; ::ffff:0.0.0.0/96 IPV6_ADDR_6TO4 = 0x0100 # Added to have more specific info (should be 0x0101 ?) IPV6_ADDR_UNSPECIFIED = 0x10000 ############################################################################# ############################################################################# ### Routing/Interfaces stuff ### ############################################################################# ############################################################################# def construct_source_candidate_set(addr, plen, laddr): """ Given all addresses assigned to a specific interface ('laddr' parameter), this function returns the "candidate set" associated with 'addr/plen'. Basically, the function filters all interface addresses to keep only those that have the same scope as provided prefix. This is on this list of addresses that the source selection mechanism will then be performed to select the best source address associated with some specific destination that uses this prefix. """ cset = [] if in6_isgladdr(addr): cset = filter(lambda x: x[1] == IPV6_ADDR_GLOBAL, laddr) elif in6_islladdr(addr): cset = filter(lambda x: x[1] == IPV6_ADDR_LINKLOCAL, laddr) elif in6_issladdr(addr): cset = filter(lambda x: x[1] == IPV6_ADDR_SITELOCAL, laddr) elif in6_ismaddr(addr): if in6_ismnladdr(addr): cset = [('::1', 16, loname)] elif in6_ismgladdr(addr): cset = filter(lambda x: x[1] == IPV6_ADDR_GLOBAL, laddr) elif in6_ismlladdr(addr): cset = filter(lambda x: x[1] == IPV6_ADDR_LINKLOCAL, laddr) elif in6_ismsladdr(addr): cset = filter(lambda x: x[1] == IPV6_ADDR_SITELOCAL, laddr) elif addr == '::' and plen == 0: cset = filter(lambda x: x[1] == IPV6_ADDR_GLOBAL, laddr) cset = map(lambda x: x[0], cset) return cset def get_source_addr_from_candidate_set(dst, candidate_set): """ This function implement a limited version of source address selection algorithm defined in section 5 of RFC 3484. The format is very different from that described in the document because it operates on a set of candidate source address for some specific route. Rationale behind the implementation is to be able to make the right choice for a 6to4 destination when both a 6to4 address and a IPv6 native address are available for that interface. """ if len(candidate_set) == 0: # Should not happen return None if in6_isaddr6to4(dst): tmp = filter(lambda x: in6_isaddr6to4(x), candidate_set) if len(tmp) != 0: return tmp[0] return candidate_set[0] class Route6: def __init__(self): self.invalidate_cache() self.resync() def invalidate_cache(self): self.cache = {} def flush(self): self.invalidate_cache() self.routes = [] def resync(self): # TODO : At the moment, resync will drop existing Teredo routes # if any. Change that ... self.invalidate_cache() self.routes = read_routes6() if self.routes == []: log_loading.info("No IPv6 support in kernel") def __repr__(self): rtlst = [('Destination', 'Next Hop', "iface", "src candidates")] for net,msk,gw,iface,cset in self.routes: rtlst.append(('%s/%i'% (net,msk), gw, iface, ", ".join(cset))) colwidth = map(lambda x: max(map(lambda y: len(y), x)), apply(zip, rtlst)) fmt = " ".join(map(lambda x: "%%-%ds"%x, colwidth)) rt = "\n".join(map(lambda x: fmt % x, rtlst)) return rt # Unlike Scapy's Route.make_route() function, we do not have 'host' and 'net' # parameters. We only have a 'dst' parameter that accepts 'prefix' and # 'prefix/prefixlen' values. # WARNING: Providing a specific device will at the moment not work correctly. def make_route(self, dst, gw=None, dev=None): """Internal function : create a route for 'dst' via 'gw'. """ prefix, plen = (dst.split("/")+["128"])[:2] plen = int(plen) if gw is None: gw = "::" if dev is None: dev, ifaddr, x = self.route(gw) else: # TODO: do better than that # replace that unique address by the list of all addresses lifaddr = in6_getifaddr() devaddrs = filter(lambda x: x[2] == dev, lifaddr) ifaddr = construct_source_candidate_set(prefix, plen, devaddrs) return (prefix, plen, gw, dev, ifaddr) def add(self, *args, **kargs): """Ex: add(dst="2001:db8:cafe:f000::/56") add(dst="2001:db8:cafe:f000::/56", gw="2001:db8:cafe::1") add(dst="2001:db8:cafe:f000::/64", gw="2001:db8:cafe::1", dev="eth0") """ self.invalidate_cache() self.routes.append(self.make_route(*args, **kargs)) def delt(self, dst, gw=None): """ Ex: delt(dst="::/0") delt(dst="2001:db8:cafe:f000::/56") delt(dst="2001:db8:cafe:f000::/56", gw="2001:db8:deca::1") """ tmp = dst+"/128" dst, plen = tmp.split('/')[:2] dst = in6_ptop(dst) plen = int(plen) l = filter(lambda x: in6_ptop(x[0]) == dst and x[1] == plen, self.routes) if gw: gw = in6_ptop(gw) l = filter(lambda x: in6_ptop(x[0]) == gw, self.routes) if len(l) == 0: warning("No matching route found") elif len(l) > 1: warning("Found more than one match. Aborting.") else: i=self.routes.index(l[0]) self.invalidate_cache() del(self.routes[i]) def ifchange(self, iff, addr): the_addr, the_plen = (addr.split("/")+["128"])[:2] the_plen = int(the_plen) naddr = inet_pton(socket.AF_INET6, the_addr) nmask = in6_cidr2mask(the_plen) the_net = inet_ntop(socket.AF_INET6, in6_and(nmask,naddr)) for i in range(len(self.routes)): net,plen,gw,iface,addr = self.routes[i] if iface != iff: continue if gw == '::': self.routes[i] = (the_net,the_plen,gw,iface,the_addr) else: self.routes[i] = (net,the_plen,gw,iface,the_addr) self.invalidate_cache() ip6_neigh_cache.flush() def ifdel(self, iff): """ removes all route entries that uses 'iff' interface. """ new_routes=[] for rt in self.routes: if rt[3] != iff: new_routes.append(rt) self.invalidate_cache() self.routes = new_routes def ifadd(self, iff, addr): """ Add an interface 'iff' with provided address into routing table. Ex: ifadd('eth0', '2001:bd8:cafe:1::1/64') will add following entry into Scapy6 internal routing table: Destination Next Hop iface Def src @ 2001:bd8:cafe:1::/64 :: eth0 2001:bd8:cafe:1::1 prefix length value can be omitted. In that case, a value of 128 will be used. """ addr, plen = (addr.split("/")+["128"])[:2] addr = in6_ptop(addr) plen = int(plen) naddr = inet_pton(socket.AF_INET6, addr) nmask = in6_cidr2mask(plen) prefix = inet_ntop(socket.AF_INET6, in6_and(nmask,naddr)) self.invalidate_cache() self.routes.append((prefix,plen,'::',iff,[addr])) def route(self, dst, dev=None): """ Provide best route to IPv6 destination address, based on Scapy6 internal routing table content. When a set of address is passed (e.g. 2001:db8:cafe:*::1-5) an address of the set is used. Be aware of that behavior when using wildcards in upper parts of addresses ! If 'dst' parameter is a FQDN, name resolution is performed and result is used. if optional 'dev' parameter is provided a specific interface, filtering is performed to limit search to route associated to that interface. """ # Transform "2001:db8:cafe:*::1-5:0/120" to one IPv6 address of the set dst = dst.split("/")[0] savedst = dst # In case following inet_pton() fails dst = dst.replace("*","0") l = dst.find("-") while l >= 0: m = (dst[l:]+":").find(":") dst = dst[:l]+dst[l+m:] l = dst.find("-") try: inet_pton(socket.AF_INET6, dst) except socket.error: dst = socket.getaddrinfo(savedst, None, socket.AF_INET6)[0][-1][0] # TODO : Check if name resolution went well # Deal with dev-specific request for cache search k = dst if dev is not None: k = dst + "%%" + dev if k in self.cache: return self.cache[k] pathes = [] # TODO : review all kinds of addresses (scope and *cast) to see # if we are able to cope with everything possible. I'm convinced # it's not the case. # -- arnaud for p, plen, gw, iface, cset in self.routes: if dev is not None and iface != dev: continue if in6_isincluded(dst, p, plen): pathes.append((plen, (iface, cset, gw))) elif (in6_ismlladdr(dst) and in6_islladdr(p) and in6_islladdr(cset[0])): pathes.append((plen, (iface, cset, gw))) if not pathes: warning("No route found for IPv6 destination %s (no default route?)" % dst) return (loname, "::", "::") # XXX Linux specific pathes.sort() pathes.reverse() best_plen = pathes[0][0] pathes = filter(lambda x: x[0] == best_plen, pathes) res = [] for p in pathes: # Here we select best source address for every route tmp = p[1] srcaddr = get_source_addr_from_candidate_set(dst, p[1][1]) if srcaddr is not None: res.append((p[0], (tmp[0], srcaddr, tmp[2]))) # Symptom : 2 routes with same weight (our weight is plen) # Solution : # - dst is unicast global. Check if it is 6to4 and we have a source # 6to4 address in those available # - dst is link local (unicast or multicast) and multiple output # interfaces are available. Take main one (conf.iface) # - if none of the previous or ambiguity persists, be lazy and keep # first one # XXX TODO : in a _near_ future, include metric in the game if len(res) > 1: tmp = [] if in6_isgladdr(dst) and in6_isaddr6to4(dst): # TODO : see if taking the longest match between dst and # every source addresses would provide better results tmp = filter(lambda x: in6_isaddr6to4(x[1][1]), res) elif in6_ismaddr(dst) or in6_islladdr(dst): # TODO : I'm sure we are not covering all addresses. Check that tmp = filter(lambda x: x[1][0] == conf.iface, res) if tmp: res = tmp # Fill the cache (including dev-specific request) k = dst if dev is not None: k = dst + "%%" + dev self.cache[k] = res[0][1] return res[0][1] def get_if_raw_addr6(iff): """ Returns the main global unicast address associated with provided interface, in network format. If no global address is found, None is returned. """ r = filter(lambda x: x[2] == iff and x[1] == IPV6_ADDR_GLOBAL, in6_getifaddr()) if len(r) == 0: return None else: r = r[0][0] return inet_pton(socket.AF_INET6, r) if LINUX: def in6_getifaddr(): """ Returns a list of 3-tuples of the form (addr, scope, iface) where 'addr' is the address of scope 'scope' associated to the interface 'ifcace'. This is the list of all addresses of all interfaces available on the system. """ ret = [] try: f = open("/proc/net/if_inet6","r") except IOError, err: return ret l = f.readlines() for i in l: # addr, index, plen, scope, flags, ifname tmp = i.split() addr = struct.unpack('4s4s4s4s4s4s4s4s', tmp[0]) addr = in6_ptop(':'.join(addr)) ret.append((addr, int(tmp[3], 16), tmp[5])) # (addr, scope, iface) return ret def read_routes6(): try: f = open("/proc/net/ipv6_route","r") except IOError, err: return [] # 1. destination network # 2. destination prefix length # 3. source network displayed # 4. source prefix length # 5. next hop # 6. metric # 7. reference counter (?!?) # 8. use counter (?!?) # 9. flags # 10. device name routes = [] def proc2r(p): ret = struct.unpack('4s4s4s4s4s4s4s4s', p) ret = ':'.join(ret) return in6_ptop(ret) lifaddr = in6_getifaddr() for l in f.readlines(): d,dp,s,sp,nh,m,rc,us,fl,dev = l.split() fl = int(fl, 16) if fl & RTF_UP == 0: continue if fl & RTF_REJECT: continue d = proc2r(d) ; dp = int(dp, 16) s = proc2r(s) ; sp = int(sp, 16) nh = proc2r(nh) cset = [] # candidate set (possible source addresses) if dev == loname: if d == '::': continue cset = ['::1'] else: devaddrs = filter(lambda x: x[2] == dev, lifaddr) cset = construct_source_candidate_set(d, dp, devaddrs) if len(cset) != 0: routes.append((d, dp, nh, dev, cset)) f.close() return routes elif WINDOWS: def in6_getifaddr(): """ Returns a list of 3-tuples of the form (addr, scope, iface) where 'addr' is the address of scope 'scope' associated to the interface 'ifcace'. This is the list of all addresses of all interfaces available on the system. """ ret = [] # Just some dummy values for now xx = "::1" scope = 128 ifname = loname ret.append(xx, scope, ifname) return ret def read_routes6(): routes = [] # Just some dummy values for now d = '::' dp = 0 nh = '::' dev = loname cset = ['::1'] routes.append((d, dp, nh, dev, cset)) return routes else: def in6_getifaddr(): """ Returns a list of 3-tuples of the form (addr, scope, iface) where 'addr' is the address of scope 'scope' associated to the interface 'ifcace'. This is the list of all addresses of all interfaces available on the system. """ ret = [] i = dnet.intf() for int in i: ifname = int['name'] v6 = [] if int.has_key('alias_addrs'): v6 = int['alias_addrs'] for a in v6: if a.type != dnet.ADDR_TYPE_IP6: continue xx = str(a).split('/')[0] addr = in6_ptop(xx) scope = in6_getscope(addr) ret.append((xx, scope, ifname)) return ret def read_routes6(): f = os.popen("netstat -rn -f inet6") ok = -1 routes = [] lifaddr = in6_getifaddr() for l in f.readlines(): if not l: break l = l.strip() if ok < 0: ok = l.find('Destination') continue # gv 12/12/06: under debugging if NETBSD or OPENBSD: d,nh,fl,_,_,_,dev = l.split()[:7] else: # FREEBSD or DARWIN d,nh,fl,dev = l.split()[:4] if filter(lambda x: x[2] == dev, lifaddr) == []: continue if 'L' in fl: # drop MAC addresses continue if 'link' in nh: nh = '::' cset = [] # candidate set (possible source addresses) dp = 128 if d == 'default': d = '::' dp = 0 if '/' in d: d,dp = d.split("/") dp = int(dp) if '%' in d: d,dev = d.split('%') if '%' in nh: nh,dev = nh.split('%') if loname in dev: cset = ['::1'] nh = '::' else: devaddrs = filter(lambda x: x[2] == dev, lifaddr) cset = construct_source_candidate_set(d, dp, devaddrs) if len(cset) != 0: routes.append((d, dp, nh, dev, cset)) f.close() return routes ########################## ## Neighbor cache stuff ## ########################## NEIGHTIMEOUT=120 def neighsol(addr, src, iface, timeout=1, chainCC=0): """ Sends an ICMPv6 Neighbor Solicitation message to get the MAC address of the neighbor with specified IPv6 address addr. 'src' address is used as source of the message. Message is sent on iface. By default, timeout waiting for an answer is 1 second. If no answer is gathered, None is returned. Else, the answer is returned (ethernet frame). """ nsma = in6_getnsma(inet_pton(socket.AF_INET6, addr)) d = inet_ntop(socket.AF_INET6, nsma) dm = in6_getnsmac(nsma) p = Ether(dst=dm)/IPv6(dst=d, src=src, hlim=255) p /= ICMPv6ND_NS(tgt=addr) p /= ICMPv6NDOptSrcLLAddr(lladdr=get_if_hwaddr(iface)) res = srp1(p,type=ETH_P_IPV6, iface=iface, timeout=1, verbose=0, chainCC=chainCC) return res class neighborCache: # TODO : add some method to modify default value for timeout # TODO : See what we can do for updating the neighbor cache # when receiving a packet. # Note: internally, our neighbor cache is scapy's arp_cache. This allows us # to have it updated when returning from sr() (a fork is done where a # fork is done and the updated cache returned at the end. def __init__(self): self.neighcache = arp_cache def flush(self, statictoo=True): self.neighcache = {} def __repr__(self): res = [("Peer", "Link layer address", "State")] for addr in self.neighcache.keys(): try: inet_pton(socket.AF_INET6, addr) except: continue cur_entry = self.neighcache[addr] status = "REACHABLE" last_contact = cur_entry[1] if last_contact == 0: status = "STATIC" elif ((time.time() - last_contact) < NEIGHTIMEOUT): status = "REACHABLE" else: status = "STALE" res.append((addr, cur_entry[0], status)) colwidth = map(lambda x: max(map(lambda y: len(y), x)), apply(zip, res)) fmt = " ".join(map(lambda x: "%%-%ds"%x, colwidth)) res = "\n".join(map(lambda x: fmt % x, res)) return res def addNeighbor(self, ip6, mac, static=False): """ Add a neighbor to the cache. If optional parameter 'static' is not set to True (the default), the entry will expire in 2 minutes. If 'static' is set to True, the entry in the neighbor cache is made static. This is practical in those cases : - peer's address is not advertised to be on-link - peer doed not answer to NS - you don't want to make queries to keep time or be stealthy, ... """ t = 0 if not static: t = time.time() self.neighcache[ip6] = (mac, t) def makeStatic(self, ip6): """ make the entry static in Scapy6 internal neighbor cache for 'ip6' neighbor. """ if self.neighcache.has_key(ip6): mac = self.neighcache[ip6][0] self.neighcache[ip6] = (mac, 0) else: warning("Unable to make neighbor cache entry for %s static. It does not exist." % ip6) def removeStatic(self, ip6): """ remove the static status for 'ip6' entry in Scapy6 internal neighbor cache. """ if self.neighcache.has_key(ip6): mac = self.neighcache[ip6][0] self.neighcache[ip6] = (mac, time.time()) else: warning("Unable to make neighbor cache entry for %s static. It does not exist." % ip6) def get(self, ip6, chainCC=0): """ Returns the link layer address to use for IPv6 traffic to 'ip6' address. If searched IPv6 address is multicast, then, ethernet address is computed. If that's not the case, Scapy6 routing table is used to find next hop for provided address. If one is found, cache is searched. If a valid (REACHABLE or STATIC) entry exist, content is returned. Else, resolution is performed by sending a Neighbor Solicitation. In all cases, if lookup fails, None is returned. """ if in6_ismaddr(ip6): # Multicast mac = in6_getnsmac(inet_pton(socket.AF_INET6, ip6)) return mac iff,a,nh = conf.route6.route(ip6, dev=conf.iface) if iff == loname: return "ff:ff:ff:ff:ff:ff" if nh != '::': ip6 = nh # Found next hop if self.neighcache.has_key(ip6): # search the cache mac, timeout = self.neighcache[ip6] if timeout and (time.time()-timeout < NEIGHTIMEOUT): return mac res = neighsol(ip6, a, iff, chainCC=chainCC) if res is not None: mac = res.src self.neighcache[ip6] = (mac,time.time()) return mac return None ip6_neigh_cache = neighborCache() def getmacbyip6(ip6, chainCC=0): """ Returns the mac address to be used for provided 'ip6' peer. neighborCache.get() method is used on instantiated neighbor cache. Resolution mechanism is described in associated doc string. (chainCC parameter value ends up being passed to sending function used to perform the resolution, if needed) """ return ip6_neigh_cache.get(ip6, chainCC=chainCC) ############################################################################# ############################################################################# ### IPv6 addresses manipulation routines ### ############################################################################# ############################################################################# class Net6(Gen): # syntax ex. fec0::/126 """Generate a list of IPv6s from a network address or a name""" name = "ipv6" ipaddress = re.compile(r"^([a-fA-F0-9:]+)(/[1]?[0-3]?[0-9])?$") def __init__(self, net): self.repr = net tmp = net.split('/')+["128"] if not self.ipaddress.match(net): tmp[0]=socket.getaddrinfo(tmp[0], None, socket.AF_INET6)[0][-1][0] netmask = int(tmp[1]) self.net = inet_pton(socket.AF_INET6, tmp[0]) self.mask = in6_cidr2mask(netmask) self.plen = netmask def __iter__(self): def m8(i): if i % 8 == 0: return i tuple = filter(lambda x: m8(x), xrange(8, 129)) a = in6_and(self.net, self.mask) tmp = map(lambda x: x, struct.unpack('16B', a)) def parse_digit(a, netmask): netmask = min(8,max(netmask,0)) a = (int(a) & (0xffL<<netmask),(int(a) | (0xffL>>(8-netmask)))+1) return a self.parsed = map(lambda x,y: parse_digit(x,y), tmp, map(lambda x,nm=self.plen: x-nm, tuple)) def rec(n, l): if n and n % 2 == 0: sep = ':' else: sep = '' if n == 16: return l else: ll = [] for i in xrange(*self.parsed[n]): for y in l: ll += [y+sep+'%.2x'%i] return rec(n+1, ll) return iter(rec(0, [''])) def __repr__(self): return "<Net6 %s>" % self.repr # Think before modify it : for instance, FE::1 does exist and is unicast # there are many others like that. # TODO : integrate Unique Local Addresses def in6_getAddrType(addr): naddr = inet_pton(socket.AF_INET6, addr) paddr = inet_ntop(socket.AF_INET6, naddr) # normalize addrType = 0 # _Assignable_ Global Unicast Address space # is defined in RFC 3513 as those in 2000::/3 if ((struct.unpack("B", naddr[0])[0] & 0xE0) == 0x20): addrType = (IPV6_ADDR_UNICAST | IPV6_ADDR_GLOBAL) if naddr[:2] == ' \x02': # Mark 6to4 @ addrType |= IPV6_ADDR_6TO4 elif naddr[0] == '\xff': # multicast addrScope = paddr[3] if addrScope == '2': addrType = (IPV6_ADDR_LINKLOCAL | IPV6_ADDR_MULTICAST) elif addrScope == 'e': addrType = (IPV6_ADDR_GLOBAL | IPV6_ADDR_MULTICAST) else: addrType = (IPV6_ADDR_GLOBAL | IPV6_ADDR_MULTICAST) elif ((naddr[0] == '\xfe') and ((int(paddr[2], 16) & 0xC) == 0x8)): addrType = (IPV6_ADDR_UNICAST | IPV6_ADDR_LINKLOCAL) elif paddr == "::1": addrType = IPV6_ADDR_LOOPBACK elif paddr == "::": addrType = IPV6_ADDR_UNSPECIFIED else: # Everything else is global unicast (RFC 3513) # Even old deprecated (RFC3879) Site-Local addresses addrType = (IPV6_ADDR_GLOBAL | IPV6_ADDR_UNICAST) return addrType def find_ifaddr2(addr, plen, laddr): dstAddrType = in6_getAddrType(addr) if dstAddrType == IPV6_ADDR_UNSPECIFIED: # Shouldn't happen as dst addr return None if dstAddrType == IPV6_ADDR_LOOPBACK: return None tmp = [[]] + map(lambda (x,y,z): (in6_getAddrType(x), x, y, z), laddr) def filterSameScope(l, t): if (t[0] & dstAddrType & IPV6_ADDR_SCOPE_MASK) == 0: l.append(t) return l sameScope = reduce(filterSameScope, tmp) l = len(sameScope) if l == 1: # Only one address for our scope return sameScope[0][1] elif l > 1: # Muliple addresses for our scope stfAddr = filter(lambda x: x[0] & IPV6_ADDR_6TO4, sameScope) nativeAddr = filter(lambda x: not (x[0] & IPV6_ADDR_6TO4), sameScope) if not (dstAddrType & IPV6_ADDR_6TO4): # destination is not 6to4 if len(nativeAddr) != 0: return nativeAddr[0][1] return stfAddr[0][1] else: # Destination is 6to4, try to use source 6to4 addr if any if len(stfAddr) != 0: return stfAddr[0][1] return nativeAddr[0][1] else: return None def in6_mactoifaceid(mac, ulbit=None): """ Compute the interface ID in modified EUI-64 format associated to the Ethernet address provided as input. value taken by U/L bit in the interface identifier is basically the reversed value of that in given MAC address it can be forced to a specific value by using optional 'ulbit' parameter. """ if len(mac) != 17: return None m = "".join(mac.split(':')) if len(m) != 12: return None first = int(m[0:2], 16) if ulbit is None or not (ulbit == 0 or ulbit == 1): ulbit = [1,'-',0][first & 0x02] ulbit *= 2 first = "%.02x" % ((first & 0xFD) | ulbit) eui64 = first + m[2:4] + ":" + m[4:6] + "FF:FE" + m[6:8] + ":" + m[8:12] return eui64.upper() def in6_ifaceidtomac(ifaceid): # TODO: finish commenting function behavior """ Extract the mac address from provided iface ID. Iface ID is provided in printable format ("XXXX:XXFF:FEXX:XXXX", eventually compressed). None is returned on error. """ try: ifaceid = inet_pton(socket.AF_INET6, "::"+ifaceid)[8:16] except: return None if ifaceid[3:5] != '\xff\xfe': return None first = struct.unpack("B", ifaceid[:1])[0] ulbit = 2*[1,'-',0][first & 0x02] first = struct.pack("B", ((first & 0xFD) | ulbit)) oui = first + ifaceid[1:3] end = ifaceid[5:] l = map(lambda x: "%.02x" % struct.unpack("B", x)[0], list(oui+end)) return ":".join(l) def in6_addrtomac(addr): """ Extract the mac address from provided address. None is returned on error. """ mask = inet_pton(socket.AF_INET6, "::ffff:ffff:ffff:ffff") x = in6_and(mask, inet_pton(socket.AF_INET6, addr)) ifaceid = inet_ntop(socket.AF_INET6, x)[2:] return in6_ifaceidtomac(ifaceid) def in6_addrtovendor(addr): """ Extract the MAC address from a modified EUI-64 constructed IPv6 address provided and use the IANA oui.txt file to get the vendor. The database used for the conversion is the one loaded by Scapy, based on Wireshark (/usr/share/wireshark/wireshark/manuf) None is returned on error, "UNKNOWN" if the vendor is unknown. """ mac = in6_addrtomac(addr) if mac is None: return None res = conf.manufdb._get_manuf(mac) if len(res) == 17 and res.count(':') != 5: # Mac address, i.e. unknown res = "UNKNOWN" return res def in6_getLinkScopedMcastAddr(addr, grpid=None, scope=2): """ Generate a Link-Scoped Multicast Address as described in RFC 4489. Returned value is in printable notation. 'addr' parameter specifies the link-local address to use for generating Link-scoped multicast address IID. By default, the function returns a ::/96 prefix (aka last 32 bits of returned address are null). If a group id is provided through 'grpid' parameter, last 32 bits of the address are set to that value (accepted formats : '\x12\x34\x56\x78' or '12345678' or 0x12345678 or 305419896). By default, generated address scope is Link-Local (2). That value can be modified by passing a specific 'scope' value as an argument of the function. RFC 4489 only authorizes scope values <= 2. Enforcement is performed by the function (None will be returned). If no link-local address can be used to generate the Link-Scoped IPv6 Multicast address, or if another error occurs, None is returned. """ if not scope in [0, 1, 2]: return None try: if not in6_islladdr(addr): return None addr = inet_pton(socket.AF_INET6, addr) except: warning("in6_getLinkScopedMcastPrefix(): Invalid address provided") return None iid = addr[8:] if grpid is None: grpid = '\x00\x00\x00\x00' else: if type(grpid) is str: if len(grpid) == 8: try: grpid = int(grpid, 16) & 0xffffffff except: warning("in6_getLinkScopedMcastPrefix(): Invalid group id provided") return None elif len(grpid) == 4: try: grpid = struct.unpack("!I", grpid)[0] except: warning("in6_getLinkScopedMcastPrefix(): Invalid group id provided") return None grpid = struct.pack("!I", grpid) flgscope = struct.pack("B", 0xff & ((0x3 << 4) | scope)) plen = '\xff' res = '\x00' a = '\xff' + flgscope + res + plen + iid + grpid return inet_ntop(socket.AF_INET6, a) def in6_get6to4Prefix(addr): """ Returns the /48 6to4 prefix associated with provided IPv4 address On error, None is returned. No check is performed on public/private status of the address """ try: addr = inet_pton(socket.AF_INET, addr) addr = inet_ntop(socket.AF_INET6, '\x20\x02'+addr+'\x00'*10) except: return None return addr def in6_6to4ExtractAddr(addr): """ Extract IPv4 address embbeded in 6to4 address. Passed address must be a 6to4 addrees. None is returned on error. """ try: addr = inet_pton(socket.AF_INET6, addr) except: return None if addr[:2] != " \x02": return None return inet_ntop(socket.AF_INET, addr[2:6]) def in6_getLocalUniquePrefix(): """ Returns a pseudo-randomly generated Local Unique prefix. Function follows recommandation of Section 3.2.2 of RFC 4193 for prefix generation. """ # Extracted from RFC 1305 (NTP) : # NTP timestamps are represented as a 64-bit unsigned fixed-point number, # in seconds relative to 0h on 1 January 1900. The integer part is in the # first 32 bits and the fraction part in the last 32 bits. # epoch = (1900, 1, 1, 0, 0, 0, 5, 1, 0) # x = time.time() # from time import gmtime, strftime, gmtime, mktime # delta = mktime(gmtime(0)) - mktime(self.epoch) # x = x-delta tod = time.time() # time of day. Will bother with epoch later i = int(tod) j = int((tod - i)*(2**32)) tod = struct.pack("!II", i,j) # TODO: Add some check regarding system address gathering rawmac = get_if_raw_hwaddr(conf.iface)[1] mac = ":".join(map(lambda x: "%.02x" % ord(x), list(rawmac))) # construct modified EUI-64 ID eui64 = inet_pton(socket.AF_INET6, '::' + in6_mactoifaceid(mac))[8:] import sha globalid = sha.new(tod+eui64).digest()[:5] return inet_ntop(socket.AF_INET6, '\xfd' + globalid + '\x00'*10) def in6_getRandomizedIfaceId(ifaceid, previous=None): """ Implements the interface ID generation algorithm described in RFC 3041. The function takes the Modified EUI-64 interface identifier generated as described in RFC 4291 and an optional previous history value (the first element of the output of this function). If no previous interface identifier is provided, a random one is generated. The function returns a tuple containing the randomized interface identifier and the history value (for possible future use). Input and output values are provided in a "printable" format as depicted below. ex: >>> in6_getRandomizedIfaceId('20b:93ff:feeb:2d3') ('4c61:76ff:f46a:a5f3', 'd006:d540:db11:b092') >>> in6_getRandomizedIfaceId('20b:93ff:feeb:2d3', previous='d006:d540:db11:b092') ('fe97:46fe:9871:bd38', 'eeed:d79c:2e3f:62e') """ s = "" if previous is None: d = "".join(map(chr, range(256))) for i in range(8): s += random.choice(d) previous = s s = inet_pton(socket.AF_INET6, "::"+ifaceid)[8:] + previous import md5 s = md5.new(s).digest() s1,s2 = s[:8],s[8:] s1 = chr(ord(s1[0]) | 0x04) + s1[1:] s1 = inet_ntop(socket.AF_INET6, "\xff"*8 + s1)[20:] s2 = inet_ntop(socket.AF_INET6, "\xff"*8 + s2)[20:] return (s1, s2) _rfc1924map = [ '0','1','2','3','4','5','6','7','8','9','A','B','C','D','E', 'F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T', 'U','V','W','X','Y','Z','a','b','c','d','e','f','g','h','i', 'j','k','l','m','n','o','p','q','r','s','t','u','v','w','x', 'y','z','!','#','$','%','&','(',')','*','+','-',';','<','=', '>','?','@','^','_','`','{','|','}','~' ] def in6_ctop(addr): """ Convert an IPv6 address in Compact Representation Notation (RFC 1924) to printable representation ;-) Returns None on error. """ if len(addr) != 20 or not reduce(lambda x,y: x and y, map(lambda x: x in _rfc1924map, addr)): return None i = 0 for c in addr: j = _rfc1924map.index(c) i = 85*i + j res = [] for j in range(4): res.append(struct.pack("!I", i%2**32)) i = i/(2**32) res.reverse() return inet_ntop(socket.AF_INET6, "".join(res)) def in6_ptoc(addr): """ Converts an IPv6 address in printable representation to RFC 1924 Compact Representation ;-) Returns None on error. """ try: d=struct.unpack("!IIII", inet_pton(socket.AF_INET6, addr)) except: return None res = 0 m = [2**96, 2**64, 2**32, 1] for i in range(4): res += d[i]*m[i] rem = res res = [] while rem: res.append(_rfc1924map[rem%85]) rem = rem/85 res.reverse() return "".join(res) def in6_isaddr6to4(x): """ Return True if provided address (in printable format) is a 6to4 address (being in 2002::/16). """ x = inet_pton(socket.AF_INET6, x) return x[:2] == ' \x02' conf.teredoPrefix = "2001::" # old one was 3ffe:831f (it is a /32) conf.teredoServerPort = 3544 def in6_isaddrTeredo(x): """ Return True if provided address is a Teredo, meaning it is under the /32 conf.teredoPrefix prefix value (by default, 2001::). Otherwise, False is returned. Address must be passed in printable format. """ our = inet_pton(socket.AF_INET6, x)[0:4] teredoPrefix = inet_pton(socket.AF_INET6, conf.teredoPrefix)[0:4] return teredoPrefix == our def teredoAddrExtractInfo(x): """ Extract information from a Teredo address. Return value is a 4-tuple made of IPv4 address of Teredo server, flag value (int), mapped address (non obfuscated) and mapped port (non obfuscated). No specific checks are performed on passed address. """ addr = inet_pton(socket.AF_INET6, x) server = inet_ntop(socket.AF_INET, addr[4:8]) flag = struct.unpack("!H",addr[8:10])[0] mappedport = struct.unpack("!H",strxor(addr[10:12],'\xff'*2))[0] mappedaddr = inet_ntop(socket.AF_INET, strxor(addr[12:16],'\xff'*4)) return server, flag, mappedaddr, mappedport def in6_iseui64(x): """ Return True if provided address has an interface identifier part created in modified EUI-64 format (meaning it matches *::*:*ff:fe*:*). Otherwise, False is returned. Address must be passed in printable format. """ eui64 = inet_pton(socket.AF_INET6, '::ff:fe00:0') x = in6_and(inet_pton(socket.AF_INET6, x), eui64) return x == eui64 def in6_isanycast(x): # RFC 2526 if in6_iseui64(x): s = '::fdff:ffff:ffff:ff80' x = in6_and(x, inet_pton(socket.AF_INET6, '::ffff:ffff:ffff:ff80')) x = in6_and(x, inet_pton(socket.AF_INET6, s)) return x == inet_pton(socket.AF_INET6, s) else: # not EUI-64 #| n bits | 121-n bits | 7 bits | #+---------------------------------+------------------+------------+ #| subnet prefix | 1111111...111111 | anycast ID | #+---------------------------------+------------------+------------+ # | interface identifier field | warning('in6_isanycast(): TODO not EUI-64') return 0 def _in6_bitops(a1, a2, operator=0): a1 = struct.unpack('4I', a1) a2 = struct.unpack('4I', a2) fop = [ lambda x,y: x | y, lambda x,y: x & y, lambda x,y: x ^ y ] ret = map(fop[operator%len(fop)], a1, a2) t = ''.join(map(lambda x: struct.pack('I', x), ret)) return t def in6_or(a1, a2): """ Provides a bit to bit OR of provided addresses. They must be passed in network format. Return value is also an IPv6 address in network format. """ return _in6_bitops(a1, a2, 0) def in6_and(a1, a2): """ Provides a bit to bit AND of provided addresses. They must be passed in network format. Return value is also an IPv6 address in network format. """ return _in6_bitops(a1, a2, 1) def in6_xor(a1, a2): """ Provides a bit to bit XOR of provided addresses. They must be passed in network format. Return value is also an IPv6 address in network format. """ return _in6_bitops(a1, a2, 2) def in6_cidr2mask(m): """ Return the mask (bitstring) associated with provided length value. For instance if function is called on 48, return value is '\xff\xff\xff\xff\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'. """ if m > 128 or m < 0: raise Scapy_Exception("value provided to in6_cidr2mask outside [0, 128] domain (%d)" % m) t = [] for i in xrange(0, 4): t.append(max(0, 2**32 - 2**(32-min(32, m)))) m -= 32 return ''.join(map(lambda x: struct.pack('!I', x), t)) def in6_getnsma(a): """ Return link-local solicited-node multicast address for given address. Passed address must be provided in network format. Returned value is also in network format. """ r = in6_and(a, inet_pton(socket.AF_INET6, '::ff:ffff')) r = in6_or(inet_pton(socket.AF_INET6, 'ff02::1:ff00:0'), r) return r def in6_getnsmac(a): # return multicast Ethernet address associated with multicast v6 destination """ Return the multicast mac address associated with provided IPv6 address. Passed address must be in network format. """ a = struct.unpack('16B', a)[-4:] mac = '33:33:' mac += ':'.join(map(lambda x: '%.2x' %x, a)) return mac def in6_getha(prefix): """ Return the anycast address associated with all home agents on a given subnet. """ r = in6_and(inet_pton(socket.AF_INET6, prefix), in6_cidr2mask(64)) r = in6_or(r, inet_pton(socket.AF_INET6, '::fdff:ffff:ffff:fffe')) return inet_ntop(socket.AF_INET6, r) def in6_ptop(str): """ Normalizes IPv6 addresses provided in printable format, returning the same address in printable format. (2001:0db8:0:0::1 -> 2001:db8::1) """ return inet_ntop(socket.AF_INET6, inet_pton(socket.AF_INET6, str)) def in6_isincluded(addr, prefix, plen): """ Returns True when 'addr' belongs to prefix/plen. False otherwise. """ temp = inet_pton(socket.AF_INET6, addr) pref = in6_cidr2mask(plen) zero = inet_pton(socket.AF_INET6, prefix) return zero == in6_and(temp, pref) def in6_isdocaddr(str): """ Returns True if provided address in printable format belongs to 2001:db8::/32 address space reserved for documentation (as defined in RFC 3849). """ return in6_isincluded(str, '2001:db8::', 32) def in6_islladdr(str): """ Returns True if provided address in printable format belongs to _allocated_ link-local unicast address space (fe80::/10) """ return in6_isincluded(str, 'fe80::', 10) def in6_issladdr(str): """ Returns True if provided address in printable format belongs to _allocated_ site-local address space (fec0::/10). This prefix has been deprecated, address being now reserved by IANA. Function will remain for historic reasons. """ return in6_isincluded(str, 'fec0::', 10) def in6_isuladdr(str): """ Returns True if provided address in printable format belongs to Unique local address space (fc00::/7). """ return in6_isincluded(str, 'fc::', 7) # TODO : we should see the status of Unique Local addresses against # global address space. # Up-to-date information is available through RFC 3587. # We should review function behavior based on its content. def in6_isgladdr(str): """ Returns True if provided address in printable format belongs to _allocated_ global address space (2000::/3). Please note that, Unique Local addresses (FC00::/7) are not part of global address space, and won't match. """ return in6_isincluded(str, '2000::', 3) def in6_ismaddr(str): """ Returns True if provided address in printable format belongs to allocated Multicast address space (ff00::/8). """ return in6_isincluded(str, 'ff00::', 8) def in6_ismnladdr(str): """ Returns True if address belongs to node-local multicast address space (ff01::/16) as defined in RFC """ return in6_isincluded(str, 'ff01::', 16) def in6_ismgladdr(str): """ Returns True if address belongs to global multicast address space (ff0e::/16). """ return in6_isincluded(str, 'ff0e::', 16) def in6_ismlladdr(str): """ Returns True if address balongs to link-local multicast address space (ff02::/16) """ return in6_isincluded(str, 'ff02::', 16) def in6_ismsladdr(str): """ Returns True if address belongs to site-local multicast address space (ff05::/16). Site local address space has been deprecated. Function remains for historic reasons. """ return in6_isincluded(str, 'ff05::', 16) def in6_isaddrllallnodes(str): """ Returns True if address is the link-local all-nodes multicast address (ff02::1). """ return (inet_pton(socket.AF_INET6, "ff02::1") == inet_pton(socket.AF_INET6, str)) def in6_isaddrllallservers(str): """ Returns True if address is the link-local all-servers multicast address (ff02::2). """ return (inet_pton(socket.AF_INET6, "ff02::2") == inet_pton(socket.AF_INET6, str)) def in6_getscope(addr): """ Returns the scope of the address. """ if in6_isgladdr(addr): scope = IPV6_ADDR_GLOBAL elif in6_islladdr(addr): scope = IPV6_ADDR_LINKLOCAL elif in6_issladdr(addr): scope = IPV6_ADDR_SITELOCAL elif in6_ismaddr(addr): scope = IPV6_ADDR_MULTICAST elif addr == '::1': scope = IPV6_ADDR_LOOPBACK else: scope = -1 return scope ############################################################################# ############################################################################# ### IPv6 Class ### ############################################################################# ############################################################################# class IP6Field(Field): def __init__(self, name, default): Field.__init__(self, name, default, "16s") def h2i(self, pkt, x): if type(x) is str: try: x = in6_ptop(x) except socket.error: x = Net6(x) elif type(x) is list: x = map(Net6, x) return x def i2m(self, pkt, x): return inet_pton(socket.AF_INET6, x) def m2i(self, pkt, x): return inet_ntop(socket.AF_INET6, x) def any2i(self, pkt, x): return self.h2i(pkt,x) def i2repr(self, pkt, x): if x is None: return self.i2h(pkt,x) elif not isinstance(x, Net6) and not type(x) is list: if in6_isaddrTeredo(x): # print Teredo info server, flag, maddr, mport = teredoAddrExtractInfo(x) return "%s [Teredo srv: %s cli: %s:%s]" % (self.i2h(pkt, x), server, maddr,mport) elif in6_isaddr6to4(x): # print encapsulated address vaddr = in6_6to4ExtractAddr(x) return "%s [6to4 GW: %s]" % (self.i2h(pkt, x), vaddr) return self.i2h(pkt, x) # No specific information to return class SourceIP6Field(IP6Field): def __init__(self, name, dstname): IP6Field.__init__(self, name, None) self.dstname = dstname def i2m(self, pkt, x): if x is None: dst=getattr(pkt,self.dstname) iff,x,nh = conf.route6.route(dst) return IP6Field.i2m(self, pkt, x) def i2h(self, pkt, x): if x is None: dst=getattr(pkt,self.dstname) if isinstance(dst,Gen): r = map(conf.route6.route, dst) r.sort() if r[0] == r[-1]: x=r[0][1] else: warning("More than one possible route for %s"%repr(dst)) return None else: iff,x,nh = conf.route6.route(dst) return IP6Field.i2h(self, pkt, x) ipv6nh = { 0:"Hop-by-Hop Option Header", 4:"IP", 6:"TCP", 17:"UDP", 41:"IPv6", 43:"Routing Header", 44:"Fragment Header", 47:"GRE", 50:"ESP Header", 51:"AH Header", 58:"ICMPv6", 59:"No Next Header", 60:"Destination Option Header", 135:"Mobility Header"} ipv6nhcls = { 0: "IPv6ExtHdrHopByHop", 4: "IP", 6: "TCP", 17: "UDP", 43: "IPv6ExtHdrRouting", 44: "IPv6ExtHdrFragment", #50: "IPv6ExtHrESP", #51: "IPv6ExtHdrAH", 58: "ICMPv6Unknown", 59: "Raw", 60: "IPv6ExtHdrDestOpt" } class IP6ListField(StrField): islist = 1 def __init__(self, name, default, count_from=None, length_from=None): if default is None: default = [] StrField.__init__(self, name, default) self.count_from = count_from self.length_from = length_from def i2len(self, pkt, i): return 16*len(i) def i2count(self, pkt, i): if type(i) is list: return len(i) return 0 def getfield(self, pkt, s): c = l = None if self.length_from is not None: l = self.length_from(pkt) elif self.count_from is not None: c = self.count_from(pkt) lst = [] ret = "" remain = s if l is not None: remain,ret = s[:l],s[l:] while remain: if c is not None: if c <= 0: break c -= 1 addr = inet_ntop(socket.AF_INET6, remain[:16]) lst.append(addr) remain = remain[16:] return remain+ret,lst def i2m(self, pkt, x): s = '' for y in x: try: y = inet_pton(socket.AF_INET6, y) except: y = socket.getaddrinfo(y, None, socket.AF_INET6)[0][-1][0] y = inet_pton(socket.AF_INET6, y) s += y return s def i2repr(self,pkt,x): s = [] if x == None: return "[]" for y in x: s.append('%s' % y) return "[ %s ]" % (", ".join(s)) class _IPv6GuessPayload: name = "Dummy class that implements guess_payload_class() for IPv6" def default_payload_class(self,p): if self.nh == 58 and len(p) > 2: t = ord(p[0]) if t == 139 or t == 140: # Node Info Query return _niquery_guesser(p) return get_cls(icmp6typescls.get(t,"Raw"), "Raw") elif self.nh == 135 and len(p) > 3: return _mip6_mhtype2cls.get(ord(p[2]), MIP6MH_Generic) else: return get_cls(ipv6nhcls.get(self.nh,"Raw"), "Raw") class IPv6(_IPv6GuessPayload, Packet, IPTools): name = "IPv6" fields_desc = [ BitField("version" , 6 , 4), BitField("tc", 0, 8), #TODO: IPv6, ByteField ? BitField("fl", 0, 20), ShortField("plen", None), ByteEnumField("nh", 59, ipv6nh), ByteField("hlim", 64), SourceIP6Field("src", "dst"), # dst is for src @ selection IP6Field("dst", "::1") ] def mysummary(self): return "%s > %s (%i)" % (self.src,self.dst, self.nh) def post_build(self, p, pay): p += pay if self.plen is None: l = len(p) - 40 p = p[:4]+struct.pack("!H", l)+p[6:] return p def extract_padding(self, s): l = self.plen return s[:l], s[l:] def hashret(self): if self.nh == 58 and isinstance(self.payload, _ICMPv6): if self.payload.type < 128: return self.payload.payload.hashret() elif (self.payload.type in [133,134,135,136,144,145]): return struct.pack("B", self.nh)+self.payload.hashret() nh = self.nh sd = self.dst ss = self.src if self.nh == 43 and isinstance(self.payload, IPv6ExtHdrRouting): # With routing header, the destination is the last # address of the IPv6 list if segleft > 0 nh = self.payload.nh try: sd = self.addresses[-1] except IndexError: sd = '::1' # TODO: big bug with ICMPv6 error messages as the destination of IPerror6 # could be anything from the original list ... if 1: sd = inet_pton(socket.AF_INET6, sd) for a in self.addresses: a = inet_pton(socket.AF_INET6, a) sd = strxor(sd, a) sd = inet_ntop(socket.AF_INET6, sd) if self.nh == 44 and isinstance(self.payload, IPv6ExtHdrFragment): nh = self.payload.nh if self.nh == 0 and isinstance(self.payload, IPv6ExtHdrHopByHop): nh = self.payload.nh if self.nh == 60 and isinstance(self.payload, IPv6ExtHdrDestOpt): foundhao = None for o in self.payload.options: if isinstance(o, HAO): foundhao = o if foundhao: nh = self.payload.nh # XXX what if another extension follows ? ss = foundhao.hoa if conf.checkIPsrc and conf.checkIPaddr: sd = inet_pton(socket.AF_INET6, sd) ss = inet_pton(socket.AF_INET6, self.src) return struct.pack("B",nh)+self.payload.hashret() else: return struct.pack("B", nh)+self.payload.hashret() def answers(self, other): if not isinstance(other, IPv6): # self is reply, other is request return False if conf.checkIPaddr: ss = inet_pton(socket.AF_INET6, self.src) sd = inet_pton(socket.AF_INET6, self.dst) os = inet_pton(socket.AF_INET6, other.src) od = inet_pton(socket.AF_INET6, other.dst) # request was sent to a multicast address (other.dst) # Check reply destination addr matches request source addr (i.e # sd == os) except when reply is multicasted too # XXX test mcast scope matching ? if in6_ismaddr(other.dst): if in6_ismaddr(self.dst): if ((od == sd) or (in6_isaddrllallnodes(self.dst) and in6_isaddrllallservers(other.dst))): return self.payload.answers(other.payload) return False if (os == sd): return self.payload.answers(other.payload) return False elif (sd != os): # or ss != od): <- removed for ICMP errors return False if self.nh == 58 and isinstance(self.payload, _ICMPv6) and self.payload.type < 128: # ICMPv6 Error message -> generated by IPv6 packet # Note : at the moment, we jump the ICMPv6 specific class # to call answers() method of erroneous packet (over # initial packet). There can be cases where an ICMPv6 error # class could implement a specific answers method that perform # a specific task. Currently, don't see any use ... return self.payload.payload.answers(other) elif other.nh == 0 and isinstance(other.payload, IPv6ExtHdrHopByHop): return self.payload.answers(other.payload.payload) elif other.nh == 44 and isinstance(other.payload, IPv6ExtHdrFragment): return self.payload.answers(other.payload.payload) elif other.nh == 43 and isinstance(other.payload, IPv6ExtHdrRouting): return self.payload.answers(other.payload.payload) # Buggy if self.payload is a IPv6ExtHdrRouting elif other.nh == 60 and isinstance(other.payload, IPv6ExtHdrDestOpt): return self.payload.payload.answers(other.payload.payload) elif self.nh == 60 and isinstance(self.payload, IPv6ExtHdrDestOpt): # BU in reply to BRR, for instance return self.payload.payload.answers(other.payload) else: if (self.nh != other.nh): return False return self.payload.answers(other.payload) import scapy scapy.IPv6 = IPv6 class IPerror6(IPv6): name = "IPv6 in ICMPv6" def answers(self, other): if not isinstance(other, IPv6): return False sd = inet_pton(socket.AF_INET6, self.dst) ss = inet_pton(socket.AF_INET6, self.src) od = inet_pton(socket.AF_INET6, other.dst) os = inet_pton(socket.AF_INET6, other.src) # Make sure that the ICMPv6 error is related to the packet scapy sent if isinstance(self.underlayer, _ICMPv6) and self.underlayer.type < 128: # find upper layer for self (possible citation) selfup = self.payload while selfup is not None and isinstance(selfup, _IPv6ExtHdr): selfup = selfup.payload # find upper layer for other (initial packet). Also look for RH otherup = other.payload request_has_rh = False while otherup is not None and isinstance(otherup, _IPv6ExtHdr): if isinstance(otherup, IPv6ExtHdrRouting): request_has_rh = True otherup = otherup.payload if ((ss == os and sd == od) or # <- Basic case (ss == os and request_has_rh)): # <- Request has a RH : # don't check dst address # Let's deal with possible MSS Clamping if (isinstance(selfup, TCP) and isinstance(otherup, TCP) and selfup.options != otherup.options): # seems clamped # Save fields modified by MSS clamping old_otherup_opts = otherup.options old_otherup_cksum = otherup.chksum old_otherup_dataofs = otherup.dataofs old_selfup_opts = selfup.options old_selfup_cksum = selfup.chksum old_selfup_dataofs = selfup.dataofs # Nullify them otherup.options = [] otherup.chksum = 0 otherup.dataofs = 0 selfup.options = [] selfup.chksum = 0 selfup.dataofs = 0 # Test it and save result s1 = str(selfup) s2 = str(otherup) l = min(len(s1), len(s2)) res = s1[:l] == s2[:l] # recall saved values otherup.options = old_otherup_opts otherup.chksum = old_otherup_cksum otherup.dataofs = old_otherup_dataofs selfup.options = old_selfup_opts selfup.chksum = old_selfup_cksum selfup.dataofs = old_selfup_dataofs return res s1 = str(selfup) s2 = str(otherup) l = min(len(s1), len(s2)) return s1[:l] == s2[:l] return False def mysummary(self): return Packet.mysummary(self) ############################################################################# ############################################################################# ### Upper Layer Checksum computation ### ############################################################################# ############################################################################# class PseudoIPv6(Packet): # IPv6 Pseudo-header for checksum computation name = "Pseudo IPv6 Header" fields_desc = [ IP6Field("src", "::"), IP6Field("dst", "::"), ShortField("uplen", None), BitField("zero", 0, 24), ByteField("nh", 0) ] def in6_chksum(nh, u, p): """ Performs IPv6 Upper Layer checksum computation. Provided parameters are: - 'nh' : value of upper layer protocol - 'u' : upper layer instance (TCP, UDP, ICMPv6*, ). Instance must be provided with all under layers (IPv6 and all extension headers, for example) - 'p' : the payload of the upper layer provided as a string Functions operate by filling a pseudo header class instance (PseudoIPv6) with - Next Header value - the address of _final_ destination (if some Routing Header with non segleft field is present in underlayer classes, last address is used.) - the address of _real_ source (basically the source address of an IPv6 class instance available in the underlayer or the source address in HAO option if some Destination Option header found in underlayer includes this option). - the length is the length of provided payload string ('p') """ ph6 = PseudoIPv6() ph6.nh = nh rthdr = 0 hahdr = 0 final_dest_addr_found = 0 while u != None and not isinstance(u, IPv6): if (isinstance(u, IPv6ExtHdrRouting) and u.segleft != 0 and len(u.addresses) != 0 and final_dest_addr_found == 0): rthdr = u.addresses[-1] final_dest_addr_found = 1 elif (isinstance(u, IPv6ExtHdrDestOpt) and (len(u.options) == 1) and isinstance(u.options[0], HAO)): hahdr = u.options[0].hoa u = u.underlayer if u is None: warning("No IPv6 underlayer to compute checksum. Leaving null.") return 0 if hahdr: ph6.src = hahdr else: ph6.src = u.src if rthdr: ph6.dst = rthdr else: ph6.dst = u.dst ph6.uplen = len(p) ph6s = str(ph6) return checksum(ph6s+p) ############################################################################# ############################################################################# ### Extension Headers ### ############################################################################# ############################################################################# # Inherited by all extension header classes class _IPv6ExtHdr(_IPv6GuessPayload, Packet): name = 'Abstract IPV6 Option Header' aliastypes = [IPv6, IPerror6] # TODO ... scapy._IPv6OptionHeader = _IPv6ExtHdr #################### IPv6 options for Extension Headers ##################### _hbhopts = { 0x00: "Pad1", 0x01: "PadN", 0x04: "Tunnel Encapsulation Limit", 0x05: "Router Alert", 0x06: "Quick-Start", 0xc2: "Jumbo Payload", 0xc9: "Home Address Option" } class _OTypeField(ByteEnumField): """ Modified BytEnumField that displays information regarding the IPv6 option based on its option type value (What should be done by nodes that process the option if they do not understand it ...) It is used by Jumbo, Pad1, PadN, RouterAlert, HAO options """ pol = {0x00: "00: skip", 0x40: "01: discard", 0x80: "10: discard+ICMP", 0xC0: "11: discard+ICMP not mcast"} enroutechange = {0x00: "0: Don't change en-route", 0x20: "1: May change en-route" } def i2repr(self, pkt, x): s = self.i2s.get(x, repr(x)) polstr = self.pol[(x & 0xC0)] enroutechangestr = self.enroutechange[(x & 0x20)] return "%s [%s, %s]" % (s, polstr, enroutechangestr) class HBHOptUnknown(Packet): # IPv6 Hop-By-Hop Option name = "Scapy6 Unknown Option" fields_desc = [_OTypeField("otype", 0x01, _hbhopts), FieldLenField("optlen", None, length_of="optdata", fmt="B"), StrLenField("optdata", "", length_from = lambda pkt: pkt.optlen) ] def alignment(self, curpos): # By default, no alignment requirement """ As specified in section 4.2 of RFC 2460, every options has an alignment requirement ususally expressed xn+y, meaning the Option Type must appear at an integer multiple of x octest from the start of the header, plus y octet. That function is provided the current position from the start of the header and returns required padding length. """ return 0 class Pad1(Packet): # IPv6 Hop-By-Hop Option name = "Pad1" fields_desc = [ _OTypeField("otype", 0x00, _hbhopts) ] def alignment_delta(self, curpos): # No alignment requirement return 0 class PadN(Packet): # IPv6 Hop-By-Hop Option name = "PadN" fields_desc = [_OTypeField("otype", 0x01, _hbhopts), FieldLenField("optlen", None, length_of="optdata", fmt="B"), StrLenField("optdata", "", length_from = lambda pkt: pkt.optlen)] def alignment_delta(self, curpos): # No alignment requirement return 0 class RouterAlert(Packet): # RFC 2711 - IPv6 Hop-By-Hop Option name = "Router Alert" fields_desc = [_OTypeField("otype", 0x05, _hbhopts), ByteField("optlen", 2), ShortEnumField("value", None, { 0: "Datagram contains a MLD message", 1: "Datagram contains RSVP message", 2: "Datagram contains an Active Network message" }) ] # TODO : Check IANA has not defined new values for value field of RouterAlertOption # TODO : now that we have that option, we should do something in MLD class that need it def alignment_delta(self, curpos): # alignment requirement : 2n+0 x = 2 ; y = 0 delta = x*((curpos - y + x - 1)/x) + y - curpos return delta class Jumbo(Packet): # IPv6 Hop-By-Hop Option name = "Jumbo Payload" fields_desc = [_OTypeField("otype", 0xC2, _hbhopts), ByteField("optlen", 4), IntField("jumboplen", None) ] def alignment_delta(self, curpos): # alignment requirement : 4n+2 x = 4 ; y = 2 delta = x*((curpos - y + x - 1)/x) + y - curpos return delta class HAO(Packet): # IPv6 Destination Options Header Option name = "Home Address Option" fields_desc = [_OTypeField("otype", 0xC9, _hbhopts), ByteField("optlen", 16), IP6Field("hoa", "::") ] def alignment_delta(self, curpos): # alignment requirement : 8n+6 x = 8 ; y = 6 delta = x*((curpos - y + x - 1)/x) + y - curpos return delta _hbhoptcls = { 0x00: Pad1, 0x01: PadN, 0x05: RouterAlert, 0xC2: Jumbo, 0xC9: HAO } ######################## Hop-by-Hop Extension Header ######################## class _HopByHopOptionsField(PacketListField): islist = 1 holds_packet = 1 def __init__(self, name, default, cls, curpos, count_from=None, length_from=None): self.curpos = curpos PacketListField.__init__(self, name, default, cls, count_from=count_from, length_from=length_from) def i2len(self, pkt, i): l = len(self.i2m(pkt, i)) return l def i2count(self, pkt, i): if type(i) is list: return len(i) return 0 def getfield(self, pkt, s): c = l = None if self.length_from is not None: l = self.length_from(pkt) elif self.count_from is not None: c = self.count_from(pkt) opt = [] ret = "" x = s if l is not None: x,ret = s[:l],s[l:] while x: if c is not None: if c <= 0: break c -= 1 o = ord(x[0]) # Option type cls = self.cls if _hbhoptcls.has_key(o): cls = _hbhoptcls[o] try: op = cls(x) except: op = self.cls(x) opt.append(op) if isinstance(op.payload, Raw): x = op.payload.load del(op.payload) else: x = "" return x+ret,opt def i2m(self, pkt, x): autopad = None try: autopad = getattr(pkt, "autopad") # Hack : 'autopad' phantom field except: autopad = 1 if not autopad: return "".join(map(str, x)) curpos = self.curpos s = "" for p in x: d = p.alignment_delta(curpos) curpos += d if d == 1: s += str(Pad1()) elif d != 0: s += str(PadN(optdata='\x00'*(d-2))) pstr = str(p) curpos += len(pstr) s += pstr # Let's make the class including our option field # a multiple of 8 octets long d = curpos % 8 if d == 0: return s d = 8 - d if d == 1: s += str(Pad1()) elif d != 0: s += str(PadN(optdata='\x00'*(d-2))) return s def addfield(self, pkt, s, val): return s+self.i2m(pkt, val) class _PhantomAutoPadField(ByteField): def addfield(self, pkt, s, val): return s def getfield(self, pkt, s): return s, 1 def i2repr(self, pkt, x): if x: return "On" return "Off" class IPv6ExtHdrHopByHop(_IPv6ExtHdr): name = "IPv6 Extension Header - Hop-by-Hop Options Header" fields_desc = [ ByteEnumField("nh", 59, ipv6nh), FieldLenField("len", None, length_of="options", fmt="B", adjust = lambda pkt,x: (x+2+7)/8 - 1), _PhantomAutoPadField("autopad", 1), # autopad activated by default _HopByHopOptionsField("options", [], HBHOptUnknown, 2, length_from = lambda pkt: (8*(pkt.len+1))-2) ] overload_fields = {IPv6: { "nh": 0 }} ######################## Destination Option Header ########################## class IPv6ExtHdrDestOpt(_IPv6ExtHdr): name = "IPv6 Extension Header - Destination Options Header" fields_desc = [ ByteEnumField("nh", 59, ipv6nh), FieldLenField("len", None, length_of="options", fmt="B", adjust = lambda pkt,x: (x+2+7)/8 - 1), _PhantomAutoPadField("autopad", 1), # autopad activated by default _HopByHopOptionsField("options", [], HBHOptUnknown, 2, length_from = lambda pkt: (8*(pkt.len+1))-2) ] overload_fields = {IPv6: { "nh": 60 }} ############################# Routing Header ################################ class IPv6ExtHdrRouting(_IPv6ExtHdr): name = "IPv6 Option Header Routing" fields_desc = [ ByteEnumField("nh", 59, ipv6nh), FieldLenField("len", None, count_of="addresses", fmt="B", adjust = lambda pkt,x:2*x), # in 8 bytes blocks ByteField("type", 0), ByteField("segleft", None), BitField("reserved", 0, 32), # There is meaning in this field ... IP6ListField("addresses", [], length_from = lambda pkt: 8*pkt.len)] overload_fields = {IPv6: { "nh": 43 }} def post_build(self, pkt, pay): if self.segleft is None: pkt = pkt[:3]+struct.pack("B", len(self.addresses))+pkt[4:] return _IPv6ExtHdr.post_build(self, pkt, pay) ########################### Fragmentation Header ############################ class IPv6ExtHdrFragment(_IPv6ExtHdr): name = "IPv6 Extension Header - Fragmentation header" fields_desc = [ ByteEnumField("nh", 59, ipv6nh), BitField("res1", 0, 8), BitField("offset", 0, 13), BitField("res2", 0, 2), BitField("m", 0, 1), IntField("id", None) ] overload_fields = {IPv6: { "nh": 44 }} def defragment6(pktlist): """ Performs defragmentation of a list of IPv6 packets. Packets are reordered. Crap is dropped. What lacks is completed by 'X' characters. """ l = filter(lambda x: IPv6ExtHdrFragment in x, pktlist) # remove non fragments if not l: return [] id = l[0][IPv6ExtHdrFragment].id llen = len(l) l = filter(lambda x: x[IPv6ExtHdrFragment].id == id, l) if len(l) != llen: warning("defragment6: some fragmented packets have been removed from list") llen = len(l) # reorder fragments i = 0 res = [] while l: min_pos = 0 min_offset = l[0][IPv6ExtHdrFragment].offset for p in l: cur_offset = p[IPv6ExtHdrFragment].offset if cur_offset < min_offset: min_pos = 0 min_offset = cur_offset res.append(l[min_pos]) del(l[min_pos]) # regenerate the fragmentable part fragmentable = "" for p in res: q=p[IPv6ExtHdrFragment] offset = 8*q.offset if offset != len(fragmentable): warning("Expected an offset of %d. Found %d. Padding with XXXX" % (len(fragmentable), offset)) fragmentable += "X"*(offset - len(fragmentable)) fragmentable += str(q.payload) # Regenerate the unfragmentable part. q = res[0] nh = q[IPv6ExtHdrFragment].nh q[IPv6ExtHdrFragment].underlayer.nh = nh q[IPv6ExtHdrFragment].underlayer.payload = None q /= Raw(load=fragmentable) return IPv6(str(q)) def fragment6(pkt, fragSize): """ Performs fragmentation of an IPv6 packet. Provided packet ('pkt') must already contain an IPv6ExtHdrFragment() class. 'fragSize' argument is the expected maximum size of fragments (MTU). The list of packets is returned. If packet does not contain an IPv6OPtionHeaderFragment class, it is returned in result list. """ pkt = pkt.copy() s = str(pkt) # for instantiation to get upper layer checksum right if len(s) <= fragSize: return [pkt] if not IPv6ExtHdrFragment in pkt: # TODO : automatically add a fragment before upper Layer # at the moment, we do nothing and return initial packet # as single element of a list return [pkt] # Fragmentable part : fake IPv6 for Fragmentable part length computation fragPart = pkt[IPv6ExtHdrFragment].payload tmp = str(IPv6(src="::1", dst="::1")/fragPart) fragPartLen = len(tmp) - 40 # basic IPv6 header length fragPartStr = s[-fragPartLen:] # Grab Next Header for use in Fragment Header nh = IPv6(tmp[:40]).nh # Keep fragment header fragHeader = pkt[IPv6ExtHdrFragment] fragHeader.payload = None # detach payload # Unfragmentable Part unfragPartLen = len(s) - fragPartLen - 8 unfragPart = pkt pkt[IPv6ExtHdrFragment].underlayer.payload = None # detach payload # Cut the fragmentable part to fit fragSize. Inner fragments have # a length that is an integer multiple of 8 octets. last Frag MTU # can be anything below MTU lastFragSize = fragSize - unfragPartLen - 8 innerFragSize = lastFragSize - (lastFragSize % 8) if lastFragSize <= 0 or innerFragSize == 0: warning("Provided fragment size value is too low. " + "Should be more than %d" % (unfragPartLen + 8)) return [unfragPart/fragHeader/fragPart] remain = fragPartStr res = [] fragOffset = 0 # offset, incremeted during creation fragId = random.randint(0,0xffffffff) # random id ... if fragHeader.id is not None: # ... except id provided by user fragId = fragHeader.id fragHeader.m = 1 fragHeader.id = fragId fragHeader.nh = nh # Main loop : cut, fit to FRAGSIZEs, fragOffset, Id ... while True: if (len(remain) > lastFragSize): tmp = remain[:innerFragSize] remain = remain[innerFragSize:] fragHeader.offset = fragOffset # update offset fragOffset += (innerFragSize / 8) # compute new one if IPv6 in unfragPart: unfragPart[IPv6].plen = None tempo = unfragPart/fragHeader/Raw(load=tmp) res.append(tempo) else: fragHeader.offset = fragOffset # update offSet fragHeader.m = 0 if IPv6 in unfragPart: unfragPart[IPv6].plen = None tempo = unfragPart/fragHeader/Raw(load=remain) res.append(tempo) break return res ############################### AH Header ################################### # class _AHFieldLenField(FieldLenField): # def getfield(self, pkt, s): # l = getattr(pkt, self.fld) # l = (l*8)-self.shift # i = self.m2i(pkt, s[:l]) # return s[l:],i # class _AHICVStrLenField(StrLenField): # def i2len(self, pkt, x): # class IPv6ExtHdrAH(_IPv6ExtHdr): # name = "IPv6 Extension Header - AH" # fields_desc = [ ByteEnumField("nh", 59, ipv6nh), # _AHFieldLenField("len", None, "icv"), # ShortField("res", 0), # IntField("spi", 0), # IntField("sn", 0), # _AHICVStrLenField("icv", None, "len", shift=2) ] # overload_fields = {IPv6: { "nh": 51 }} # def post_build(self, pkt, pay): # if self.len is None: # pkt = pkt[0]+struct.pack("!B", 2*len(self.addresses))+pkt[2:] # if self.segleft is None: # pkt = pkt[:3]+struct.pack("!B", len(self.addresses))+pkt[4:] # return _IPv6ExtHdr.post_build(self, pkt, pay) ############################### ESP Header ################################## # class IPv6ExtHdrESP(_IPv6extHdr): # name = "IPv6 Extension Header - ESP" # fields_desc = [ IntField("spi", 0), # IntField("sn", 0), # # there is things to extract from IKE work # ] # overloads_fields = {IPv6: { "nh": 50 }} ############################################################################# ############################################################################# ### ICMPv6* Classes ### ############################################################################# ############################################################################# icmp6typescls = { 1: "ICMPv6DestUnreach", 2: "ICMPv6PacketTooBig", 3: "ICMPv6TimeExceeded", 4: "ICMPv6ParamProblem", 128: "ICMPv6EchoRequest", 129: "ICMPv6EchoReply", 130: "ICMPv6MLQuery", 131: "ICMPv6MLReport", 132: "ICMPv6MLDone", 133: "ICMPv6ND_RS", 134: "ICMPv6ND_RA", 135: "ICMPv6ND_NS", 136: "ICMPv6ND_NA", 137: "ICMPv6ND_Redirect", #138: Do Me - RFC 2894 - Seems painful 139: "ICMPv6NIQuery", 140: "ICMPv6NIReply", 141: "ICMPv6ND_INDSol", 142: "ICMPv6ND_INDAdv", #143: Do Me - RFC 3810 144: "ICMPv6HAADRequest", 145: "ICMPv6HAADReply", 146: "ICMPv6MPSol", 147: "ICMPv6MPAdv", 148: "ICMPv6SEND_CPS", 149: "ICMPv6SEND_CPA", 151: "ICMPv6MRD_Advertisement", 152: "ICMPv6MRD_Solicitation", 153: "ICMPv6MRD_Termination", } icmp6types = { 1 : "Destination unreachable", 2 : "Packet too big", 3 : "Time exceeded", 4 : "Parameter problem", 100 : "Private Experimentation", 101 : "Private Experimentation", 128 : "Echo Request", 129 : "Echo Reply", 130 : "MLD Query", 131 : "MLD Report", 132 : "MLD Done", 133 : "Router Solicitation", 134 : "Router Advertisement", 135 : "Neighbor Solicitation", 136 : "Neighbor Advertisement", 137 : "Redirect Message", 138 : "Router Renumbering", 139 : "ICMP Node Information Query", 140 : "ICMP Node Information Response", 141 : "Inverse Neighbor Discovery Solicitation Message", 142 : "Inverse Neighbor Discovery Advertisement Message", 143 : "Version 2 Multicast Listener Report", 144 : "Home Agent Address Discovery Request Message", 145 : "Home Agent Address Discovery Reply Message", 146 : "Mobile Prefix Solicitation", 147 : "Mobile Prefix Advertisement", 148 : "Certification Path Solicitation", 149 : "Certification Path Advertisement", 151 : "Multicast Router Advertisement", 152 : "Multicast Router Solicitation", 153 : "Multicast Router Termination", 200 : "Private Experimentation", 201 : "Private Experimentation" } class _ICMPv6(Packet): name = "ICMPv6 dummy class" overload_fields = {IPv6: {"nh": 58}} def post_build(self, p, pay): p += pay if self.cksum == None: chksum = in6_chksum(58, self.underlayer, p) p = p[:2]+struct.pack("!H", chksum)+p[4:] return p def hashret(self): return self.payload.hashret() def answers(self, other): # isinstance(self.underlayer, _IPv6ExtHdr) may introduce a bug ... if (isinstance(self.underlayer, IPerror6) or isinstance(self.underlayer, _IPv6ExtHdr) and isinstance(other, _ICMPv6)): if not ((self.type == other.type) and (self.code == other.code)): return 0 return 1 return 0 class _ICMPv6Error(_ICMPv6): name = "ICMPv6 errors dummy class" def guess_payload_class(self,p): return IPerror6 class ICMPv6Unknown(_ICMPv6): name = "Scapy6 ICMPv6 fallback class" fields_desc = [ ByteEnumField("type",1, icmp6types), ByteField("code",0), XShortField("cksum", None), StrField("msgbody", "")] ################################## RFC 2460 ################################# class ICMPv6DestUnreach(_ICMPv6Error): name = "ICMPv6 Destination Unreachable" fields_desc = [ ByteEnumField("type",1, icmp6types), ByteEnumField("code",0, { 0: "No route to destination", 1: "Communication with destination administratively prohibited", 2: "Beyond scope of source address", 3: "Address unreachable", 4: "Port unreachable" }), XShortField("cksum", None), XIntField("unused",0x00000000)] class ICMPv6PacketTooBig(_ICMPv6Error): name = "ICMPv6 Packet Too Big" fields_desc = [ ByteEnumField("type",2, icmp6types), ByteField("code",0), XShortField("cksum", None), IntField("mtu",1280)] class ICMPv6TimeExceeded(_ICMPv6Error): name = "ICMPv6 Time Exceeded" fields_desc = [ ByteEnumField("type",3, icmp6types), ByteField("code",{ 0: "hop limit exceeded in transit", 1: "fragment reassembly time exceeded"}), XShortField("cksum", None), XIntField("unused",0x00000000)] # The default pointer value is set to the next header field of # the encapsulated IPv6 packet class ICMPv6ParamProblem(_ICMPv6Error): name = "ICMPv6 Parameter Problem" fields_desc = [ ByteEnumField("type",4, icmp6types), ByteEnumField("code",0, {0: "erroneous header field encountered", 1: "unrecognized Next Header type encountered", 2: "unrecognized IPv6 option encountered"}), XShortField("cksum", None), IntField("ptr",6)] class ICMPv6EchoRequest(_ICMPv6): name = "ICMPv6 Echo Request" fields_desc = [ ByteEnumField("type", 128, icmp6types), ByteField("code", 0), XShortField("cksum", None), XShortField("id",0), XShortField("seq",0), StrField("data", "")] def mysummary(self): return self.sprintf("%name% (id: %id% seq: %seq%)") def hashret(self): return struct.pack("HH",self.id,self.seq)+self.payload.hashret() class ICMPv6EchoReply(ICMPv6EchoRequest): name = "ICMPv6 Echo Reply" __metaclass__ = NewDefaultValues type = 129 def answers(self, other): # We could match data content between request and reply. return (isinstance(other, ICMPv6EchoRequest) and self.id == other.id and self.seq == other.seq and self.data == other.data) ############ ICMPv6 Multicast Listener Discovery (RFC3810) ################## # tous les messages MLD sont emis avec une adresse source lien-locale # -> Y veiller dans le post_build si aucune n'est specifiee # La valeur de Hop-Limit doit etre de 1 # "and an IPv6 Router Alert option in a Hop-by-Hop Options # header. (The router alert option is necessary to cause routers to # examine MLD messages sent to multicast addresses in which the router # itself has no interest" class _ICMPv6ML(_ICMPv6): fields_desc = [ ByteEnumField("type", 130, icmp6types), ByteField("code", 0), XShortField("cksum", None), ShortField("mrd", 0), ShortField("reserved", 0), IP6Field("mladdr",None)] # general queries are sent to the link-scope all-nodes multicast # address ff02::1, with a multicast address field of 0 and a MRD of # [Query Response Interval] # Default value for mladdr is set to 0 for a General Query, and # overloaded by the user for a Multicast Address specific query # TODO : See what we can do to automatically include a Router Alert # Option in a Destination Option Header. class ICMPv6MLQuery(_ICMPv6ML): # RFC 2710 name = "MLD - Multicast Listener Query" __metaclass__ = NewDefaultValues type = 130 mrd = 10000 mladdr = "::" # 10s for mrd overload_fields = {IPv6: { "dst": "ff02::1", "hlim": 1 }} def hashret(self): if self.mladdr != "::": return struct.pack("HH",self.mladdr)+self.payload.hashret() else: return self.payload.hashret() # TODO : See what we can do to automatically include a Router Alert # Option in a Destination Option Header. class ICMPv6MLReport(_ICMPv6ML): # RFC 2710 name = "MLD - Multicast Listener Report" __metaclass__ = NewDefaultValues type = 131 overload_fields = {IPv6: {"hlim": 1}} # implementer le hashret et le answers # When a node ceases to listen to a multicast address on an interface, # it SHOULD send a single Done message to the link-scope all-routers # multicast address (FF02::2), carrying in its multicast address field # the address to which it is ceasing to listen # TODO : See what we can do to automatically include a Router Alert # Option in a Destination Option Header. class ICMPv6MLDone(_ICMPv6ML): # RFC 2710 name = "MLD - Multicast Listener Done" __metaclass__ = NewDefaultValues type = 132 overload_fields = {IPv6: { "dst": "ff02::2", "hlim": 1}} ########## ICMPv6 MRD - Multicast Router Discovery (RFC 4286) ############### # TODO: # - 04/09/06 troglocan : find a way to automatically add a router alert # option for all MRD packets. This could be done in a specific # way when IPv6 is the under layer with some specific keyword # like 'exthdr'. This would allow to keep compatibility with # providing IPv6 fields to be overloaded in fields_desc. # # At the moment, if user inserts an IPv6 Router alert option # none of the IPv6 default values of IPv6 layer will be set. class ICMPv6MRD_Advertisement(_ICMPv6): name = "ICMPv6 Multicast Router Discovery Advertisement" fields_desc = [ByteEnumField("type", 151, icmp6types), ByteField("advinter", 20), XShortField("cksum", None), ShortField("queryint", 0), ShortField("robustness", 0)] overload_fields = {IPv6: { "nh": 58, "hlim": 1, "dst": "ff02::2"}} # IPv6 Router Alert requires manual inclusion def extract_padding(self, s): return s[:8], s[8:] class ICMPv6MRD_Solicitation(_ICMPv6): name = "ICMPv6 Multicast Router Discovery Solicitation" fields_desc = [ByteEnumField("type", 152, icmp6types), ByteField("res", 0), XShortField("cksum", None) ] overload_fields = {IPv6: { "nh": 58, "hlim": 1, "dst": "ff02::2"}} # IPv6 Router Alert requires manual inclusion def extract_padding(self, s): return s[:4], s[4:] class ICMPv6MRD_Termination(_ICMPv6): name = "ICMPv6 Multicast Router Discovery Termination" fields_desc = [ByteEnumField("type", 153, icmp6types), ByteField("res", 0), XShortField("cksum", None) ] overload_fields = {IPv6: { "nh": 58, "hlim": 1, "dst": "ff02::6A"}} # IPv6 Router Alert requires manual inclusion def extract_padding(self, s): return s[:4], s[4:] ################### ICMPv6 Neighbor Discovery (RFC 2461) #################### icmp6ndopts = { 1: "Source Link-Layer Address", 2: "Target Link-Layer Address", 3: "Prefix Information", 4: "Redirected Header", 5: "MTU", 6: "NBMA Shortcut Limit Option", # RFC2491 7: "Advertisement Interval Option", 8: "Home Agent Information Option", 9: "Source Address List", 10: "Target Address List", 11: "CGA Option", # RFC 3971 12: "Universal Signature Option", # draft-cheneau-csi-send-sig-agility (update RFC 3971) 13: "Timestamp Option", # RFC 3971 14: "Nonce option", # RFC 3971 15: "Trust Anchor Option", # RFC 3971 16: "Certificate Option", # RFC 3971 17: "IP Address Option", # RFC 4068 18: "New Router Prefix Information Option", # RFC 4068 19: "Link-layer Address Option", # RFC 4068 20: "Neighbor Advertisement Acknowledgement Option", 21: "CARD Request Option", # RFC 4065/4066/4067 22: "CARD Reply Option", # RFC 4065/4066/4067 23: "MAP Option", # RFC 4140 24: "Route Information Option", # RFC 4191 25: "Recusive DNS Server Option", 26: "IPv6 Router Advertisement Flags Option", # TC: value will change given the IANA inputs 42: "Supported Signature Option" } icmp6ndoptscls = { 1: "ICMPv6NDOptSrcLLAddr", 2: "ICMPv6NDOptDstLLAddr", 3: "ICMPv6NDOptPrefixInfo", 4: "ICMPv6NDOptRedirectedHdr", 5: "ICMPv6NDOptMTU", 6: "ICMPv6NDOptShortcutLimit", 7: "ICMPv6NDOptAdvInterval", 8: "ICMPv6NDOptHAInfo", 9: "ICMPv6NDOptSrcAddrList", 10: "ICMPv6NDOptTgtAddrList", 11: "ICMPv6NDOptCGA", 12: "ICMPv6NDOptUSSig", # draft-cheneau-csi-send-sig-agility (update RFC 3971) 13: "ICMPv6NDOptTimestamp", 14: "ICMPv6NDOptNonce", 15: "ICMPv6NDOptTrustAnchor", 16: "ICMPv6NDOptCertificate", 17: "ICMPv6NDOptIPAddr", 18: "ICMPv6NDOptNewRtrPrefix", 19: "ICMPv6NDOptLLA", #20: Do Me, #21: Do Me, #22: Do Me, 23: "ICMPv6NDOptMAP", 24: "ICMPv6NDOptRouteInfo", 25: "ICMPv6NDOptRDNSS", 26: "ICMPv6NDOptEFA", # TC: value will change given the IANA inputs 42: "ICMPv6NDOptSSA" } class _ICMPv6NDGuessPayload: name = "Dummy ND class that implements guess_payload_class()" def guess_payload_class(self,p): if len(p) > 1: return get_cls(icmp6ndoptscls.get(ord(p[0]),"Raw"), "Raw") # s/Raw/ICMPv6NDOptUnknown/g ? # Beginning of ICMPv6 Neighbor Discovery Options. class ICMPv6NDOptUnknown(_ICMPv6NDGuessPayload, Packet): name = "ICMPv6 Neighbor Discovery Option - Scapy Unimplemented" fields_desc = [ ByteField("type",None), FieldLenField("len",None,length_of="data",fmt="B", adjust = lambda pkt,x: x+2), StrLenField("data","", length_from = lambda pkt: pkt.len-2) ] # NOTE: len includes type and len field. Expressed in unit of 8 bytes # TODO: Revoir le coup du ETHER_ANY class ICMPv6NDOptSrcLLAddr(_ICMPv6NDGuessPayload, Packet): name = "ICMPv6 Neighbor Discovery Option - Source Link-Layer Address" fields_desc = [ ByteField("type", 1), ByteField("len", 1), MACField("lladdr", ETHER_ANY) ] def mysummary(self): return self.sprintf("%name% %lladdr%") class ICMPv6NDOptDstLLAddr(ICMPv6NDOptSrcLLAddr): name = "ICMPv6 Neighbor Discovery Option - Destination Link-Layer Address" __metaclass__ = NewDefaultValues type = 2 class ICMPv6NDOptPrefixInfo(_ICMPv6NDGuessPayload, Packet): name = "ICMPv6 Neighbor Discovery Option - Prefix Information" fields_desc = [ ByteField("type",3), ByteField("len",4), ByteField("prefixlen",None), BitField("L",1,1), BitField("A",1,1), BitField("R",0,1), BitField("res1",0,5), XIntField("validlifetime",0xffffffffL), XIntField("preferredlifetime",0xffffffffL), XIntField("res2",0x00000000), IP6Field("prefix","::") ] def mysummary(self): return self.sprintf("%name% %prefix%") # TODO: We should also limit the size of included packet to something # like (initiallen - 40 - 2) class TruncPktLenField(PacketLenField): def __init__(self, name, default, cls, cur_shift, length_from=None, shift=0): PacketLenField.__init__(self, name, default, cls, length_from=length_from) self.cur_shift = cur_shift def getfield(self, pkt, s): l = self.length_from(pkt) i = self.m2i(pkt, s[:l]) return s[l:],i def m2i(self, pkt, m): s = None try: # It can happen we have sth shorter than 40 bytes s = self.cls(m) except: return Raw(m) return s def i2m(self, pkt, x): s = str(x) l = len(s) r = (l + self.cur_shift) % 8 l = l - r return s[:l] def i2len(self, pkt, i): return len(self.i2m(pkt, i)) # Faire un post_build pour le recalcul de la taille (en multiple de 8 octets) class ICMPv6NDOptRedirectedHdr(_ICMPv6NDGuessPayload, Packet): name = "ICMPv6 Neighbor Discovery Option - Redirected Header" fields_desc = [ ByteField("type",4), FieldLenField("len", None, length_of="pkt", fmt="B", adjust = lambda pkt,x:(x+4)/8), XShortField("res",0), TruncPktLenField("pkt", "", IPv6, 4, length_from = lambda pkt: 8*pkt.len-4) ] # See which value should be used for default MTU instead of 1280 class ICMPv6NDOptMTU(_ICMPv6NDGuessPayload, Packet): name = "ICMPv6 Neighbor Discovery Option - MTU" fields_desc = [ ByteField("type",5), ByteField("len",1), XShortField("res",0), IntField("mtu",1280)] class ICMPv6NDOptShortcutLimit(_ICMPv6NDGuessPayload, Packet): # RFC 2491 name = "ICMPv6 Neighbor Discovery Option - NBMA Shortcut Limit" fields_desc = [ ByteField("type", 6), ByteField("len", 1), ByteField("shortcutlim", 40), # XXX ByteField("res1", 0), IntField("res2", 0) ] class ICMPv6NDOptAdvInterval(_ICMPv6NDGuessPayload, Packet): name = "ICMPv6 Neighbor Discovery - Interval Advertisement" fields_desc = [ ByteField("type",7), ByteField("len",1), ShortField("res", 0), IntField("advint", 0) ] def mysummary(self): return self.sprintf("%name% %advint% milliseconds") class ICMPv6NDOptHAInfo(_ICMPv6NDGuessPayload, Packet): name = "ICMPv6 Neighbor Discovery - Home Agent Information" fields_desc = [ ByteField("type",8), ByteField("len",1), ShortField("res", 0), ShortField("pref", 0), ShortField("lifetime", 1)] def mysummary(self): return self.sprintf("%name% %pref% %lifetime% seconds") # type 9 : See ICMPv6NDOptSrcAddrList class below in IND (RFC 3122) support # type 10 : See ICMPv6NDOptTgtAddrList class below in IND (RFC 3122) support class ICMPv6NDOptIPAddr(_ICMPv6NDGuessPayload, Packet): # RFC 4068 name = "ICMPv6 Neighbor Discovery - IP Address Option (FH for MIPv6)" fields_desc = [ ByteField("type",17), ByteField("len", 3), ByteEnumField("optcode", 1, {1: "Old Care-Of Address", 2: "New Care-Of Address", 3: "NAR's IP address" }), ByteField("plen", 64), IntField("res", 0), IP6Field("addr", "::") ] class ICMPv6NDOptNewRtrPrefix(_ICMPv6NDGuessPayload, Packet): # RFC 4068 name = "ICMPv6 Neighbor Discovery - New Router Prefix Information Option (FH for MIPv6)" fields_desc = [ ByteField("type",18), ByteField("len", 3), ByteField("optcode", 0), ByteField("plen", 64), IntField("res", 0), IP6Field("prefix", "::") ] _rfc4068_lla_optcode = {0: "Wildcard requesting resolution for all nearby AP", 1: "LLA for the new AP", 2: "LLA of the MN", 3: "LLA of the NAR", 4: "LLA of the src of TrSolPr or PrRtAdv msg", 5: "AP identified by LLA belongs to current iface of router", 6: "No preifx info available for AP identified by the LLA", 7: "No fast handovers support for AP identified by the LLA" } class ICMPv6NDOptLLA(_ICMPv6NDGuessPayload, Packet): # RFC 4068 name = "ICMPv6 Neighbor Discovery - Link-Layer Address (LLA) Option (FH for MIPv6)" fields_desc = [ ByteField("type", 19), ByteField("len", 1), ByteEnumField("optcode", 0, _rfc4068_lla_optcode), MACField("lla", ETHER_ANY) ] # We only support ethernet class ICMPv6NDOptMAP(_ICMPv6NDGuessPayload, Packet): # RFC 4140 name = "ICMPv6 Neighbor Discovery - MAP Option" fields_desc = [ ByteField("type", 23), ByteField("len", 3), BitField("dist", 1, 4), BitField("pref", 15, 4), # highest availability BitField("R", 1, 1), BitField("res", 0, 7), IntField("validlifetime", 0xffffffff), IP6Field("addr", "::") ] class ICMPv6NDOptRouteInfo(_ICMPv6NDGuessPayload, Packet): # RFC 4191 name = "ICMPv6 Neighbor Discovery Option - Route Information Option" fields_desc = [ ByteField("type",24), ByteField("len",4), ByteField("plen", None), BitField("res1",0,3), BitField("prf",0,2), BitField("res2",0,3), IntField("rtlifetime", 0xffffffff), IP6Field("prefix", "::")] class ICMPv6NDOptRDNSS(_ICMPv6NDGuessPayload, Packet): # RFC 5006 name = "ICMPv6 Neighbor Discovery Option - Recursive DNS Server Option" fields_desc = [ ByteField("type", 25), FieldLenField("len", None, count_of="dns", fmt="B", adjust = lambda pkt,x: 2*x+1), ShortField("res", None), IntField("lifetime", 0xffffffff), IP6ListField("dns", [], length_from = lambda pkt: 8*(pkt.len-1)) ] class ICMPv6NDOptEFA(_ICMPv6NDGuessPayload, Packet): # RFC 5175 (prev. 5075) name = "ICMPv6 Neighbor Discovery Option - Expanded Flags Option" fields_desc = [ ByteField("type", 26), ByteField("len", 1), BitField("res", 0, 48) ] # End of ICMPv6 Neighbor Discovery Options. class ICMPv6ND_RS(_ICMPv6NDGuessPayload, _ICMPv6): name = "ICMPv6 Neighbor Discovery - Router Solicitation" fields_desc = [ ByteEnumField("type", 133, icmp6types), ByteField("code",0), XShortField("cksum", None), IntField("res",0) ] overload_fields = {IPv6: { "nh": 58, "dst": "ff02::2", "hlim": 255 }} class ICMPv6ND_RA(_ICMPv6NDGuessPayload, _ICMPv6): name = "ICMPv6 Neighbor Discovery - Router Advertisement" fields_desc = [ ByteEnumField("type", 134, icmp6types), ByteField("code",0), XShortField("cksum", None), ByteField("chlim",0), BitField("M",0,1), BitField("O",0,1), BitField("H",0,1), BitEnumField("prf",1,2, { 0: "Medium (default)", 1: "High", 2: "Reserved", 3: "Low" } ), # RFC 4191 BitField("P",0,1), BitField("res",0,2), ShortField("routerlifetime",1800), IntField("reachabletime",0), IntField("retranstimer",0) ] overload_fields = {IPv6: { "nh": 58, "dst": "ff02::1", "hlim": 255 }} def answers(self, other): return isinstance(other, ICMPv6ND_RS) class ICMPv6ND_NS(_ICMPv6NDGuessPayload, _ICMPv6, Packet): name = "ICMPv6 Neighbor Discovery - Neighbor Solicitation" fields_desc = [ ByteEnumField("type",135, icmp6types), ByteField("code",0), XShortField("cksum", None), BitField("R",0,1), BitField("S",0,1), BitField("O",0,1), XBitField("res",0,29), IP6Field("tgt","::") ] overload_fields = {IPv6: { "nh": 58, "dst": "ff02::1", "hlim": 255 }} def mysummary(self): return self.sprintf("%name% (tgt: %tgt%)") def hashret(self): return self.tgt+self.payload.hashret() class ICMPv6ND_NA(ICMPv6ND_NS): name = "ICMPv6 Neighbor Discovery - Neighbor Advertisement" __metaclass__ = NewDefaultValues type = 136 R = 1 O = 1 def answers(self, other): return isinstance(other, ICMPv6ND_NS) and self.tgt == other.tgt # associated possible options : target link-layer option, Redirected header class ICMPv6ND_Redirect(_ICMPv6NDGuessPayload, _ICMPv6, Packet): name = "ICMPv6 Neighbor Discovery - Redirect" fields_desc = [ ByteEnumField("type",137, icmp6types), ByteField("code",0), XShortField("cksum", None), XIntField("res",0), IP6Field("tgt","::"), IP6Field("dst","::") ] overload_fields = {IPv6: { "nh": 58, "dst": "ff02::1", "hlim": 255 }} ################ ICMPv6 Inverse Neighbor Discovery (RFC 3122) ############### class ICMPv6NDOptSrcAddrList(_ICMPv6NDGuessPayload, Packet): name = "ICMPv6 Inverse Neighbor Discovery Option - Source Address List" fields_desc = [ ByteField("type",9), FieldLenField("len", None, count_of="addrlist", fmt="B", adjust = lambda pkt,x: 2*x+1), StrFixedLenField("res", "\x00"*6, 6), IP6ListField("addrlist", [], length_from = lambda pkt: 8*(pkt.len-1)) ] class ICMPv6NDOptTgtAddrList(ICMPv6NDOptSrcAddrList): name = "ICMPv6 Inverse Neighbor Discovery Option - Target Address List" __metaclass__ = NewDefaultValues type = 10 # RFC3122 # Options requises : source lladdr et target lladdr # Autres options valides : source address list, MTU # - Comme precise dans le document, il serait bien de prendre l'adresse L2 # demandee dans l'option requise target lladdr et l'utiliser au niveau # de l'adresse destination ethernet si aucune adresse n'est precisee # - ca semble pas forcement pratique si l'utilisateur doit preciser toutes # les options. # Ether() must use the target lladdr as destination class ICMPv6ND_INDSol(_ICMPv6NDGuessPayload, _ICMPv6): name = "ICMPv6 Inverse Neighbor Discovery Solicitation" fields_desc = [ ByteEnumField("type",141, icmp6types), ByteField("code",0), XShortField("cksum",None), XIntField("reserved",0) ] overload_fields = {IPv6: { "nh": 58, "dst": "ff02::1", "hlim": 255 }} # Options requises : target lladdr, target address list # Autres options valides : MTU class ICMPv6ND_INDAdv(_ICMPv6NDGuessPayload, _ICMPv6): name = "ICMPv6 Inverse Neighbor Discovery Advertisement" fields_desc = [ ByteEnumField("type",142, icmp6types), ByteField("code",0), XShortField("cksum",None), XIntField("reserved",0) ] overload_fields = {IPv6: { "nh": 58, "dst": "ff02::1", "hlim": 255 }} ############################################################################# ### LLMNR (RFC4795) ### ############################################################################# # LLMNR is based on the DNS packet format (RFC1035 Section 4) # RFC also envisions LLMNR over TCP. Like vista, we don't support it -- arno _LLMNR_IPv6_mcast_Addr = "FF02:0:0:0:0:0:1:3" _LLMNR_IPv4_mcast_addr = "224.0.0.252" class LLMNRQuery(Packet): name = "Link Local Multicast Node Resolution - Query" fields_desc = [ ShortField("id", 0), BitField("qr", 0, 1), BitEnumField("opcode", 0, 4, { 0:"QUERY" }), BitField("c", 0, 1), BitField("tc", 0, 2), BitField("z", 0, 4), BitEnumField("rcode", 0, 4, { 0:"ok" }), DNSRRCountField("qdcount", None, "qd"), DNSRRCountField("ancount", None, "an"), DNSRRCountField("nscount", None, "ns"), DNSRRCountField("arcount", None, "ar"), DNSQRField("qd", "qdcount"), DNSRRField("an", "ancount"), DNSRRField("ns", "nscount"), DNSRRField("ar", "arcount",0)] overload_fields = {UDP: {"sport": 5355, "dport": 5355 }} def hashret(self): return struct.pack("!H", id) class LLMNRResponse(LLMNRQuery): name = "Link Local Multicast Node Resolution - Response" __metaclass__ = NewDefaultValues qr = 1 fields_desc = [] def answers(self, other): return (isinstance(other, LLMNRQuery) and self.id == other.id and self.qr == 1 and other.qr == 0) def _llmnr_dispatcher(x, *args, **kargs): cls = Raw if len(x) >= 3: if (ord(x[4]) & 0x80): # Response cls = LLMNRResponse else: # Query cls = LLMNRQuery return cls(x, *args, **kargs) bind_bottom_up(UDP, _llmnr_dispatcher, { "dport": 5355 }) bind_bottom_up(UDP, _llmnr_dispatcher, { "sport": 5355 }) # LLMNRQuery(id=RandShort(), qd=DNSQR(qname="vista."))) ############################################################################### # ICMPv6 Node Information Queries (RFC 4620) ############################################################################### # [ ] Add automatic destination address computation using computeNIGroupAddr # in IPv6 class (Scapy6 modification when integrated) if : # - it is not provided # - upper layer is ICMPv6NIQueryName() with a valid value # [ ] Try to be liberal in what we accept as internal values for _explicit_ # DNS elements provided by users. Any string should be considered # valid and kept like it has been provided. At the moment, i2repr() will # crash on many inputs # [ ] Do the documentation # [ ] Add regression tests # [ ] Perform test against real machines (NOOP reply is proof of implementation). # [ ] Check if there are differences between different stacks. Among *BSD, # with others. # [ ] Deal with flags in a consistent way. # [ ] Implement compression in names2dnsrepr() and decompresiion in # dnsrepr2names(). Should be deactivable. icmp6_niqtypes = { 0: "NOOP", 2: "Node Name", 3: "IPv6 Address", 4: "IPv4 Address" } class _ICMPv6NIHashret: def hashret(self): return self.nonce class _ICMPv6NIAnswers: def answers(self, other): return self.nonce == other.nonce # Buggy; always returns the same value during a session class NonceField(StrFixedLenField): def __init__(self, name, default=None): StrFixedLenField.__init__(self, name, default, 8) if default is None: self.default = self.randval() # Compute the NI group Address. Can take a FQDN as input parameter def computeNIGroupAddr(name): import md5 name = name.lower().split(".")[0] record = chr(len(name))+name h = md5.new(record) h = h.digest() addr = "ff02::2:%2x%2x:%2x%2x" % struct.unpack("BBBB", h[:4]) return addr # Here is the deal. First, that protocol is a piece of shit. Then, we # provide 4 classes for the different kinds of Requests (one for every # valid qtype: NOOP, Node Name, IPv6@, IPv4@). They all share the same # data field class that is made to be smart by guessing the specifc # type of value provided : # # - IPv6 if acceptable for inet_pton(AF_INET6, ): code is set to 0, # if not overriden by user # - IPv4 if acceptable for inet_pton(AF_INET, ): code is set to 2, # if not overriden # - Name in the other cases: code is set to 0, if not overriden by user # # Internal storage, is not only the value, but the a pair providing # the type and the value (1 is IPv6@, 1 is Name or string, 2 is IPv4@) # # Note : I merged getfield() and m2i(). m2i() should not be called # directly anyway. Same remark for addfield() and i2m() # # -- arno # "The type of information present in the Data field of a query is # declared by the ICMP Code, whereas the type of information in a # Reply is determined by the Qtype" def names2dnsrepr(x): """ Take as input a list of DNS names or a single DNS name and encode it in DNS format (with possible compression) If a string that is already a DNS name in DNS format is passed, it is returned unmodified. Result is a string. !!! At the moment, compression is not implemented !!! """ if type(x) is str: if x and x[-1] == '\x00': # stupid heuristic return x x = [x] res = [] for n in x: termin = "\x00" if n.count('.') == 0: # single-component gets one more termin += '\x00' n = "".join(map(lambda y: chr(len(y))+y, n.split("."))) + termin res.append(n) return "".join(res) def dnsrepr2names(x): """ Take as input a DNS encoded string (possibly compressed) and returns a list of DNS names contained in it. If provided string is already in printable format (does not end with a null character, a one element list is returned). Result is a list. """ res = [] cur = "" while x: l = ord(x[0]) x = x[1:] if l == 0: if cur and cur[-1] == '.': cur = cur[:-1] res.append(cur) cur = "" if x and ord(x[0]) == 0: # single component x = x[1:] continue if l & 0xc0: # XXX TODO : work on that -- arno raise Exception("DNS message can't be compressed at this point!") else: cur += x[:l]+"." x = x[l:] return res class NIQueryDataField(StrField): def __init__(self, name, default): StrField.__init__(self, name, default) def i2h(self, pkt, x): if x is None: return x t,val = x if t == 1: val = dnsrepr2names(val)[0] return val def h2i(self, pkt, x): if x is tuple and type(x[0]) is int: return x val = None try: # Try IPv6 inet_pton(socket.AF_INET6, x) val = (0, x) except: try: # Try IPv4 inet_pton(socket.AF_INET, x) val = (2, x) except: # Try DNS if x is None: x = "" x = names2dnsrepr(x) val = (1, x) return val def i2repr(self, pkt, x): t,val = x if t == 1: # DNS Name # we don't use dnsrepr2names() to deal with # possible weird data extracted info res = [] weird = None while val: l = ord(val[0]) val = val[1:] if l == 0: if (len(res) > 1 and val): # fqdn with data behind weird = val elif len(val) > 1: # single label with data behind weird = val[1:] break res.append(val[:l]+".") val = val[l:] tmp = "".join(res) if tmp and tmp[-1] == '.': tmp = tmp[:-1] return tmp return repr(val) def getfield(self, pkt, s): qtype = getattr(pkt, "qtype") if qtype == 0: # NOOP return s, (0, "") else: code = getattr(pkt, "code") if code == 0: # IPv6 Addr return s[16:], (0, inet_ntop(socket.AF_INET6, s[:16])) elif code == 2: # IPv4 Addr return s[4:], (2, inet_ntop(socket.AF_INET, s[:4])) else: # Name or Unknown return "", (1, s) def addfield(self, pkt, s, val): if ((type(val) is tuple and val[1] is None) or val is None): val = (1, "") t = val[0] if t == 1: return s + val[1] elif t == 0: return s + inet_pton(socket.AF_INET6, val[1]) else: return s + inet_pton(socket.AF_INET, val[1]) class NIQueryCodeField(ByteEnumField): def i2m(self, pkt, x): if x is None: d = pkt.getfieldval("data") if d is None: return 1 elif d[0] == 0: # IPv6 address return 0 elif d[0] == 1: # Name return 1 elif d[0] == 2: # IPv4 address return 2 else: return 1 return x _niquery_code = {0: "IPv6 Query", 1: "Name Query", 2: "IPv4 Query"} #_niquery_flags = { 2: "All unicast addresses", 4: "IPv4 addresses", # 8: "Link-local addresses", 16: "Site-local addresses", # 32: "Global addresses" } # "This NI type has no defined flags and never has a Data Field". Used # to know if the destination is up and implements NI protocol. class ICMPv6NIQueryNOOP(_ICMPv6NIHashret, _ICMPv6): name = "ICMPv6 Node Information Query - NOOP Query" fields_desc = [ ByteEnumField("type", 139, icmp6types), NIQueryCodeField("code", None, _niquery_code), XShortField("cksum", None), ShortEnumField("qtype", 0, icmp6_niqtypes), BitField("unused", 0, 10), FlagsField("flags", 0, 6, "TACLSG"), NonceField("nonce", None), NIQueryDataField("data", None) ] class ICMPv6NIQueryName(ICMPv6NIQueryNOOP): name = "ICMPv6 Node Information Query - IPv6 Name Query" __metaclass__ = NewDefaultValues qtype = 2 # We ask for the IPv6 address of the peer class ICMPv6NIQueryIPv6(ICMPv6NIQueryNOOP): name = "ICMPv6 Node Information Query - IPv6 Address Query" __metaclass__ = NewDefaultValues qtype = 3 flags = 0x3E class ICMPv6NIQueryIPv4(ICMPv6NIQueryNOOP): name = "ICMPv6 Node Information Query - IPv4 Address Query" __metaclass__ = NewDefaultValues qtype = 4 _nireply_code = { 0: "Successful Reply", 1: "Response Refusal", 3: "Unknown query type" } _nireply_flags = { 1: "Reply set incomplete", 2: "All unicast addresses", 4: "IPv4 addresses", 8: "Link-local addresses", 16: "Site-local addresses", 32: "Global addresses" } # Internal repr is one of those : # (0, "some string") : unknow qtype value are mapped to that one # (3, [ (ttl, ip6), ... ]) # (4, [ (ttl, ip4), ... ]) # (2, [ttl, dns_names]) : dns_names is one string that contains # all the DNS names. Internally it is kept ready to be sent # (undissected). i2repr() decode it for user. This is to # make build after dissection bijective. # # I also merged getfield() and m2i(), and addfield() and i2m(). class NIReplyDataField(StrField): def i2h(self, pkt, x): if x is None: return x t,val = x if t == 2: ttl, dnsnames = val val = [ttl] + dnsrepr2names(dnsnames) return val def h2i(self, pkt, x): qtype = 0 # We will decode it as string if not # overridden through 'qtype' in pkt # No user hint, let's use 'qtype' value for that purpose if type(x) is not tuple: if pkt is not None: qtype = getattr(pkt, "qtype") else: qtype = x[0] x = x[1] # From that point on, x is the value (second element of the tuple) if qtype == 2: # DNS name if type(x) is str: # listify the string x = [x] if type(x) is list and x and type(x[0]) is not int: # ttl was omitted : use 0 x = [0] + x ttl = x[0] names = x[1:] return (2, [ttl, names2dnsrepr(names)]) elif qtype in [3, 4]: # IPv4 or IPv6 addr if type(x) is str: x = [x] # User directly provided an IP, instead of list # List elements are not tuples, user probably # omitted ttl value : we will use 0 instead def addttl(x): if type(x) is str: return (0, x) return x return (qtype, map(addttl, x)) return (qtype, x) def addfield(self, pkt, s, val): t,tmp = val if tmp is None: tmp = "" if t == 2: ttl,dnsstr = tmp return s+ struct.pack("!I", ttl) + dnsstr elif t == 3: return s + "".join(map(lambda (x,y): struct.pack("!I", x)+inet_pton(socket.AF_INET6, y), tmp)) elif t == 4: return s + "".join(map(lambda (x,y): struct.pack("!I", x)+inet_pton(socket.AF_INET, y), tmp)) else: return s + tmp def getfield(self, pkt, s): code = getattr(pkt, "code") if code != 0: return s, (0, "") qtype = getattr(pkt, "qtype") if qtype == 0: # NOOP return s, (0, "") elif qtype == 2: if len(s) < 4: return s, (0, "") ttl = struct.unpack("!I", s[:4])[0] return "", (2, [ttl, s[4:]]) elif qtype == 3: # IPv6 addresses with TTLs # XXX TODO : get the real length res = [] while len(s) >= 20: # 4 + 16 ttl = struct.unpack("!I", s[:4])[0] ip = inet_ntop(socket.AF_INET6, s[4:20]) res.append((ttl, ip)) s = s[20:] return s, (3, res) elif qtype == 4: # IPv4 addresses with TTLs # XXX TODO : get the real length res = [] while len(s) >= 8: # 4 + 4 ttl = struct.unpack("!I", s[:4])[0] ip = inet_ntop(socket.AF_INET, s[4:8]) res.append((ttl, ip)) s = s[8:] return s, (4, res) else: # XXX TODO : implement me and deal with real length return "", (0, s) def i2repr(self, pkt, x): if x is None: return "[]" if type(x) is tuple and len(x) == 2: t, val = x if t == 2: # DNS names ttl,l = val l = dnsrepr2names(l) return "ttl:%d %s" % (ttl, ", ".join(l)) elif t == 3 or t == 4: return "[ %s ]" % (", ".join(map(lambda (x,y): "(%d, %s)" % (x, y), val))) return repr(val) return repr(x) # XXX should not happen # By default, sent responses have code set to 0 (successful) class ICMPv6NIReplyNOOP(_ICMPv6NIAnswers, _ICMPv6NIHashret, _ICMPv6): name = "ICMPv6 Node Information Reply - NOOP Reply" fields_desc = [ ByteEnumField("type", 140, icmp6types), ByteEnumField("code", 0, _nireply_code), XShortField("cksum", None), ShortEnumField("qtype", 0, icmp6_niqtypes), BitField("unused", 0, 10), FlagsField("flags", 0, 6, "TACLSG"), NonceField("nonce", None), NIReplyDataField("data", None)] class ICMPv6NIReplyName(ICMPv6NIReplyNOOP): name = "ICMPv6 Node Information Reply - Node Names" __metaclass__ = NewDefaultValues qtype = 2 class ICMPv6NIReplyIPv6(ICMPv6NIReplyNOOP): name = "ICMPv6 Node Information Reply - IPv6 addresses" __metaclass__ = NewDefaultValues qtype = 3 class ICMPv6NIReplyIPv4(ICMPv6NIReplyNOOP): name = "ICMPv6 Node Information Reply - IPv4 addresses" __metaclass__ = NewDefaultValues qtype = 4 class ICMPv6NIReplyRefuse(ICMPv6NIReplyNOOP): name = "ICMPv6 Node Information Reply - Responder refuses to supply answer" __metaclass__ = NewDefaultValues code = 1 class ICMPv6NIReplyUnknown(ICMPv6NIReplyNOOP): name = "ICMPv6 Node Information Reply - Qtype unknown to the responder" __metaclass__ = NewDefaultValues code = 2 def _niquery_guesser(p): cls = Raw type = ord(p[0]) if type == 139: # Node Info Query specific stuff if len(p) > 6: qtype, = struct.unpack("!H", p[4:6]) cls = { 0: ICMPv6NIQueryNOOP, 2: ICMPv6NIQueryName, 3: ICMPv6NIQueryIPv6, 4: ICMPv6NIQueryIPv4 }.get(qtype, Raw) elif type == 140: # Node Info Reply specific stuff code = ord(p[1]) if code == 0: if len(p) > 6: qtype, = struct.unpack("!H", p[4:6]) cls = { 2: ICMPv6NIReplyName, 3: ICMPv6NIReplyIPv6, 4: ICMPv6NIReplyIPv4 }.get(qtype, ICMPv6NIReplyNOOP) elif code == 1: cls = ICMPv6NIReplyRefuse elif code == 2: cls = ICMPv6NIReplyUnknown return cls ############################################################################# ############################################################################# ### DHCPv6 ### ############################################################################# ############################################################################# All_DHCP_Relay_Agents_and_Servers = "ff02::1:2" All_DHCP_Servers = "ff05::1:3" # Site-Local scope : deprecated by 3879 dhcp6opts = { 1: "CLIENTID", 2: "SERVERID", 3: "IA_NA", 4: "IA_TA", 5: "IAADDR", 6: "ORO", 7: "PREFERENCE", 8: "ELAPSED_TIME", 9: "RELAY_MSG", 11: "AUTH", 12: "UNICAST", 13: "STATUS_CODE", 14: "RAPID_COMMIT", 15: "USER_CLASS", 16: "VENDOR_CLASS", 17: "VENDOR_OPTS", 18: "INTERFACE_ID", 19: "RECONF_MSG", 20: "RECONF_ACCEPT", 21: "SIP Servers Domain Name List", #RFC3319 22: "SIP Servers IPv6 Address List", #RFC3319 23: "DNS Recursive Name Server Option", #RFC3646 24: "Domain Search List option", #RFC3646 25: "OPTION_IA_PD", #RFC3633 26: "OPTION_IAPREFIX", #RFC3633 27: "OPTION_NIS_SERVERS", #RFC3898 28: "OPTION_NISP_SERVERS", #RFC3898 29: "OPTION_NIS_DOMAIN_NAME", #RFC3898 30: "OPTION_NISP_DOMAIN_NAME", #RFC3898 31: "OPTION_SNTP_SERVERS", #RFC4075 32: "OPTION_INFORMATION_REFRESH_TIME", #RFC4242 33: "OPTION_BCMCS_SERVER_D", #RFC4280 34: "OPTION_BCMCS_SERVER_A", #RFC4280 36: "OPTION_GEOCONF_CIVIC", #RFC-ietf-geopriv-dhcp-civil-09.txt 37: "OPTION_REMOTE_ID", #RFC4649 38: "OPTION_SUBSCRIBER_ID", #RFC4580 39: "OPTION_CLIENT_FQDN" } #RFC4704 dhcp6opts_by_code = { 1: "DHCP6OptClientId", 2: "DHCP6OptServerId", 3: "DHCP6OptIA_NA", 4: "DHCP6OptIA_TA", 5: "DHCP6OptIAAddress", 6: "DHCP6OptOptReq", 7: "DHCP6OptPref", 8: "DHCP6OptElapsedTime", 9: "DHCP6OptRelayMsg", 11: "DHCP6OptAuth", 12: "DHCP6OptServerUnicast", 13: "DHCP6OptStatusCode", 14: "DHCP6OptRapidCommit", 15: "DHCP6OptUserClass", 16: "DHCP6OptVendorClass", 17: "DHCP6OptVendorSpecificInfo", 18: "DHCP6OptIfaceId", 19: "DHCP6OptReconfMsg", 20: "DHCP6OptReconfAccept", 21: "DHCP6OptSIPDomains", #RFC3319 22: "DHCP6OptSIPServers", #RFC3319 23: "DHCP6OptDNSServers", #RFC3646 24: "DHCP6OptDNSDomains", #RFC3646 25: "DHCP6OptIA_PD", #RFC3633 26: "DHCP6OptIAPrefix", #RFC3633 27: "DHCP6OptNISServers", #RFC3898 28: "DHCP6OptNISPServers", #RFC3898 29: "DHCP6OptNISDomain", #RFC3898 30: "DHCP6OptNISPDomain", #RFC3898 31: "DHCP6OptSNTPServers", #RFC4075 32: "DHCP6OptInfoRefreshTime", #RFC4242 33: "DHCP6OptBCMCSDomains", #RFC4280 34: "DHCP6OptBCMCSServers", #RFC4280 #36: "DHCP6OptGeoConf", #RFC-ietf-geopriv-dhcp-civil-09.txt 37: "DHCP6OptRemoteID", #RFC4649 38: "DHCP6OptSubscriberID", #RFC4580 39: "DHCP6OptClientFQDN", #RFC4704 #40: "DHCP6OptPANAAgent", #RFC-ietf-dhc-paa-option-05.txt #41: "DHCP6OptNewPOSIXTimeZone, #RFC4833 #42: "DHCP6OptNewTZDBTimeZone, #RFC4833 43: "DHCP6OptRelayAgentERO" #RFC4994 #44: "DHCP6OptLQQuery", #RFC5007 #45: "DHCP6OptLQClientData", #RFC5007 #46: "DHCP6OptLQClientTime", #RFC5007 #47: "DHCP6OptLQRelayData", #RFC5007 #48: "DHCP6OptLQClientLink", #RFC5007 } # sect 5.3 RFC 3315 : DHCP6 Messages types dhcp6types = { 1:"SOLICIT", 2:"ADVERTISE", 3:"REQUEST", 4:"CONFIRM", 5:"RENEW", 6:"REBIND", 7:"REPLY", 8:"RELEASE", 9:"DECLINE", 10:"RECONFIGURE", 11:"INFORMATION-REQUEST", 12:"RELAY-FORW", 13:"RELAY-REPL" } ##################################################################### ### DHCPv6 DUID related stuff ### ##################################################################### duidtypes = { 1: "Link-layer address plus time", 2: "Vendor-assigned unique ID based on Enterprise Number", 3: "Link-layer Address" } # DUID hardware types - RFC 826 - Extracted from # http://www.iana.org/assignments/arp-parameters on 31/10/06 # We should add the length of every kind of address. duidhwtypes = { 0: "NET/ROM pseudo", # Not referenced by IANA 1: "Ethernet (10Mb)", 2: "Experimental Ethernet (3Mb)", 3: "Amateur Radio AX.25", 4: "Proteon ProNET Token Ring", 5: "Chaos", 6: "IEEE 802 Networks", 7: "ARCNET", 8: "Hyperchannel", 9: "Lanstar", 10: "Autonet Short Address", 11: "LocalTalk", 12: "LocalNet (IBM PCNet or SYTEK LocalNET)", 13: "Ultra link", 14: "SMDS", 15: "Frame Relay", 16: "Asynchronous Transmission Mode (ATM)", 17: "HDLC", 18: "Fibre Channel", 19: "Asynchronous Transmission Mode (ATM)", 20: "Serial Line", 21: "Asynchronous Transmission Mode (ATM)", 22: "MIL-STD-188-220", 23: "Metricom", 24: "IEEE 1394.1995", 25: "MAPOS", 26: "Twinaxial", 27: "EUI-64", 28: "HIPARP", 29: "IP and ARP over ISO 7816-3", 30: "ARPSec", 31: "IPsec tunnel", 32: "InfiniBand (TM)", 33: "TIA-102 Project 25 Common Air Interface (CAI)" } class UTCTimeField(IntField): epoch = (2000, 1, 1, 0, 0, 0, 5, 1, 0) # required Epoch def i2repr(self, pkt, x): x = self.i2h(pkt, x) from time import gmtime, strftime, mktime delta = mktime(self.epoch) - mktime(gmtime(0)) x = x + delta t = strftime("%a, %d %b %Y %H:%M:%S +0000", gmtime(x)) return "%s (%d)" % (t, x) class _LLAddrField(MACField): pass # XXX We only support Ethernet addresses at the moment. _LLAddrField # will be modified when needed. Ask us. --arno class DUID_LLT(Packet): # sect 9.2 RFC 3315 name = "DUID - Link-layer address plus time" fields_desc = [ ShortEnumField("type", 1, duidtypes), XShortEnumField("hwtype", 1, duidhwtypes), UTCTimeField("timeval", 0), # i.e. 01 Jan 2000 _LLAddrField("lladdr", ETHER_ANY) ] # In fact, IANA enterprise-numbers file available at # http//www.iana.org/asignments/enterprise-numbers) # is simply huge (more than 2Mo and 600Ko in bz2). I'll # add only most common vendors, and encountered values. # -- arno iana_enterprise_num = { 9: "ciscoSystems", 35: "Nortel Networks", 43: "3Com", 311: "Microsoft", 2636: "Juniper Networks, Inc.", 4526: "Netgear", 5771: "Cisco Systems, Inc.", 5842: "Cisco Systems", 16885: "Nortel Networks" } class DUID_EN(Packet): # sect 9.3 RFC 3315 name = "DUID - Assigned by Vendor Based on Enterprise Number" fields_desc = [ ShortEnumField("type", 2, duidtypes), IntEnumField("enterprisenum", 311, iana_enterprise_num), StrField("id","") ] class DUID_LL(Packet): # sect 9.4 RFC 3315 name = "DUID - Based on Link-layer Address" fields_desc = [ ShortEnumField("type", 3, duidtypes), XShortEnumField("hwtype", 1, duidhwtypes), _LLAddrField("lladdr", ETHER_ANY) ] duid_cls = { 1: "DUID_LLT", 2: "DUID_EN", 3: "DUID_LL"} ##################################################################### ### DHCPv6 Options classes ### ##################################################################### class _DHCP6OptGuessPayload(Packet): def guess_payload_class(self, payload): cls = Raw if len(payload) > 2 : opt = struct.unpack("!H", payload[:2])[0] cls = get_cls(dhcp6opts_by_code.get(opt, "DHCP6OptUnknown"), DHCP6OptUnknown) return cls class DHCP6OptUnknown(_DHCP6OptGuessPayload): # A generic DHCPv6 Option name = "Unknown DHCPv6 OPtion" fields_desc = [ ShortEnumField("optcode", 0, dhcp6opts), FieldLenField("optlen", None, length_of="data", fmt="!H"), StrLenField("data", "", length_from = lambda pkt: pkt.optlen)] class _DUIDField(PacketField): holds_packets=1 def __init__(self, name, default, length_from=None): StrField.__init__(self, name, default) self.length_from = length_from def i2m(self, pkt, i): return str(i) def m2i(self, pkt, x): cls = Raw if len(x) > 4: o = struct.unpack("!H", x[:2])[0] cls = get_cls(duid_cls.get(o, Raw), "Raw") return cls(x) def getfield(self, pkt, s): l = self.length_from(pkt) return s[l:], self.m2i(pkt,s[:l]) class DHCP6OptClientId(_DHCP6OptGuessPayload): # RFC sect 22.2 name = "DHCP6 Client Identifier Option" fields_desc = [ ShortEnumField("optcode", 1, dhcp6opts), FieldLenField("optlen", None, length_of="duid", fmt="!H"), _DUIDField("duid", "", length_from = lambda pkt: pkt.optlen) ] class DHCP6OptServerId(DHCP6OptClientId): # RFC sect 22.3 name = "DHCP6 Server Identifier Option" __metaclass__ = NewDefaultValues optcode = 2 # Should be encapsulated in the option field of IA_NA or IA_TA options # Can only appear at that location. # TODO : last field IAaddr-options is not defined in the reference document class DHCP6OptIAAddress(_DHCP6OptGuessPayload): # RFC sect 22.6 name = "DHCP6 IA Address Option (IA_TA or IA_NA suboption)" fields_desc = [ ShortEnumField("optcode", 5, dhcp6opts), FieldLenField("optlen", None, length_of="iaaddropts", fmt="!H", adjust = lambda pkt,x: x+24), IP6Field("addr", "::"), IntField("preflft", 0), IntField("validlft", 0), XIntField("iaid", None), StrLenField("iaaddropts", "", length_from = lambda pkt: pkt.optlen - 24) ] def guess_payload_class(self, payload): return Padding class _IANAOptField(PacketListField): def i2len(self, pkt, z): if z is None or z == []: return 0 return sum(map(lambda x: len(str(x)) ,z)) def getfield(self, pkt, s): l = self.length_from(pkt) lst = [] remain, payl = s[:l], s[l:] while len(remain)>0: p = self.m2i(pkt,remain) if Padding in p: pad = p[Padding] remain = pad.load del(pad.underlayer.payload) else: remain = "" lst.append(p) return payl,lst class DHCP6OptIA_NA(_DHCP6OptGuessPayload): # RFC sect 22.4 name = "DHCP6 Identity Association for Non-temporary Addresses Option" fields_desc = [ ShortEnumField("optcode", 3, dhcp6opts), FieldLenField("optlen", None, length_of="ianaopts", fmt="!H", adjust = lambda pkt,x: x+12), XIntField("iaid", None), IntField("T1", None), IntField("T2", None), _IANAOptField("ianaopts", [], DHCP6OptIAAddress, length_from = lambda pkt: pkt.optlen-12) ] class _IATAOptField(_IANAOptField): pass class DHCP6OptIA_TA(_DHCP6OptGuessPayload): # RFC sect 22.5 name = "DHCP6 Identity Association for Temporary Addresses Option" fields_desc = [ ShortEnumField("optcode", 4, dhcp6opts), FieldLenField("optlen", None, length_of="iataopts", fmt="!H", adjust = lambda pkt,x: x+4), XIntField("iaid", None), _IATAOptField("iataopts", [], DHCP6OptIAAddress, length_from = lambda pkt: pkt.optlen-4) ] #### DHCPv6 Option Request Option ################################### class _OptReqListField(StrLenField): islist = 1 def i2h(self, pkt, x): if x is None: return [] return x def i2len(self, pkt, x): return 2*len(x) def any2i(self, pkt, x): return x def i2repr(self, pkt, x): s = [] for y in self.i2h(pkt, x): if dhcp6opts.has_key(y): s.append(dhcp6opts[y]) else: s.append("%d" % y) return "[%s]" % ", ".join(s) def m2i(self, pkt, x): r = [] while len(x) != 0: if len(x)<2: warning("Odd length for requested option field. Rejecting last byte") return r r.append(struct.unpack("!H", x[:2])[0]) x = x[2:] return r def i2m(self, pkt, x): return "".join(map(lambda y: struct.pack("!H", y), x)) # A client may include an ORO in a solicit, Request, Renew, Rebind, # Confirm or Information-request class DHCP6OptOptReq(_DHCP6OptGuessPayload): # RFC sect 22.7 name = "DHCP6 Option Request Option" fields_desc = [ ShortEnumField("optcode", 6, dhcp6opts), FieldLenField("optlen", None, length_of="reqopts", fmt="!H"), _OptReqListField("reqopts", [23, 24], length_from = lambda pkt: pkt.optlen) ] #### DHCPv6 Preference Option ####################################### # emise par un serveur pour affecter le choix fait par le client. Dans # les messages Advertise, a priori class DHCP6OptPref(_DHCP6OptGuessPayload): # RFC sect 22.8 name = "DHCP6 Preference Option" fields_desc = [ ShortEnumField("optcode", 7, dhcp6opts), ShortField("optlen", 1 ), ByteField("prefval",255) ] #### DHCPv6 Elapsed Time Option ##################################### class _ElapsedTimeField(ShortField): def i2repr(self, pkt, x): if x == 0xffff: return "infinity (0xffff)" return "%.2f sec" % (self.i2h(pkt, x)/100.) class DHCP6OptElapsedTime(_DHCP6OptGuessPayload):# RFC sect 22.9 name = "DHCP6 Elapsed Time Option" fields_desc = [ ShortEnumField("optcode", 8, dhcp6opts), ShortField("optlen", 2), _ElapsedTimeField("elapsedtime", 0) ] #### DHCPv6 Relay Message Option #################################### # Relayed message is seen as a payload. class DHCP6OptRelayMsg(_DHCP6OptGuessPayload):# RFC sect 22.10 name = "DHCP6 Relay Message Option" fields_desc = [ ShortEnumField("optcode", 9, dhcp6opts), ShortField("optlen", None ) ] def post_build(self, p, pay): if self.optlen is None: l = len(pay) p = p[:2]+struct.pack("!H", l) return p + pay #### DHCPv6 Authentication Option ################################### # The following fields are set in an Authentication option for the # Reconfigure Key Authentication Protocol: # # protocol 3 # # algorithm 1 # # RDM 0 # # The format of the Authentication information for the Reconfigure Key # Authentication Protocol is: # # 0 1 2 3 # 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ # | Type | Value (128 bits) | # +-+-+-+-+-+-+-+-+ | # . . # . . # . +-+-+-+-+-+-+-+-+ # | | # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ # # Type Type of data in Value field carried in this option: # # 1 Reconfigure Key value (used in Reply message). # # 2 HMAC-MD5 digest of the message (used in Reconfigure # message). # # Value Data as defined by field. # TODO : Decoding only at the moment class DHCP6OptAuth(_DHCP6OptGuessPayload): # RFC sect 22.11 name = "DHCP6 Option - Authentication" fields_desc = [ ShortEnumField("optcode", 11, dhcp6opts), FieldLenField("optlen", None, length_of="authinfo", adjust = lambda pkt,x: x+11), ByteField("proto", 3), # TODO : XXX ByteField("alg", 1), # TODO : XXX ByteField("rdm", 0), # TODO : XXX StrFixedLenField("replay", "A"*8, 8), # TODO: XXX StrLenField("authinfo", "", length_from = lambda pkt: pkt.optlen - 11) ] #### DHCPv6 Server Unicast Option ################################### class _SrvAddrField(IP6Field): def i2h(self, pkt, x): if x is None: return "::" return x def i2m(self, pkt, x): return inet_pton(socket.AF_INET6, self.i2h(pkt,x)) class DHCP6OptServerUnicast(_DHCP6OptGuessPayload):# RFC sect 22.12 name = "DHCP6 Server Unicast Option" fields_desc = [ ShortEnumField("optcode", 12, dhcp6opts), ShortField("optlen", 16 ), _SrvAddrField("srvaddr",None) ] #### DHCPv6 Status Code Option ###################################### dhcp6statuscodes = { 0:"Success", # sect 24.4 1:"UnspecFail", 2:"NoAddrsAvail", 3:"NoBinding", 4:"NotOnLink", 5:"UseMulticast", 6:"NoPrefixAvail"} # From RFC3633 class DHCP6OptStatusCode(_DHCP6OptGuessPayload):# RFC sect 22.13 name = "DHCP6 Status Code Option" fields_desc = [ ShortEnumField("optcode", 13, dhcp6opts), FieldLenField("optlen", None, length_of="statusmsg", fmt="!H", adjust = lambda pkt,x:x+2), ShortEnumField("statuscode",None,dhcp6statuscodes), StrLenField("statusmsg", "", length_from = lambda pkt: pkt.optlen-2) ] #### DHCPv6 Rapid Commit Option ##################################### class DHCP6OptRapidCommit(_DHCP6OptGuessPayload): # RFC sect 22.14 name = "DHCP6 Rapid Commit Option" fields_desc = [ ShortEnumField("optcode", 14, dhcp6opts), ShortField("optlen", 0)] #### DHCPv6 User Class Option ####################################### class _UserClassDataField(PacketListField): def i2len(self, pkt, z): if z is None or z == []: return 0 return sum(map(lambda x: len(str(x)) ,z)) def getfield(self, pkt, s): l = self.length_from(pkt) lst = [] remain, payl = s[:l], s[l:] while len(remain)>0: p = self.m2i(pkt,remain) if Padding in p: pad = p[Padding] remain = pad.load del(pad.underlayer.payload) else: remain = "" lst.append(p) return payl,lst class USER_CLASS_DATA(Packet): name = "user class data" fields_desc = [ FieldLenField("len", None, length_of="data"), StrLenField("data", "", length_from = lambda pkt: pkt.len) ] def guess_payload_class(self, payload): return Padding class DHCP6OptUserClass(_DHCP6OptGuessPayload):# RFC sect 22.15 name = "DHCP6 User Class Option" fields_desc = [ ShortEnumField("optcode", 15, dhcp6opts), FieldLenField("optlen", None, fmt="!H", length_of="userclassdata"), _UserClassDataField("userclassdata", [], USER_CLASS_DATA, length_from = lambda pkt: pkt.optlen) ] #### DHCPv6 Vendor Class Option ##################################### class _VendorClassDataField(_UserClassDataField): pass class VENDOR_CLASS_DATA(USER_CLASS_DATA): name = "vendor class data" class DHCP6OptVendorClass(_DHCP6OptGuessPayload):# RFC sect 22.16 name = "DHCP6 Vendor Class Option" fields_desc = [ ShortEnumField("optcode", 16, dhcp6opts), FieldLenField("optlen", None, length_of="vcdata", fmt="!H", adjust = lambda pkt,x: x+4), IntEnumField("enterprisenum",None , iana_enterprise_num ), _VendorClassDataField("vcdata", [], VENDOR_CLASS_DATA, length_from = lambda pkt: pkt.optlen-4) ] #### DHCPv6 Vendor-Specific Information Option ###################### class VENDOR_SPECIFIC_OPTION(_DHCP6OptGuessPayload): name = "vendor specific option data" fields_desc = [ ShortField("optcode", None), FieldLenField("optlen", None, length_of="optdata"), StrLenField("optdata", "", length_from = lambda pkt: pkt.optlen) ] def guess_payload_class(self, payload): return Padding # The third one that will be used for nothing interesting class DHCP6OptVendorSpecificInfo(_DHCP6OptGuessPayload):# RFC sect 22.17 name = "DHCP6 Vendor-specific Information Option" fields_desc = [ ShortEnumField("optcode", 17, dhcp6opts), FieldLenField("optlen", None, length_of="vso", fmt="!H", adjust = lambda pkt,x: x+4), IntEnumField("enterprisenum",None , iana_enterprise_num), _VendorClassDataField("vso", [], VENDOR_SPECIFIC_OPTION, length_from = lambda pkt: pkt.optlen-4) ] #### DHCPv6 Interface-ID Option ##################################### # Repasser sur cette option a la fin. Elle a pas l'air d'etre des # masses critique. class DHCP6OptIfaceId(_DHCP6OptGuessPayload):# RFC sect 22.18 name = "DHCP6 Interface-Id Option" fields_desc = [ ShortEnumField("optcode", 18, dhcp6opts), FieldLenField("optlen", None, fmt="!H", length_of="ifaceid"), StrLenField("ifaceid", "", length_from = lambda pkt: pkt.optlen) ] #### DHCPv6 Reconfigure Message Option ############################## # A server includes a Reconfigure Message option in a Reconfigure # message to indicate to the client whether the client responds with a # renew message or an Informatiion-request message. class DHCP6OptReconfMsg(_DHCP6OptGuessPayload): # RFC sect 22.19 name = "DHCP6 Reconfigure Message Option" fields_desc = [ ShortEnumField("optcode", 19, dhcp6opts), ShortField("optlen", 1 ), ByteEnumField("msgtype", 11, { 5:"Renew Message", 11:"Information Request"}) ] #### DHCPv6 Reconfigure Accept Option ############################### # A client uses the Reconfigure Accept option to announce to the # server whether the client is willing to accept Recoonfigure # messages, and a server uses this option to tell the client whether # or not to accept Reconfigure messages. The default behavior in the # absence of this option, means unwillingness to accept reconfigure # messages, or instruction not to accept Reconfigure messages, for the # client and server messages, respectively. class DHCP6OptReconfAccept(_DHCP6OptGuessPayload): # RFC sect 22.20 name = "DHCP6 Reconfigure Accept Option" fields_desc = [ ShortEnumField("optcode", 20, dhcp6opts), ShortField("optlen", 0)] # As required in Sect 8. of RFC 3315, Domain Names must be encoded as # described in section 3.1 of RFC 1035 # XXX Label should be at most 63 octets in length : we do not enforce it # Total length of domain should be 255 : we do not enforce it either class DomainNameListField(StrLenField): islist = 1 def i2len(self, pkt, x): return len(self.i2m(pkt, x)) def m2i(self, pkt, x): res = [] while x: cur = [] while x and x[0] != '\x00': l = ord(x[0]) cur.append(x[1:l+1]) x = x[l+1:] res.append(".".join(cur)) if x and x[0] == '\x00': x = x[1:] return res def i2m(self, pkt, x): def conditionalTrailingDot(z): if z and z[-1] == '\x00': return z return z+'\x00' res = "" tmp = map(lambda y: map((lambda z: chr(len(z))+z), y.split('.')), x) return "".join(map(lambda x: conditionalTrailingDot("".join(x)), tmp)) class DHCP6OptSIPDomains(_DHCP6OptGuessPayload): #RFC3319 name = "DHCP6 Option - SIP Servers Domain Name List" fields_desc = [ ShortEnumField("optcode", 21, dhcp6opts), FieldLenField("optlen", None, length_of="sipdomains"), DomainNameListField("sipdomains", [], length_from = lambda pkt: pkt.optlen) ] class DHCP6OptSIPServers(_DHCP6OptGuessPayload): #RFC3319 name = "DHCP6 Option - SIP Servers IPv6 Address List" fields_desc = [ ShortEnumField("optcode", 22, dhcp6opts), FieldLenField("optlen", None, length_of="sipservers"), IP6ListField("sipservers", [], length_from = lambda pkt: pkt.optlen) ] class DHCP6OptDNSServers(_DHCP6OptGuessPayload): #RFC3646 name = "DHCP6 Option - DNS Recursive Name Server" fields_desc = [ ShortEnumField("optcode", 23, dhcp6opts), FieldLenField("optlen", None, length_of="dnsservers"), IP6ListField("dnsservers", [], length_from = lambda pkt: pkt.optlen) ] class DHCP6OptDNSDomains(_DHCP6OptGuessPayload): #RFC3646 name = "DHCP6 Option - Domain Search List option" fields_desc = [ ShortEnumField("optcode", 24, dhcp6opts), FieldLenField("optlen", None, length_of="dnsdomains"), DomainNameListField("dnsdomains", [], length_from = lambda pkt: pkt.optlen) ] # TODO: Implement iaprefopts correctly when provided with more # information about it. class DHCP6OptIAPrefix(_DHCP6OptGuessPayload): #RFC3633 name = "DHCP6 Option - IA_PD Prefix option" fields_desc = [ ShortEnumField("optcode", 26, dhcp6opts), FieldLenField("optlen", None, length_of="iaprefopts", adjust = lambda pkt,x: x+26), IntField("preflft", 0), IntField("validlft", 0), ByteField("plen", 48), # TODO: Challenge that default value IP6Field("prefix", "2001:db8::"), # At least, global and won't hurt StrLenField("iaprefopts", "", length_from = lambda pkt: pkt.optlen-26) ] class DHCP6OptIA_PD(_DHCP6OptGuessPayload): #RFC3633 name = "DHCP6 Option - Identity Association for Prefix Delegation" fields_desc = [ ShortEnumField("optcode", 25, dhcp6opts), FieldLenField("optlen", None, length_of="iapdopt", adjust = lambda pkt,x: x+12), IntField("iaid", 0), IntField("T1", 0), IntField("T2", 0), PacketListField("iapdopt", [], DHCP6OptIAPrefix, length_from = lambda pkt: pkt.optlen-12) ] class DHCP6OptNISServers(_DHCP6OptGuessPayload): #RFC3898 name = "DHCP6 Option - NIS Servers" fields_desc = [ ShortEnumField("optcode", 27, dhcp6opts), FieldLenField("optlen", None, length_of="nisservers"), IP6ListField("nisservers", [], length_from = lambda pkt: pkt.optlen) ] class DHCP6OptNISPServers(_DHCP6OptGuessPayload): #RFC3898 name = "DHCP6 Option - NIS+ Servers" fields_desc = [ ShortEnumField("optcode", 28, dhcp6opts), FieldLenField("optlen", None, length_of="nispservers"), IP6ListField("nispservers", [], length_from = lambda pkt: pkt.optlen) ] class DomainNameField(StrLenField): def getfield(self, pkt, s): l = self.length_from(pkt) return s[l:], self.m2i(pkt,s[:l]) def i2len(self, pkt, x): return len(self.i2m(pkt, x)) def m2i(self, pkt, x): save = x cur = [] while x and x[0] != '\x00': l = ord(x[0]) cur.append(x[1:1+l]) x = x[l+1:] if x[0] != '\x00': print "Found weird domain: '%s'. Keeping %s" % (save, x) return ".".join(cur) def i2m(self, pkt, x): def conditionalTrailingDot(z): if (z and z[-1] == '\x00'): return z return z+'\x00' if not x: return "" tmp = "".join(map(lambda z: chr(len(z))+z, x.split('.'))) return conditionalTrailingDot(tmp) class DHCP6OptNISDomain(_DHCP6OptGuessPayload): #RFC3898 name = "DHCP6 Option - NIS Domain Name" fields_desc = [ ShortEnumField("optcode", 29, dhcp6opts), FieldLenField("optlen", None, length_of="nisdomain"), DomainNameField("nisdomain", "", length_from = lambda pkt: pkt.optlen) ] class DHCP6OptNISPDomain(_DHCP6OptGuessPayload): #RFC3898 name = "DHCP6 Option - NIS+ Domain Name" fields_desc = [ ShortEnumField("optcode", 30, dhcp6opts), FieldLenField("optlen", None, length_of="nispdomain"), DomainNameField("nispdomain", "", length_from= lambda pkt: pkt.optlen) ] class DHCP6OptSNTPServers(_DHCP6OptGuessPayload): #RFC4075 name = "DHCP6 option - SNTP Servers" fields_desc = [ ShortEnumField("optcode", 31, dhcp6opts), FieldLenField("optlen", None, length_of="sntpservers"), IP6ListField("sntpservers", [], length_from = lambda pkt: pkt.optlen) ] IRT_DEFAULT=86400 IRT_MINIMUM=600 class DHCP6OptInfoRefreshTime(_DHCP6OptGuessPayload): #RFC4242 name = "DHCP6 Option - Information Refresh Time" fields_desc = [ ShortEnumField("optcode", 32, dhcp6opts), ShortField("optlen", 4), IntField("reftime", IRT_DEFAULT)] # One day class DHCP6OptBCMCSDomains(_DHCP6OptGuessPayload): #RFC4280 name = "DHCP6 Option - BCMCS Domain Name List" fields_desc = [ ShortEnumField("optcode", 33, dhcp6opts), FieldLenField("optlen", None, length_of="bcmcsdomains"), DomainNameListField("bcmcsdomains", [], length_from = lambda pkt: pkt.optlen) ] class DHCP6OptBCMCSServers(_DHCP6OptGuessPayload): #RFC4280 name = "DHCP6 Option - BCMCS Addresses List" fields_desc = [ ShortEnumField("optcode", 34, dhcp6opts), FieldLenField("optlen", None, length_of="bcmcsservers"), IP6ListField("bcmcsservers", [], length_from= lambda pkt: pkt.optlen) ] # TODO : Does Nothing at the moment class DHCP6OptGeoConf(_DHCP6OptGuessPayload): #RFC-ietf-geopriv-dhcp-civil-09.txt name = "" fields_desc = [ ShortEnumField("optcode", 36, dhcp6opts), FieldLenField("optlen", None, length_of="optdata"), StrLenField("optdata", "", length_from = lambda pkt: pkt.optlen) ] # TODO: see if we encounter opaque values from vendor devices class DHCP6OptRemoteID(_DHCP6OptGuessPayload): #RFC4649 name = "DHCP6 Option - Relay Agent Remote-ID" fields_desc = [ ShortEnumField("optcode", 37, dhcp6opts), FieldLenField("optlen", None, length_of="remoteid", adjust = lambda pkt,x: x+4), IntEnumField("enterprisenum", None, iana_enterprise_num), StrLenField("remoteid", "", length_from = lambda pkt: pkt.optlen-4) ] # TODO : 'subscriberid' default value should be at least 1 byte long class DHCP6OptSubscriberID(_DHCP6OptGuessPayload): #RFC4580 name = "DHCP6 Option - Subscriber ID" fields_desc = [ ShortEnumField("optcode", 38, dhcp6opts), FieldLenField("optlen", None, length_of="subscriberid"), StrLenField("subscriberid", "", length_from = lambda pkt: pkt.optlen) ] # TODO : "The data in the Domain Name field MUST be encoded # as described in Section 8 of [5]" class DHCP6OptClientFQDN(_DHCP6OptGuessPayload): #RFC4704 name = "DHCP6 Option - Client FQDN" fields_desc = [ ShortEnumField("optcode", 39, dhcp6opts), FieldLenField("optlen", None, length_of="fqdn", adjust = lambda pkt,x: x+1), BitField("res", 0, 5), FlagsField("flags", 0, 3, "SON" ), DomainNameField("fqdn", "", length_from = lambda pkt: pkt.optlen-1) ] class DHCP6OptRelayAgentERO(_DHCP6OptGuessPayload): # RFC4994 name = "DHCP6 Option - RelayRequest Option" fields_desc = [ ShortEnumField("optcode", 43, dhcp6opts), FieldLenField("optlen", None, length_of="reqopts", fmt="!H"), _OptReqListField("reqopts", [23, 24], length_from = lambda pkt: pkt.optlen) ] ##################################################################### ### DHCPv6 messages ### ##################################################################### # Some state parameters of the protocols that should probably be # useful to have in the configuration (and keep up-to-date) DHCP6RelayAgentUnicastAddr="" DHCP6RelayHopCount="" DHCP6ServerUnicastAddr="" DHCP6ClientUnicastAddr="" DHCP6ClientIA_TA="" DHCP6ClientIA_NA="" DHCP6ClientIAID="" T1="" # Voir 2462 T2="" # Voir 2462 DHCP6ServerDUID="" DHCP6CurrentTransactionID="" # devrait etre utilise pour matcher une # reponse et mis a jour en mode client par une valeur aleatoire pour # laquelle on attend un retour de la part d'un serveur. DHCP6PrefVal="" # la valeur de preference a utiliser dans # les options preference # Emitted by : # - server : ADVERTISE, REPLY, RECONFIGURE, RELAY-REPL (vers relay) # - client : SOLICIT, REQUEST, CONFIRM, RENEW, REBIND, RELEASE, DECLINE, # INFORMATION REQUEST # - relay : RELAY-FORW (toward server) class _DHCP6GuessPayload(Packet): def guess_payload_class(self, payload): if len(payload) > 1 : print ord(payload[0]) return get_cls(dhcp6opts.get(ord(payload[0]),"DHCP6OptUnknown"), Raw) return Raw ##################################################################### ## DHCPv6 messages sent between Clients and Servers (types 1 to 11) # Comme specifie en section 15.1 de la RFC 3315, les valeurs de # transaction id sont selectionnees de maniere aleatoire par le client # a chaque emission et doivent matcher dans les reponses faites par # les clients class DHCP6(_DHCP6OptGuessPayload): name = "DHCPv6 Generic Message)" fields_desc = [ ByteEnumField("msgtype",None,dhcp6types), X3BytesField("trid",0x000000) ] overload_fields = { UDP: {"sport": 546, "dport": 547} } def hashret(self): return struct.pack("!I", self.trid)[1:4] ##################################################################### # Solicit Message : sect 17.1.1 RFC3315 # - sent by client # - must include a client identifier option # - the client may include IA options for any IAs to which it wants the # server to assign address # - The client use IA_NA options to request the assignment of # non-temporary addresses and uses IA_TA options to request the # assignment of temporary addresses # - The client should include an Option Request option to indicate the # options the client is interested in receiving (eventually # including hints) # - The client includes a Reconfigure Accept option if is willing to # accept Reconfigure messages from the server. # Le cas du send and reply est assez particulier car suivant la # presence d'une option rapid commit dans le solicit, l'attente # s'arrete au premier message de reponse recu ou alors apres un # timeout. De la meme maniere, si un message Advertise arrive avec une # valeur de preference de 255, il arrete l'attente et envoie une # Request. # - The client announces its intention to use DHCP authentication by # including an Authentication option in its solicit message. The # server selects a key for the client based on the client's DUID. The # client and server use that key to authenticate all DHCP messages # exchanged during the session class DHCP6_Solicit(DHCP6): name = "DHCPv6 Solicit Message" __metaclass__ = NewDefaultValues msgtype = 1 overload_fields = { UDP: {"sport": 546, "dport": 547} } ##################################################################### # Advertise Message # - sent by server # - Includes a server identifier option # - Includes a client identifier option # - the client identifier option must match the client's DUID # - transaction ID must match class DHCP6_Advertise(DHCP6): name = "DHCPv6 Advertise Message" __metaclass__ = NewDefaultValues msgtype = 2 overload_fields = { UDP: {"sport": 547, "dport": 546} } def answers(self, other): return (isinstance(other,DHCP6_Solicit) and other.msgtype == 1 and self.trid == other.trid) ##################################################################### # Request Message # - sent by clients # - includes a server identifier option # - the content of Server Identifier option must match server's DUID # - includes a client identifier option # - must include an ORO Option (even with hints) p40 # - can includes a reconfigure Accept option indicating whether or # not the client is willing to accept Reconfigure messages from # the server (p40) # - When the server receives a Request message via unicast from a # client to which the server has not sent a unicast option, the server # discards the Request message and responds with a Reply message # containinig Status Code option with the value UseMulticast, a Server # Identifier Option containing the server's DUID, the client # Identifier option from the client message and no other option. class DHCP6_Request(DHCP6): name = "DHCPv6 Request Message" __metaclass__ = NewDefaultValues msgtype = 3 ##################################################################### # Confirm Message # - sent by clients # - must include a clien identifier option # - When the server receives a Confirm Message, the server determines # whether the addresses in the Confirm message are appropriate for the # link to which the client is attached. cf p50 class DHCP6_Confirm(DHCP6): name = "DHCPv6 Confirm Message" __metaclass__ = NewDefaultValues msgtype = 4 ##################################################################### # Renew Message # - sent by clients # - must include a server identifier option # - content of server identifier option must match the server's identifier # - must include a client identifier option # - the clients includes any IA assigned to the interface that may # have moved to a new link, along with the addresses associated with # those IAs in its confirm messages # - When the server receives a Renew message that contains an IA # option from a client, it locates the client's binding and verifies # that the information in the IA from the client matches the # information for that client. If the server cannot find a client # entry for the IA the server returns the IA containing no addresses # with a status code option est to NoBinding in the Reply message. cf # p51 pour le reste. class DHCP6_Renew(DHCP6): name = "DHCPv6 Renew Message" __metaclass__ = NewDefaultValues msgtype = 5 ##################################################################### # Rebind Message # - sent by clients # - must include a client identifier option # cf p52 class DHCP6_Rebind(DHCP6): name = "DHCPv6 Rebind Message" __metaclass__ = NewDefaultValues msgtype = 6 ##################################################################### # Reply Message # - sent by servers # - the message must include a server identifier option # - transaction-id field must match the value of original message # The server includes a Rapid Commit option in the Reply message to # indicate that the reply is in response to a solicit message # - if the client receives a reply message with a Status code option # with the value UseMulticast, the client records the receipt of the # message and sends subsequent messages to the server through the # interface on which the message was received using multicast. The # client resends the original message using multicast # - When the client receives a NotOnLink status from the server in # response to a Confirm message, the client performs DHCP server # solicitation as described in section 17 and client-initiated # configuration as descrribed in section 18 (RFC 3315) # - when the client receives a NotOnLink status from the server in # response to a Request, the client can either re-issue the Request # without specifying any addresses or restart the DHCP server # discovery process. # - the server must include a server identifier option containing the # server's DUID in the Reply message class DHCP6_Reply(DHCP6): name = "DHCPv6 Reply Message" __metaclass__ = NewDefaultValues msgtype = 7 def answers(self, other): return (isinstance(other, DHCP6_InfoRequest) and self.trid == other.trid) ##################################################################### # Release Message # - sent by clients # - must include a server identifier option # cf p53 class DHCP6_Release(DHCP6): name = "DHCPv6 Release Message" __metaclass__ = NewDefaultValues msgtype = 8 ##################################################################### # Decline Message # - sent by clients # - must include a client identifier option # - Server identifier option must match server identifier # - The addresses to be declined must be included in the IAs. Any # addresses for the IAs the client wishes to continue to use should # not be in added to the IAs. # - cf p54 class DHCP6_Decline(DHCP6): name = "DHCPv6 Decline Message" __metaclass__ = NewDefaultValues msgtype = 9 ##################################################################### # Reconfigure Message # - sent by servers # - must be unicast to the client # - must include a server identifier option # - must include a client identifier option that contains the client DUID # - must contain a Reconfigure Message Option and the message type # must be a valid value # - the server sets the transaction-id to 0 # - The server must use DHCP Authentication in the Reconfigure # message. Autant dire que ca va pas etre le type de message qu'on va # voir le plus souvent. class DHCP6_Reconf(DHCP6): name = "DHCPv6 Reconfigure Message" __metaclass__ = NewDefaultValues msgtype = 10 overload_fields = { UDP: { "sport": 547, "dport": 546 } } ##################################################################### # Information-Request Message # - sent by clients when needs configuration information but no # addresses. # - client should include a client identifier option to identify # itself. If it doesn't the server is not able to return client # specific options or the server can choose to not respond to the # message at all. The client must include a client identifier option # if the message will be authenticated. # - client must include an ORO of option she's interested in receiving # (can include hints) class DHCP6_InfoRequest(DHCP6): name = "DHCPv6 Information Request Message" __metaclass__ = NewDefaultValues msgtype = 11 def hashret(self): return struct.pack("!I", self.trid)[1:3] ##################################################################### # sent between Relay Agents and Servers # # Normalement, doit inclure une option "Relay Message Option" # peut en inclure d'autres. # voir section 7.1 de la 3315 # Relay-Forward Message # - sent by relay agents to servers # If the relay agent relays messages to the All_DHCP_Servers multicast # address or other multicast addresses, it sets the Hop Limit field to # 32. class DHCP6_RelayForward(_DHCP6GuessPayload,Packet): name = "DHCPv6 Relay Forward Message (Relay Agent/Server Message)" fields_desc = [ ByteEnumField("msgtype", 12, dhcp6types), ShortField("hopcount", None), IP6Field("linkaddr", "::"), IP6Field("peeraddr", "::") ] def hashret(self): # we filter on peer address field return inet_pton(socket.AF_INET6, self.peeraddr) ##################################################################### # sent between Relay Agents and Servers # Normalement, doit inclure une option "Relay Message Option" # peut en inclure d'autres. # Les valeurs des champs hop-count, link-addr et peer-addr # sont copiees du messsage Forward associe. POur le suivi de session. # Pour le moment, comme decrit dans le commentaire, le hashret # se limite au contenu du champ peer address. # Voir section 7.2 de la 3315. # Relay-Reply Message # - sent by servers to relay agents # - if the solicit message was received in a Relay-Forward message, # the server constructs a relay-reply message with the Advertise # message in the payload of a relay-message. cf page 37/101. Envoie de # ce message en unicast au relay-agent. utilisation de l'adresse ip # presente en ip source du paquet recu class DHCP6_RelayReply(DHCP6_RelayForward): name = "DHCPv6 Relay Reply Message (Relay Agent/Server Message)" __metaclass__= NewDefaultValues msgtype = 13 def hashret(self): # We filter on peer address field. return inet_pton(socket.AF_INET6, self.peeraddr) def answers(self, other): return (isinstance(other, DHCP6_RelayForward) and self.count == other.count and self.linkaddr == other.linkaddr and self.peeraddr == other.peeraddr ) dhcp6_cls_by_type = { 1: "DHCP6_Solicit", 2: "DHCP6_Advertise", 3: "DHCP6_Request", 4: "DHCP6_Confirm", 5: "DHCP6_Renew", 6: "DHCP6_Rebind", 7: "DHCP6_Reply", 8: "DHCP6_Release", 9: "DHCP6_Decline", 10: "DHCP6_Reconf", 11: "DHCP6_InfoRequest", 12: "DHCP6_RelayForward", 13: "DHCP6_RelayReply" } def _dhcp6_dispatcher(x, *args, **kargs): cls = Raw if len(x) >= 2: cls = get_cls(dhcp6_cls_by_type.get(ord(x[0]), "Raw"), Raw) return cls(x, *args, **kargs) bind_bottom_up(UDP, _dhcp6_dispatcher, { "dport": 547 } ) bind_bottom_up(UDP, _dhcp6_dispatcher, { "dport": 546 } ) class DHCPv6_am(AnsweringMachine): function_name = "dhcp6d" filter = "udp and port 546 and port 547" send_function = staticmethod(send) def usage(self): msg = """ dhcp6d( dns="2001:500::1035", domain="localdomain, local", duid=None) iface=conf.iface, advpref=255, sntpservers=None, sipdomains=None, sipservers=None, nisdomain=None, nisservers=None, nispdomain=None, nispservers=None, bcmcsdomain=None, bcmcsservers=None) debug : When set, additional debugging information is printed. duid : some DUID class (DUID_LLT, DUID_LL or DUID_EN). If none is provided a DUID_LLT is constructed based on the MAC address of the sending interface and launch time of dhcp6d answering machine. iface : the interface to listen/reply on if you do not want to use conf.iface. advpref : Value in [0,255] given to Advertise preference field. By default, 255 is used. Be aware that this specific value makes clients stops waiting for further Advertise messages from other servers. dns : list of recursive DNS servers addresses (as a string or list). By default, it is set empty and the associated DHCP6OptDNSServers option is inactive. See RFC 3646 for details. domain : a list of DNS search domain (as a string or list). By default, it is empty and the associated DHCP6OptDomains option is inactive. See RFC 3646 for details. sntpservers : a list of SNTP servers IPv6 addresses. By default, it is empty and the associated DHCP6OptSNTPServers option is inactive. sipdomains : a list of SIP domains. By default, it is empty and the associated DHCP6OptSIPDomains option is inactive. See RFC 3319 for details. sipservers : a list of SIP servers IPv6 addresses. By default, it is empty and the associated DHCP6OptSIPDomains option is inactive. See RFC 3319 for details. nisdomain : a list of NIS domains. By default, it is empty and the associated DHCP6OptNISDomains option is inactive. See RFC 3898 for details. See RFC 3646 for details. nisservers : a list of NIS servers IPv6 addresses. By default, it is empty and the associated DHCP6OptNISServers option is inactive. See RFC 3646 for details. nispdomain : a list of NIS+ domains. By default, it is empty and the associated DHCP6OptNISPDomains option is inactive. See RFC 3898 for details. nispservers : a list of NIS+ servers IPv6 addresses. By default, it is empty and the associated DHCP6OptNISServers option is inactive. See RFC 3898 for details. bcmcsdomain : a list of BCMCS domains. By default, it is empty and the associated DHCP6OptBCMCSDomains option is inactive. See RFC 4280 for details. bcmcsservers : a list of BCMCS servers IPv6 addresses. By default, it is empty and the associated DHCP6OptBCMCSServers option is inactive. See RFC 4280 for details. If you have a need for others, just ask ... or provide a patch.""" print msg def parse_options(self, dns="2001:500::1035", domain="localdomain, local", startip="2001:db8::1", endip="2001:db8::20", duid=None, sntpservers=None, sipdomains=None, sipservers=None, nisdomain=None, nisservers=None, nispdomain=None, nispservers=None, bcmcsservers=None, bcmcsdomains=None, iface=conf.iface, debug=0, advpref=255): def norm_list(val, param_name): if val is None: return None if type(val) is list: return val elif type(val) is str: l = val.split(',') return map(lambda x: x.strip(), l) else: print "Bad '%s' parameter provided." % param_name self.usage() return -1 self.debug = debug # Dictionary of provided DHCPv6 options, keyed by option type self.dhcpv6_options={} for o in [(dns, "dns", 23, lambda x: DHCP6OptDNSServers(dnsservers=x)), (domain, "domain", 24, lambda x: DHCP6OptDNSDomains(dnsdomains=x)), (sntpservers, "sntpservers", 31, lambda x: DHCP6OptSNTPServers(sntpservers=x)), (sipservers, "sipservers", 22, lambda x: DHCP6OptSIPServers(sipservers=x)), (sipdomains, "sipdomains", 21, lambda x: DHCP6OptSIPDomains(sipdomains=x)), (nisservers, "nisservers", 27, lambda x: DHCP6OptNISServers(nisservers=x)), (nisdomain, "nisdomain", 29, lambda x: DHCP6OptNISDomain(nisdomain=(x+[""])[0])), (nispservers, "nispservers", 28, lambda x: DHCP6OptNISPServers(nispservers=x)), (nispdomain, "nispdomain", 30, lambda x: DHCP6OptNISPDomain(nispdomain=(x+[""])[0])), (bcmcsservers, "bcmcsservers", 33, lambda x: DHCP6OptBCMCSServers(bcmcsservers=x)), (bcmcsdomains, "bcmcsdomains", 34, lambda x: DHCP6OptBCMCSDomains(bcmcsdomains=x))]: opt = norm_list(o[0], o[1]) if opt == -1: # Usage() was triggered return False elif opt is None: # We won't return that option pass else: self.dhcpv6_options[o[2]] = o[3](opt) if self.debug: print "\n[+] List of active DHCPv6 options:" opts = self.dhcpv6_options.keys() opts.sort() for i in opts: print " %d: %s" % (i, repr(self.dhcpv6_options[i])) # Preference value used in Advertise. self.advpref = advpref # IP Pool self.startip = startip self.endip = endip # XXX TODO Check IPs are in same subnet #### # The interface we are listening/replying on self.iface = iface #### # Generate a server DUID if duid is not None: self.duid = duid else: # Timeval from time import gmtime, strftime, mktime epoch = (2000, 1, 1, 0, 0, 0, 5, 1, 0) delta = mktime(epoch) - mktime(gmtime(0)) timeval = time.time() - delta # Mac Address rawmac = get_if_raw_hwaddr(iface)[1] mac = ":".join(map(lambda x: "%.02x" % ord(x), list(rawmac))) self.duid = DUID_LLT(timeval = timeval, lladdr = mac) if self.debug: print "\n[+] Our server DUID:" self.duid.show(label_lvl=" "*4) #### # Find the source address we will use l = filter(lambda x: x[2] == iface and in6_islladdr(x[0]), in6_getifaddr()) if not l: warning("Unable to get a Link-Local address") return self.src_addr = l[0][0] #### # Our leases self.leases = {} if self.debug: print "\n[+] Starting DHCPv6 service on %s:" % self.iface def is_request(self, p): if not IPv6 in p: return False src = p[IPv6].src dst = p[IPv6].dst p = p[IPv6].payload if not isinstance(p, UDP) or p.sport != 546 or p.dport != 547 : return False p = p.payload if not isinstance(p, DHCP6): return False # Message we considered client messages : # Solicit (1), Request (3), Confirm (4), Renew (5), Rebind (6) # Decline (9), Release (8), Information-request (11), if not (p.msgtype in [1, 3, 4, 5, 6, 8, 9, 11]): return False # Message validation following section 15 of RFC 3315 if ((p.msgtype == 1) or # Solicit (p.msgtype == 6) or # Rebind (p.msgtype == 4)): # Confirm if ((not DHCP6OptClientId in p) or DHCP6OptServerId in p): return False if (p.msgtype == 6 or # Rebind p.msgtype == 4): # Confirm # XXX We do not reply to Confirm or Rebind as we # XXX do not support address assignment return False elif (p.msgtype == 3 or # Request p.msgtype == 5 or # Renew p.msgtype == 8): # Release # Both options must be present if ((not DHCP6OptServerId in p) or (not DHCP6OptClientId in p)): return False # provided server DUID must match ours duid = p[DHCP6OptServerId].duid if (type(duid) != type(self.duid)): return False if str(duid) != str(self.duid): return False if (p.msgtype == 5 or # Renew p.msgtype == 8): # Release # XXX We do not reply to Renew or Release as we # XXX do not support address assignment return False elif p.msgtype == 9: # Decline # XXX We should check if we are tracking that client if not self.debug: return False bo = Color.bold g = Color.green + bo b = Color.blue + bo n = Color.normal r = Color.red vendor = in6_addrtovendor(src) if (vendor and vendor != "UNKNOWN"): vendor = " [" + b + vendor + n + "]" else: vendor = "" src = bo + src + n it = p addrs = [] while it: l = [] if isinstance(it, DHCP6OptIA_NA): l = it.ianaopts elif isinstance(it, DHCP6OptIA_TA): l = it.iataopts opsaddr = filter(lambda x: isinstance(x, DHCP6OptIAAddress),l) a=map(lambda x: x.addr, opsaddr) addrs += a it = it.payload addrs = map(lambda x: bo + x + n, addrs) if debug: msg = r + "[DEBUG]" + n + " Received " + g + "Decline" + n msg += " from " + bo + src + vendor + " for " msg += ", ".join(addrs)+ n print msg # See sect 18.1.7 # Sent by a client to warn us she has determined # one or more addresses assigned to her is already # used on the link. # We should simply log that fact. No messaged should # be sent in return. # - Message must include a Server identifier option # - the content of the Server identifier option must # match the server's identifier # - the message must include a Client Identifier option return False elif p.msgtype == 11: # Information-Request if DHCP6OptServerId in p: duid = p[DHCP6OptServerId].duid if (type(duid) != type(self.duid)): return False if str(duid) != str(self.duid): return False if ((DHCP6OptIA_NA in p) or (DHCP6OptIA_TA in p) or (DHCP6OptIA_PD in p)): return False else: return False return True def print_reply(self, req, reply): def norm(s): if s.startswith("DHCPv6 "): s = s[7:] if s.endswith(" Message"): s = s[:-8] return s if reply is None: return bo = Color.bold g = Color.green + bo b = Color.blue + bo n = Color.normal reqtype = g + norm(req.getlayer(UDP).payload.name) + n reqsrc = req.getlayer(IPv6).src vendor = in6_addrtovendor(reqsrc) if (vendor and vendor != "UNKNOWN"): vendor = " [" + b + vendor + n + "]" else: vendor = "" reqsrc = bo + reqsrc + n reptype = g + norm(reply.getlayer(UDP).payload.name) + n print "Sent %s answering to %s from %s%s" % (reptype, reqtype, reqsrc, vendor) def make_reply(self, req): req_mac_src = req.src req_mac_dst = req.dst p = req[IPv6] req_src = p.src req_dst = p.dst p = p.payload.payload msgtype = p.msgtype trid = p.trid if msgtype == 1: # SOLICIT (See Sect 17.1 and 17.2 of RFC 3315) # XXX We don't support address or prefix assignment # XXX We also do not support relay function --arno client_duid = p[DHCP6OptClientId].duid resp = IPv6(src=self.src_addr, dst=req_src) resp /= UDP(sport=547, dport=546) if p.haslayer(DHCP6OptRapidCommit): # construct a Reply packet resp /= DHCP6_Reply(trid=trid) resp /= DHCP6OptRapidCommit() # See 17.1.2 resp /= DHCP6OptServerId(duid = self.duid) resp /= DHCP6OptClientId(duid = client_duid) else: # No Rapid Commit in the packet. Reply with an Advertise if (p.haslayer(DHCP6OptIA_NA) or p.haslayer(DHCP6OptIA_TA)): # XXX We don't assign addresses at the moment msg = "Scapy6 dhcp6d does not support address assignment" resp /= DHCP6_Advertise(trid = trid) resp /= DHCP6OptStatusCode(statuscode=2, statusmsg=msg) resp /= DHCP6OptServerId(duid = self.duid) resp /= DHCP6OptClientId(duid = client_duid) elif p.haslayer(DHCP6OptIA_PD): # XXX We don't assign prefixes at the moment msg = "Scapy6 dhcp6d does not support prefix assignment" resp /= DHCP6_Advertise(trid = trid) resp /= DHCP6OptStatusCode(statuscode=6, statusmsg=msg) resp /= DHCP6OptServerId(duid = self.duid) resp /= DHCP6OptClientId(duid = client_duid) else: # Usual case, no request for prefixes or addresse resp /= DHCP6_Advertise(trid = trid) resp /= DHCP6OptPref(prefval = self.advpref) resp /= DHCP6OptServerId(duid = self.duid) resp /= DHCP6OptClientId(duid = client_duid) resp /= DHCP6OptReconfAccept() # See which options should be included reqopts = [] if p.haslayer(DHCP6OptOptReq): # add only asked ones reqopts = p[DHCP6OptOptReq].reqopts for o in self.dhcpv6_options.keys(): if o in reqopts: resp /= self.dhcpv6_options[o] else: # advertise everything we have available for o in self.dhcpv6_options.keys(): resp /= self.dhcpv6_options[o] return resp elif msgtype == 3: #REQUEST (INFO-REQUEST is further below) client_duid = p[DHCP6OptClientId].duid resp = IPv6(src=self.src_addr, dst=req_src) resp /= UDP(sport=547, dport=546) resp /= DHCP6_Solicit(trid=trid) resp /= DHCP6OptServerId(duid = self.duid) resp /= DHCP6OptClientId(duid = client_duid) # See which options should be included reqopts = [] if p.haslayer(DHCP6OptOptReq): # add only asked ones reqopts = p[DHCP6OptOptReq].reqopts for o in self.dhcpv6_options.keys(): if o in reqopts: resp /= self.dhcpv6_options[o] else: # advertise everything we have available. # Should not happen has clients MUST include # and ORO in requests (sec 18.1.1) -- arno for o in self.dhcpv6_options.keys(): resp /= self.dhcpv6_options[o] return resp elif msgtype == 4: # CONFIRM # see Sect 18.1.2 # Client want to check if addresses it was assigned # are still appropriate # Server must discard any Confirm messages that # do not include a Client Identifier option OR # THAT DO INCLUDE a Server Identifier Option # XXX we must discard the SOLICIT if it is received with # a unicast destination address pass elif msgtype == 5: # RENEW # see Sect 18.1.3 # Clients want to extend lifetime of assigned addresses # and update configuration parameters. This message is sent # specifically to the server that provided her the info # - Received message must include a Server Identifier # option. # - the content of server identifier option must match # the server's identifier. # - the message must include a Client identifier option pass elif msgtype == 6: # REBIND # see Sect 18.1.4 # Same purpose as the Renew message but sent to any # available server after he received no response # to its previous Renew message. # - Message must include a Client Identifier Option # - Message can't include a Server identifier option # XXX we must discard the SOLICIT if it is received with # a unicast destination address pass elif msgtype == 8: # RELEASE # See section 18.1.6 # Message is sent to the server to indicate that # she will no longer use the addresses that was assigned # We should parse the message and verify our dictionary # to log that fact. # - The message must include a server identifier option # - The content of the Server Identifier option must # match the server's identifier # - the message must include a Client Identifier option pass elif msgtype == 9: # DECLINE # See section 18.1.7 pass elif msgtype == 11: # INFO-REQUEST client_duid = None if not p.haslayer(DHCP6OptClientId): if self.debug: warning("Received Info Request message without Client Id option") else: client_duid = p[DHCP6OptClientId].duid resp = IPv6(src=self.src_addr, dst=req_src) resp /= UDP(sport=547, dport=546) resp /= DHCP6_Reply(trid=trid) resp /= DHCP6OptServerId(duid = self.duid) if client_duid: resp /= DHCP6OptClientId(duid = client_duid) # Stack requested options if available reqopts = [] if p.haslayer(DHCP6OptOptReq): reqopts = p[DHCP6OptOptReq].reqopts for o in self.dhcpv6_options.keys(): resp /= self.dhcpv6_options[o] return resp else: # what else ? pass # - We won't support reemission # - We won't support relay role, nor relay forwarded messages # at the beginning ############################################################################# ############################################################################# ### Mobile IPv6 (RFC 3775) and Nemo (RFC 3963) ### ############################################################################# ############################################################################# # Mobile IPv6 ICMPv6 related classes class ICMPv6HAADRequest(_ICMPv6): name = 'ICMPv6 Home Agent Address Discovery Request' fields_desc = [ ByteEnumField("type", 144, icmp6types), ByteField("code", 0), XShortField("cksum", None), XShortField("id", None), BitEnumField("R", 1, 1, {1: 'MR'}), XBitField("res", 0, 15) ] def hashret(self): return struct.pack("!H",self.id)+self.payload.hashret() class ICMPv6HAADReply(_ICMPv6): name = 'ICMPv6 Home Agent Address Discovery Reply' fields_desc = [ ByteEnumField("type", 145, icmp6types), ByteField("code", 0), XShortField("cksum", None), XShortField("id", None), BitEnumField("R", 1, 1, {1: 'MR'}), XBitField("res", 0, 15), IP6ListField('addresses', None) ] def hashret(self): return struct.pack("!H",self.id)+self.payload.hashret() def answers(self, other): if not isinstance(other, ICMPv6HAADRequest): return 0 return self.id == other.id class ICMPv6MPSol(_ICMPv6): name = 'ICMPv6 Mobile Prefix Solicitation' fields_desc = [ ByteEnumField("type", 146, icmp6types), ByteField("code", 0), XShortField("cksum", None), XShortField("id", None), XShortField("res", 0) ] def _hashret(self): return struct.pack("!H",self.id) class ICMPv6MPAdv(_ICMPv6NDGuessPayload, _ICMPv6): name = 'ICMPv6 Mobile Prefix Advertisement' fields_desc = [ ByteEnumField("type", 147, icmp6types), ByteField("code", 0), XShortField("cksum", None), XShortField("id", None), BitEnumField("flags", 2, 2, {2: 'M', 1:'O'}), XBitField("res", 0, 14) ] def hashret(self): return struct.pack("!H",self.id) def answers(self, other): return isinstance(other, ICMPv6MPSol) # Mobile IPv6 Options classes _mobopttypes = { 2: "Binding Refresh Advice", 3: "Alternate Care-of Address", 4: "Nonce Indices", 5: "Binding Authorization Data", 6: "Mobile Network Prefix (RFC3963)", 7: "Link-Layer Address (RFC4068)", 8: "Mobile Node Identifier (RFC4283)", 9: "Mobility Message Authentication (RFC4285)", 10: "Replay Protection (RFC4285)", 11: "CGA Parameters Request (RFC4866)", 12: "CGA Parameters (RFC4866)", 13: "Signature (RFC4866)", 14: "Home Keygen Token (RFC4866)", 15: "Care-of Test Init (RFC4866)", 16: "Care-of Test (RFC4866)" } class _MIP6OptAlign: """ Mobile IPv6 options have alignment requirements of the form x*n+y. This class is inherited by all MIPv6 options to help in computing the required Padding for that option, i.e. the need for a Pad1 or PadN option before it. They only need to provide x and y as class parameters. (x=0 and y=0 are used when no alignment is required)""" def alignment_delta(self, curpos): x = self.x ; y = self.y if x == 0 and y ==0: return 0 delta = x*((curpos - y + x - 1)/x) + y - curpos return delta class MIP6OptBRAdvice(_MIP6OptAlign, Packet): name = 'Mobile IPv6 Option - Binding Refresh Advice' fields_desc = [ ByteEnumField('otype', 2, _mobopttypes), ByteField('olen', 2), ShortField('rinter', 0) ] x = 2 ; y = 0# alignment requirement: 2n class MIP6OptAltCoA(_MIP6OptAlign, Packet): name = 'MIPv6 Option - Alternate Care-of Address' fields_desc = [ ByteEnumField('otype', 3, _mobopttypes), ByteField('olen', 16), IP6Field("acoa", "::") ] x = 8 ; y = 6 # alignment requirement: 8n+6 class MIP6OptNonceIndices(_MIP6OptAlign, Packet): name = 'MIPv6 Option - Nonce Indices' fields_desc = [ ByteEnumField('otype', 4, _mobopttypes), ByteField('olen', 16), ShortField('hni', 0), ShortField('coni', 0) ] x = 2 ; y = 0 # alignment requirement: 2n class MIP6OptBindingAuthData(_MIP6OptAlign, Packet): name = 'MIPv6 Option - Binding Authorization Data' fields_desc = [ ByteEnumField('otype', 5, _mobopttypes), ByteField('olen', 16), BitField('authenticator', 0, 96) ] x = 8 ; y = 2 # alignment requirement: 8n+2 class MIP6OptMobNetPrefix(_MIP6OptAlign, Packet): # NEMO - RFC 3963 name = 'NEMO Option - Mobile Network Prefix' fields_desc = [ ByteEnumField("otype", 6, _mobopttypes), ByteField("olen", 16), ByteField("reserved", 0), ByteField("plen", 64), IP6Field("prefix", "::") ] x = 8 ; y = 4 # alignment requirement: 8n+4 class MIP6OptLLAddr(_MIP6OptAlign, Packet): # Sect 6.4.4 of RFC 4068 name = "MIPv6 Option - Link-Layer Address (MH-LLA)" fields_desc = [ ByteEnumField("otype", 7, _mobopttypes), ByteField("olen", 7), ByteEnumField("ocode", 2, _rfc4068_lla_optcode), ByteField("pad", 0), MACField("lla", ETHER_ANY) ] # Only support ethernet x = 0 ; y = 0 # alignment requirement: none class MIP6OptMNID(_MIP6OptAlign, Packet): # RFC 4283 name = "MIPv6 Option - Mobile Node Identifier" fields_desc = [ ByteEnumField("otype", 8, _mobopttypes), FieldLenField("olen", None, length_of="id", fmt="B", adjust = lambda pkt,x: x+1), ByteEnumField("subtype", 1, {1: "NAI"}), StrLenField("id", "", length_from = lambda pkt: pkt.olen-1) ] x = 0 ; y = 0 # alignment requirement: none # We only support decoding and basic build. Automatic HMAC computation is # too much work for our current needs. It is left to the user (I mean ... # you). --arno class MIP6OptMsgAuth(_MIP6OptAlign, Packet): # RFC 4285 (Sect. 5) name = "MIPv6 Option - Mobility Message Authentication" fields_desc = [ ByteEnumField("otype", 9, _mobopttypes), FieldLenField("olen", None, length_of="authdata", fmt="B", adjust = lambda pkt,x: x+5), ByteEnumField("subtype", 1, {1: "MN-HA authentication mobility option", 2: "MN-AAA authentication mobility option"}), IntField("mspi", None), StrLenField("authdata", "A"*12, length_from = lambda pkt: pkt.olen-5) ] x = 4 ; y = 1 # alignment requirement: 4n+1 # Extracted from RFC 1305 (NTP) : # NTP timestamps are represented as a 64-bit unsigned fixed-point number, # in seconds relative to 0h on 1 January 1900. The integer part is in the # first 32 bits and the fraction part in the last 32 bits. class NTPTimestampField(LongField): epoch = (1900, 1, 1, 0, 0, 0, 5, 1, 0) def i2repr(self, pkt, x): if x < ((50*31536000)<<32): return "Some date a few decades ago (%d)" % x # delta from epoch (= (1900, 1, 1, 0, 0, 0, 5, 1, 0)) to # January 1st 1970 : delta = -2209075761 i = int(x >> 32) j = float(x & 0xffffffff) * 2.0**-32 res = i + j + delta from time import strftime t = time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime(res)) return "%s (%d)" % (t, x) class MIP6OptReplayProtection(_MIP6OptAlign, Packet): # RFC 4285 (Sect. 6) name = "MIPv6 option - Replay Protection" fields_desc = [ ByteEnumField("otype", 10, _mobopttypes), ByteField("olen", 8), NTPTimestampField("timestamp", 0) ] x = 8 ; y = 2 # alignment requirement: 8n+2 class MIP6OptCGAParamsReq(_MIP6OptAlign, Packet): # RFC 4866 (Sect. 5.6) name = "MIPv6 option - CGA Parameters Request" fields_desc = [ ByteEnumField("otype", 11, _mobopttypes), ByteField("olen", 0) ] x = 0 ; y = 0 # alignment requirement: none # XXX TODO: deal with CGA param fragmentation and build of defragmented # XXX version. Passing of a big CGAParam structure should be # XXX simplified. Make it hold packets, by the way --arno class MIP6OptCGAParams(_MIP6OptAlign, Packet): # RFC 4866 (Sect. 5.1) name = "MIPv6 option - CGA Parameters" fields_desc = [ ByteEnumField("otype", 12, _mobopttypes), FieldLenField("olen", None, length_of="cgaparams", fmt="B"), StrLenField("cgaparams", "", length_from = lambda pkt: pkt.olen) ] x = 0 ; y = 0 # alignment requirement: none class MIP6OptSignature(_MIP6OptAlign, Packet): # RFC 4866 (Sect. 5.2) name = "MIPv6 option - Signature" fields_desc = [ ByteEnumField("otype", 13, _mobopttypes), FieldLenField("olen", None, length_of="sig", fmt="B"), StrLenField("sig", "", length_from = lambda pkt: pkt.olen) ] x = 0 ; y = 0 # alignment requirement: none class MIP6OptHomeKeygenToken(_MIP6OptAlign, Packet): # RFC 4866 (Sect. 5.3) name = "MIPv6 option - Home Keygen Token" fields_desc = [ ByteEnumField("otype", 14, _mobopttypes), FieldLenField("olen", None, length_of="hkt", fmt="B"), StrLenField("hkt", "", length_from = lambda pkt: pkt.olen) ] x = 0 ; y = 0 # alignment requirement: none class MIP6OptCareOfTestInit(_MIP6OptAlign, Packet): # RFC 4866 (Sect. 5.4) name = "MIPv6 option - Care-of Test Init" fields_desc = [ ByteEnumField("otype", 15, _mobopttypes), ByteField("olen", 0) ] x = 0 ; y = 0 # alignment requirement: none class MIP6OptCareOfTest(_MIP6OptAlign, Packet): # RFC 4866 (Sect. 5.5) name = "MIPv6 option - Care-of Test" fields_desc = [ ByteEnumField("otype", 16, _mobopttypes), FieldLenField("olen", None, length_of="cokt", fmt="B"), StrLenField("cokt", '\x00'*8, length_from = lambda pkt: pkt.olen) ] x = 0 ; y = 0 # alignment requirement: none class MIP6OptUnknown(_MIP6OptAlign, Packet): name = 'Scapy6 - Unknown Mobility Option' fields_desc = [ ByteEnumField("otype", 6, _mobopttypes), FieldLenField("olen", None, length_of="odata", fmt="B"), StrLenField("odata", "", length_from = lambda pkt: pkt.olen) ] x = 0 ; y = 0 # alignment requirement: none moboptcls = { 0: Pad1, 1: PadN, 2: MIP6OptBRAdvice, 3: MIP6OptAltCoA, 4: MIP6OptNonceIndices, 5: MIP6OptBindingAuthData, 6: MIP6OptMobNetPrefix, 7: MIP6OptLLAddr, 8: MIP6OptMNID, 9: MIP6OptMsgAuth, 10: MIP6OptReplayProtection, 11: MIP6OptCGAParamsReq, 12: MIP6OptCGAParams, 13: MIP6OptSignature, 14: MIP6OptHomeKeygenToken, 15: MIP6OptCareOfTestInit, 16: MIP6OptCareOfTest } # Main Mobile IPv6 Classes mhtypes = { 0: 'BRR', 1: 'HoTI', 2: 'CoTI', 3: 'HoT', 4: 'CoT', 5: 'BU', 6: 'BA', 7: 'BE', 8: 'Fast BU', 9: 'Fast BA', 10: 'Fast NA' } # From http://www.iana.org/assignments/mobility-parameters bastatus = { 0: 'Binding Update accepted', 1: 'Accepted but prefix discovery necessary', 128: 'Reason unspecified', 129: 'Administratively prohibited', 130: 'Insufficient resources', 131: 'Home registration not supported', 132: 'Not home subnet', 133: 'Not home agent for this mobile node', 134: 'Duplicate Address Detection failed', 135: 'Sequence number out of window', 136: 'Expired home nonce index', 137: 'Expired care-of nonce index', 138: 'Expired nonces', 139: 'Registration type change disallowed', 140: 'Mobile Router Operation not permitted', 141: 'Invalid Prefix', 142: 'Not Authorized for Prefix', 143: 'Forwarding Setup failed (prefixes missing)', 144: 'MIPV6-ID-MISMATCH', 145: 'MIPV6-MESG-ID-REQD', 146: 'MIPV6-AUTH-FAIL', 147: 'Permanent home keygen token unavailable', 148: 'CGA and signature verification failed', 149: 'Permanent home keygen token exists', 150: 'Non-null home nonce index expected' } class _MobilityHeader(Packet): name = 'Dummy IPv6 Mobility Header' overload_fields = { IPv6: { "nh": 135 }} def post_build(self, p, pay): p += pay l = self.len if self.len is None: l = (len(p)-8)/8 p = p[0] + struct.pack("B", l) + p[2:] if self.cksum is None: cksum = in6_chksum(135, self.underlayer, p) else: cksum = self.cksum p = p[:4]+struct.pack("!H", cksum)+p[6:] return p class MIP6MH_Generic(_MobilityHeader): # Mainly for decoding of unknown msg name = "IPv6 Mobility Header - Generic Message" fields_desc = [ ByteEnumField("nh", 59, ipv6nh), ByteField("len", None), ByteEnumField("mhtype", None, mhtypes), ByteField("res", None), XShortField("cksum", None), StrLenField("msg", "\x00"*2, length_from = lambda pkt: 8*pkt.len-6) ] # TODO: make a generic _OptionsField class _MobilityOptionsField(PacketListField): islist = 1 holds_packet = 1 def __init__(self, name, default, cls, curpos, count_from=None, length_from=None): self.curpos = curpos PacketListField.__init__(self, name, default, cls, count_from=count_from, length_from=length_from) def getfield(self, pkt, s): l = self.length_from(pkt) return s[l:],self.m2i(pkt, s[:l]) def i2len(self, pkt, i): return len(self.i2m(pkt, i)) def m2i(self, pkt, x): opt = [] while x: o = ord(x[0]) # Option type cls = self.cls if moboptcls.has_key(o): cls = moboptcls[o] try: op = cls(x) except: op = self.cls(x) opt.append(op) if isinstance(op.payload, Raw): x = op.payload.load del(op.payload) else: x = "" return opt def i2m(self, pkt, x): autopad = None try: autopad = getattr(pkt, "autopad") # Hack : 'autopad' phantom field except: autopad = 1 if not autopad: return "".join(map(str, x)) curpos = self.curpos s = "" for p in x: d = p.alignment_delta(curpos) curpos += d if d == 1: s += str(Pad1()) elif d != 0: s += str(PadN(optdata='\x00'*(d-2))) pstr = str(p) curpos += len(pstr) s += pstr # Let's make the class including our option field # a multiple of 8 octets long d = curpos % 8 if d == 0: return s d = 8 - d if d == 1: s += str(Pad1()) elif d != 0: s += str(PadN(optdata='\x00'*(d-2))) return s def addfield(self, pkt, s, val): return s+self.i2m(pkt, val) class MIP6MH_BRR(_MobilityHeader): name = "IPv6 Mobility Header - Binding Refresh Request" fields_desc = [ ByteEnumField("nh", 59, ipv6nh), ByteField("len", None), ByteEnumField("mhtype", 0, mhtypes), ByteField("res", None), XShortField("cksum", None), ShortField("res2", None), _PhantomAutoPadField("autopad", 1), # autopad activated by default _MobilityOptionsField("options", [], MIP6OptUnknown, 8, length_from = lambda pkt: 8*pkt.len) ] overload_fields = { IPv6: { "nh": 135 } } def hashret(self): # Hack: BRR, BU and BA have the same hashret that returns the same # value "\x00\x08\x09" (concatenation of mhtypes). This is # because we need match BA with BU and BU with BRR. --arno return "\x00\x08\x09" class MIP6MH_HoTI(_MobilityHeader): name = "IPv6 Mobility Header - Home Test Init" fields_desc = [ ByteEnumField("nh", 59, ipv6nh), ByteField("len", None), ByteEnumField("mhtype", 1, mhtypes), ByteField("res", None), XShortField("cksum", None), StrFixedLenField("cookie", "\x00"*8, 8), _PhantomAutoPadField("autopad", 1), # autopad activated by default _MobilityOptionsField("options", [], MIP6OptUnknown, 16, length_from = lambda pkt: 8*(pkt.len-1)) ] overload_fields = { IPv6: { "nh": 135 } } def hashret(self): return self.cookie class MIP6MH_CoTI(MIP6MH_HoTI): name = "IPv6 Mobility Header - Care-of Test Init" __metaclass__ = NewDefaultValues mhtype = 2 def hashret(self): return self.cookie class MIP6MH_HoT(_MobilityHeader): name = "IPv6 Mobility Header - Home Test" fields_desc = [ ByteEnumField("nh", 59, ipv6nh), ByteField("len", None), ByteEnumField("mhtype", 3, mhtypes), ByteField("res", None), XShortField("cksum", None), ShortField("index", None), StrFixedLenField("cookie", "\x00"*8, 8), StrFixedLenField("token", "\x00"*8, 8), _PhantomAutoPadField("autopad", 1), # autopad activated by default _MobilityOptionsField("options", [], MIP6OptUnknown, 24, length_from = lambda pkt: 8*(pkt.len-2)) ] overload_fields = { IPv6: { "nh": 135 } } def hashret(self): return self.cookie def answers(self): if (isinstance(other, MIP6MH_HoTI) and self.cookie == other.cookie): return 1 return 0 class MIP6MH_CoT(MIP6MH_HoT): name = "IPv6 Mobility Header - Care-of Test" __metaclass__ = NewDefaultValues mhtype = 4 def hashret(self): return self.cookie def answers(self): if (isinstance(other, MIP6MH_CoTI) and self.cookie == other.cookie): return 1 return 0 class LifetimeField(ShortField): def i2repr(self, pkt, x): return "%d sec" % (4*x) class MIP6MH_BU(_MobilityHeader): name = "IPv6 Mobility Header - Binding Update" fields_desc = [ ByteEnumField("nh", 59, ipv6nh), ByteField("len", None), # unit == 8 bytes (excluding the first 8 bytes) ByteEnumField("mhtype", 5, mhtypes), ByteField("res", None), XShortField("cksum", None), XShortField("seq", None), # TODO: ShortNonceField FlagsField("flags", 49, 6, "AHLKMR"), XBitField("reserved", 0, 10), LifetimeField("mhtime", 3), # unit == 4 seconds _PhantomAutoPadField("autopad", 1), # autopad activated by default _MobilityOptionsField("options", [], MIP6OptUnknown, 12, length_from = lambda pkt: 8*pkt.len - 4) ] overload_fields = { IPv6: { "nh": 135 } } def hashret(self): # Hack: see comment in MIP6MH_BRR.hashret() return "\x00\x08\x09" def answers(self, other): if isinstance(other, MIP6MH_BRR): return 1 return 0 class MIP6MH_BA(_MobilityHeader): name = "IPv6 Mobility Header - Binding ACK" fields_desc = [ ByteEnumField("nh", 59, ipv6nh), ByteField("len", None), # unit == 8 bytes (excluding the first 8 bytes) ByteEnumField("mhtype", 6, mhtypes), ByteField("res", None), XShortField("cksum", None), ByteEnumField("status", 0, bastatus), FlagsField("flags", 2, 2, "KR"), XBitField("res2", None, 6), XShortField("seq", None), # TODO: ShortNonceField XShortField("mhtime", 0), # unit == 4 seconds _PhantomAutoPadField("autopad", 1), # autopad activated by default _MobilityOptionsField("options", [], MIP6OptUnknown, 12, length_from = lambda pkt: 8*pkt.len-4) ] overload_fields = { IPv6: { "nh": 135 }} def hashret(self): # Hack: see comment in MIP6MH_BRR.hashret() return "\x00\x08\x09" def answers(self, other): if (isinstance(other, MIP6MH_BU) and other.mhtype == 5 and self.mhtype == 6 and other.flags & 0x1 and # Ack request flags is set self.seq == other.seq): return 1 return 0 _bestatus = { 1: 'Unknown binding for Home Address destination option', 2: 'Unrecognized MH Type value' } # TODO: match Binding Error to its stimulus class MIP6MH_BE(_MobilityHeader): name = "IPv6 Mobility Header - Binding Error" fields_desc = [ ByteEnumField("nh", 59, ipv6nh), ByteField("len", None), # unit == 8 bytes (excluding the first 8 bytes) ByteEnumField("mhtype", 7, mhtypes), ByteField("res", 0), XShortField("cksum", None), ByteEnumField("status", 0, _bestatus), ByteField("reserved", 0), IP6Field("ha", "::"), _MobilityOptionsField("options", [], MIP6OptUnknown, 24, length_from = lambda pkt: 8*(pkt.len-2)) ] overload_fields = { IPv6: { "nh": 135 }} _mip6_mhtype2cls = { 0: MIP6MH_BRR, 1: MIP6MH_HoTI, 2: MIP6MH_CoTI, 3: MIP6MH_HoT, 4: MIP6MH_CoT, 5: MIP6MH_BU, 6: MIP6MH_BA, 7: MIP6MH_BE } ############################################################################# ############################################################################# ### SEND and CGA ### ############################################################################# ############################################################################# SEND_TAG='\x08\x6F\xCA\x5E\x10\xB2\x00\xC9\x9C\x8C\xE0\x01\x64\x27\x7C\x08' _cga_ext = { 0xFFFD: "Exp_FFFD", # etype to extension name mapping 0xFFFE: "Exp_FFFE", 0xFFFF: "Exp_FFFF"} _cga_ext_cls = {} # etype to extension class mapping class CGAExt(Packet): # RFC 4581 name = "CGA Extension" fields_desc = [ ShortEnumField("etype", None, _cga_ext), FieldLenField("elen", None, length_of="edata", fmt="!H"), StrLenField("edata", "", length_from = lambda pkt: pkt.elen) ] def guess_payload_class(self, s): return Padding class CGASubnetPrefixField(IP6Field): def __init__(self, name, default): Field.__init__(self, name, default, "8s") def i2m(self, pkt, x): return inet_pton(socket.AF_INET6, x)[:8] def m2i(self, pkt, x): x += "\x00"*8 return inet_ntop(socket.AF_INET6, x) class CGAPubKeyField(StrField): def i2m(self, pkt, x): return str(x) def m2i(self, pkt, m): return m def getfield(self, pkt, s): # this is an RSA PubKey ? try: z = PubKey(s) l = len(str(z)) if z!=None: return s[l:], z except: pass # this is an ECC key ? try: from ecc import ECCkey z = ECCkey(s) l = len(str(z)) return s[l:], z except: return s, None def i2repr(self, pkt, x): if isinstance(x, PubKey): return "%d bits, exp %d" % (x.modulusLen, x.pubExp) try: from ecc import ECCkey if isinstance(x, ECCkey): return SigTypeID[x.get_sigtypeID()[0]] except ImportError: pass return x class CGAExtField(PacketListField): def i2len(self, pkt, z): if z is None or z == []: return 0 return sum(map(lambda x: len(str(x)) ,z)) def m2i(self, pkt, m): if len(m) >= 2: etype = struct.unpack("!H", m[:2])[0] if _cga_ext_cls.has_key(etype): return _cga_ext_cls[etype](m) return self.cls(m) def getfield(self, pkt, s): lst = [] remain = s while len(remain)>=4: p = self.m2i(pkt,remain) if Padding in p: pad = p[Padding] remain = pad.load del(pad.underlayer.payload) else: remain = "" lst.append(p) return "",lst class CGAParams(Packet): name = "CGA Parameters" fields_desc = [ StrFixedLenField("modifier", '\x00'*16, 16), CGASubnetPrefixField("prefix", "::"), ByteField("ccount", 0), CGAPubKeyField("pubkey", ""), CGAExtField("ext", [], CGAExt) ] def __init__(self, _pkt="", *args, **kargs): if _pkt != "" and (not '\x00' in _pkt) and os.path.isfile(_pkt): # file f = open(_pkt) s = f.read() f.close() Packet.__init__(self, s, *args, **kargs) else: Packet.__init__(self, _pkt=_pkt, *args, **kargs) def hash1(self): """ Return the 64-bits Hash1 value as described in section 3 of RFC 3972. """ s = SHA.new(str(self)).digest() return s[:8] def hash2(self): """ Return the 112-bits Hash2 value as described in section 3 of RFC 3972. """ ''' daveti: this is NOT Hash2 used in the CGA verification tmp = self.copy() tmp.prefix = "::" tmp.ccount = 0 ''' # Hash2 based on CGA verification ext_str = "".join(map(lambda x: str(x), self.ext)) key_str = str(self.pubkey) s = "".join((self.modifier, '\x00\x00\x00\x00\x00\x00\x00\x00\x00', key_str, ext_str)) s = SHA.new(str(s)).digest() return s[:14] def CGAgen1(prefix,key,sec,ext=[],modifier=None,ccount=None): """compute unverified, but deterministic CGA values Should not be called directly""" if sec < 0 or sec > 7: print "sec must be an integer between 0 and 7" return None try: from ecc import ECCkey except ImportError: class ECCkey(): pass if not isinstance(key, PubKey) \ and not isinstance(key, ECCkey): print "key parameter is not a public key" return None if type(ext) != list: ext = [ext] ext_str = "".join(map(lambda x: str(x), ext)) # different steps of section 4 of RFC3972 # 1 # if a modifier is specified, shunt the randomization process if modifier!=None: m = modifier else: m = randstring(16) # 2, 3 # we skip 2 and 3 if the modifier is fixed during the call # daveti: The RFC says that we could skip 2-3 if sec=0 rather than a fixed modifier. # Even if the modifier is fixed, as long as sec!=0, we need to update the modifier. # The implication seems that the fixed modifier should only be used with sec=0 if we # want the modifier to be fixed! key_str = str(key) #if not modifier: if sec != 0: while True: # TC: seems more optimized # s = m + '\x00'*9 + key_str + ext_str s = "".join((m, '\x00\x00\x00\x00\x00\x00\x00\x00\x00', key_str, ext_str)) s = SHA.new(s).digest() Hash2 = s[:14] if sec == 0 or Hash2[:2*sec] == '\x00\x00'*sec: break m = pkcs_i2osp(pkcs_os2ip(m) + 1, 16)[-16:] # 4 if not ccount: ccount = 0 # 5 dad_retries = 0 c = CGAParams(modifier = m, prefix = prefix, ccount = ccount, pubkey = key, ext = ext) Hash1 = c.hash1() # 6 tmp = (ord(Hash1[0]) & 0x1c) + (sec << 5) ifaceid = chr(tmp) + Hash1[1:] # 7 p = socket.inet_pton(socket.AF_INET6, prefix)[:8] addr = socket.inet_ntop(socket.AF_INET6, p + ifaceid) # steps 8 and 9 are not performed here, but are performed in the function CGAgen() # 9 # c = CGAParams(modifier = m, prefix = prefix, ccount = ccount, # pubkey = key, ext = ext) return (addr, c) def CGAgen(prefix, key, sec, ext=[], do_dad=False, modifier=None, ccount=None): """ Given: - the prefix: an address, only first 64 bits been taken into account. - the public key: a PubKey instance - the security parameter: a value between 0 and 7 - optional extensions as a string, extension or list of extensions. One can render the process deterministic by passing: - the modifier: a 16 bytes random number - the collision counter: a value between 0 and 2 the function returns a tuple (addr, params), where: - addr is the CGA - params are the associated CGA parameters (a CGAParams instance) The algorithm is the one described in section 4 of RFC 3972. if do_dad is set to True (False being the default value), then the duplicate address detection step described at step 8 in reference document is done. None is returned on error. """ # perform steps 1 to 7 (+ step 9) (addr,c) = CGAgen1(prefix,key,sec,ext,modifier,ccount) if not ccount: ccount = 0 # step 8 while True: if not do_dad: break if ccount == 3: print "DAD performed three times, three collisions found" return None # FIXME # TC 10/08/09: this function call perform a broken DAD # - only listen for one answer # - does not listen for the sollicited node multicast address resp = neighsol(addr, "::", iface=conf.iface) if resp is None: break else: ccount += 1 (addr,c)=CGAgen1(prefix,key,sec,ext,c.modifier,ccount) # step 9 has already been performed return (addr, c) def CGAverify(addr, params): """ Given: - an address ('addr'): - CGA parameters ('params'): the function returns True if the public key in the CGA parameters is verified as the authentic public key of the address owner. False is returned if the verification fails. The algorithm is the one described in section 5 of RFC 3972. """ if not isinstance(params, CGAParams): print "params argument is not a CGAParams structure" return False # 1 if params.ccount < 0 or params.ccount > 2: print "Found invalid Collision Count (%d) in CGA verification" % params.ccount return False # 2 params_prefix = socket.inet_pton(socket.AF_INET6, params.prefix)[:8] addr_prefix = socket.inet_pton(socket.AF_INET6, addr)[:8] if params_prefix != addr_prefix: print "Mismatch in subnet prefixes during CGA verification" return False # 3 Hash1 = params.hash1() # 4 mask1 = '\x1c\xff\xff\xff\xff\xff\xff\xff' ifaceid = socket.inet_pton(socket.AF_INET6, addr)[8:] if strand(mask1, ifaceid) != strand(mask1, Hash1): print "Mismatch between ifaceid and Hash1 during CGA verification" return False # 5 sec = (ord(ifaceid[0]) >> 5) & 0x07 # 6 Hash2 = params.hash2() # 7 if Hash2[:2*sec] != '\x00'*sec*2: print "Invalid Hash2 value found during CGA verification" print " Sec: %d, Hash2: %s" % (sec, repr(Hash2)) return False return True def CGAsign(m, key, tag=None): """ CGA Sign message 'm' with provided private key (Key instance) as described in Section 6 of RFC 3972. 'tag' argument is the tag expected by the algorithm. If none is provided, it defaults to SEND tag as defined in RFC 3971, i.e. 0x086F CA5E 10B2 00C9 9C8C E001 6427 7C08 """ if tag is None: tag = SEND_TAG m = tag + m s = key.sign(m, "pkcs") return s def CGAverifySig(m, sig, cga, params, tag=None): """ Verify message 'm' signature is indeed 'sig' as described in section 6 of RFC 3972. 'cga' is the address and 'params' are associated parameters. """ if tag is None: tag = SEND_TAG if not CGAverify(cga, params): return False m = tag + m return params.pubkey.verify(m, sig, "pkcs") SigTypeID = { 0: "RSA/SHA-1", 1: "RSA/SHA-256", 9: "ECDSA (P-256)/SHA-256", 10: "ECDSA (P-384)/SHA-384", 11: "ECDSA (P-521)/SHA-512" } SigTypeHashfunc = { 0: "sha1", 1: "sha256", 9: "sha256", 10: "sha384", 11: "sha512" } class SigAlg(Packet): name = "Signature Algorithm field" fields_desc = [ BitField("sign", 0, 1), BitField("reserved",0,2), BitEnumField("sigtypeID",0,5, SigTypeID ) ] def extract_padding(self, pay): return "",pay class ICMPv6NDOptSSA(_ICMPv6NDGuessPayload, Packet): name = "ICMPv6 Neighbor Discovery Option - SSA" fields_desc = [ ByteEnumField("type", 42, icmp6ndopts), ByteField("len", None), ByteField("padlen", None), ByteField("res", None), PacketListField("sigalgs", [], SigAlg, length_from = lambda pkt: 8*pkt.len - 4 - pkt.padlen ), StrLenField("pad", None, length_from = lambda pkt: pkt.padlen) ] def post_build(self, pkt, pay): if self.pad is None: padlen = 8 - (len(pkt) % 8) if padlen == 8: padlen = 0 pkt += '\x00'*padlen else: padlen = len(self.pad) if self.padlen is None: pkt = pkt[:2] + chr(padlen) + pkt[3:] if self.len is None: l = len(pkt) / 8 pkt = pkt[:1] + chr(l) + pkt[2:] return pkt + pay class ICMPv6NDOptCGA(_ICMPv6NDGuessPayload, Packet): name = "ICMPv6 Neighbor Discovery Option - CGA" fields_desc = [ ByteEnumField("type", 11, icmp6ndopts), ByteField("len", None), ByteField("padlen", None), ByteField("res", None), PacketLenField("cgaparams", "", CGAParams, length_from = lambda pkt: 8*pkt.len - 4 - pkt.padlen ), StrLenField("pad", None, length_from = lambda pkt: pkt.padlen) ] def post_build(self, pkt, pay): if self.pad is None: padlen = 8 - (len(pkt) % 8) if padlen == 8: padlen = 0 pkt += '\x00'*padlen else: padlen = len(self.pad) if self.padlen is None: pkt = pkt[:2] + chr(padlen) + pkt[3:] if self.len is None: l = len(pkt) / 8 pkt = pkt[:1] + chr(l) + pkt[2:] return pkt + pay # This field is a transparent one to allow passing a public, # a private key or a cert for the purpose of signature computation # verification. class _PhantomKeyField(ByteField): def addfield(self, pkt, s, val): return s def getfield(self, pkt, s): # internal value will possibly be set when key # hash will be available from the dissection of # "keyh" field. It is temporarily set to None. return s, None def i2repr(self, pkt, x): try: from ecc import ECCkey if isinstance(x, ECCkey): # XXX FIX ME: do more, i.e. print the key return "ECC Key available" except ImportError: pass if isinstance(x, PubKey): # XXX FIX ME: do more, i.e. print the key return "Public Key available" elif isinstance(x, Key): # XXX FIX ME: do more, i.e. print the key return "Private Key available" elif isinstance(x, Cert): # XXX FIX ME: do more, i.e. print the Cert return "Certificate available" return "No key/cert available for signature/verification" # XXX At some point, this function should be replaced by Phil's # ASN1 module magic and be moved to cert class. Implementation # below is pure hack ... but we need it. def construct_der_pubkey(m, mLen, e): """ Construct the DER encoded SubjectPublicKeyInfo structure from modulus string, modulus length (in bytes to add leading padding if needed) and exponent value. """ # Construct wrapped modulus padlen = mLen - len(m) + 1 m = '\x00' * padlen + m mlen = len(m) m = '\x02\x82' + struct.pack("!H", mlen) + m # Construct wrapped exponent e_str = "" while e: e_str = chr(e & 0xff) + e_str e = e >> 8 e = e_str elen = len(e) e = '\x02' + chr(elen) + e # Wrap the two res = m + e reslen = len(res) res = '\x30\x82' + struct.pack("!H", reslen) + res # Put everything in a bitstring res = '\x00' + res reslen = len(res) res = '\x03\x82' + struct.pack('!H', reslen) + res # rsaEncryption rsa_str = "\x06\x09\x2a\x86\x48\x86\xf7\x0d\x01\x01\x01\x05\x00" rsa_str_len = len(rsa_str) rsa_str = '\x30' + chr(rsa_str_len) + rsa_str # Assemble both parts res = rsa_str + res res_len = len(res) # Wrap everything in a sequence res = '\x30\x82'+ struct.pack('!H', res_len) + res return res def get_public_key_hash(k, sigtypeID=0): """ Return the most-significant 128-bit of a SHA-XXX hash of the public key. k can be a Key, PubKey or Cert instance. None is returned on error. This function is used by "Key Hash" field in RSA Signature option The hash function is determined by the sigtypeID parameter. """ import hashlib s = None if isinstance(k, PubKey): s = str(k) elif isinstance(k, Key): mLen = k.modulusLen / 8 m = pkcs_i2osp(k.modulus, mLen) e = k.pubExp s = construct_der_pubkey(m, mLen, e) elif isinstance(k, Cert): mLen = k.modulusLen / 8 m = pkcs_i2osp(k.modulus, mLen) e = k.exponent s = construct_der_pubkey(m, mLen, e) try: from ecc import ECCkey if isinstance(k, ECCkey): s = str(k) except ImportError: pass if s is None: return None try: hashfunc = getattr(hashlib, SigTypeHashfunc[sigtypeID]) s = hashfunc(s).digest() except KeyError: print "sigtypeID must be 0, 1, 9, 10 or 11" return s[:16] class _XYKeyHashField(StrFixedLenField): def getfield(self, pkt, s): l = self.length_from(pkt) return s[l:], self.m2i(pkt,s[:l]) def i2m(self, pkt, x): if x is None: x = "" if pkt.key is not None: x = get_public_key_hash(pkt.key, pkt.sigtypeID) elif type(x) is not str: x=str(x) return x def addfield(self, pkt, s, val): l = self.length_from(pkt) return s+struct.pack("%is"%l,self.i2m(pkt, val)) # RFC 3971 Bug #1: without the key modulus length you cannot easily # extract the padding from the signature field. padlen field has been # removed from the packet format in version 06 of the draft. # Mail sent on cga-ext@ietf.org on that topic with no response. # IMHO, this is a design error. # Below, we make the hypothesis that the 'sig' field holding the # signature is a multiple of 8 bytes in length and compute the padding # value from that hypothesis (i.e. fixed: 4 bytes) # # RFC 3971 Bug #2: for the purpose of signature field computation, an # ICMPv6 checksum must be computed on a custom version of the packet # (RSA Signature and following options removed, updated payload length # value in the IPv6 header). The description in RFC 3971 is clearly # misleading. # Mail sent on cga-ext@ietf.org, with no response. Then, to Eric # Levy-Abegnoli who provided useful information on the way to generate # a first checksum for the purpose of RSA Signature computation. # # --arno # # TC: added a padlen field as specified in draft-cheneau-cis-send-sig-agility # removes all ambiguity on how to compute the padding with the sigtypeID is # different from 0 (0 is for backward compatibility with RFC 3971) class ICMPv6NDOptUSSig(_ICMPv6NDGuessPayload, Packet): name = "ICMPv6 Neighbor Discovery Option - Universal Signature" fields_desc = [ ByteEnumField("type", 12, icmp6ndopts), ByteField("len", None), ByteField("padlen", None), XBitField("pos", 0, 3), # key position field, as defined in old version of draft-cheneau-csi-send-sig-agility XBitField("sigtypeID", 0, 5), _PhantomKeyField("key", None), # I'm not really there _XYKeyHashField("keyh", None, 16), StrLenField("sig", None, # behavior depends on sigtypeID value length_from = lambda pkt: (pkt.sigtypeID==0 and 8*(pkt.len-3))\ or 8*pkt.len - pkt.padlen -20), StrLenField("pad", None, # behavior depends on sigtypeID value length_from = lambda pkt: (pkt.sigtypeID==0 and 4) or pkt.padlen) ] def build_tbs_string(self): """ build the string to be signed, as described in Section 5.2 of RFC 3971. None is returned on error. """ tmp = self while tmp.underlayer: tmp = tmp.underlayer tmp = tmp.copy() p = tmp[ICMPv6NDOptUSSig] # We have to construct a fake version of the packet # without the Universal Signature option. We work on a copy c = p.underlayer if c is None: print "Missing underlayed during Universal Signature Option post_build()" return None # Remove the RSA Signature option (and following options) c.payload = None p.underlayer = None # Find ICMPv6 payload and flush checksum field i = c while not (isinstance(i, _ICMPv6) or i is None): i = i.underlayer if i is None: print "Unable to find ICMPv6 payload during Universal Signature Option post_build()" return None del(i.cksum) # Find IPv6 payload and flush payload length field p = i while not (isinstance(p, IPv6) or p is None): p = p.underlayer if p is None: print "Unable to find IPv6 payload during Universal Signature Option post_build()" return None del(p.plen) src = p.src dst = p.dst pay = str(i) # Now, let's build the string that will be signed s = SEND_TAG s += socket.inet_pton(socket.AF_INET6, src) s += socket.inet_pton(socket.AF_INET6, dst) s += pay return s def verify_sig(self, k): """ Verify universal signature option validity against provided key (public or private key pair) or certificate. """ if self.sig is None: return False # signature's size is the size of the Public Key if self.sigtypeID == 0: import math signature = self.sig[:int(math.ceil(float(len(k))/8))] s = self.build_tbs_string() return k.verify(s, signature, "pkcs") elif self.sigtypeID in [1, 9, 10, 11]: # this is RSA/SHA-256 and ECC s = self.build_tbs_string() return k.verify(s,self.sig, "pkcs", SigTypeHashfunc[self.sigtypeID]) def post_build(self, pkt, pay): sig = "" if self.sig is None: k = self.key if k is not None: s = self.build_tbs_string() if s is not None: sig = k.sign(s, "pkcs", SigTypeHashfunc[self.sigtypeID]) # add other signature algorithms here self.sig = sig pkt = pkt[:20] + sig + pkt[20:] else: print "Unable to compute signature in Universal Signature option post_build()" else: print "No private key provided in Universal Signature option" if self.pad is None: padlen = 8 - (len(pkt) % 8) if padlen == 8: padlen = 0 if self.sigtypeID != 0: pkt = pkt[:2] + chr(padlen) + pkt[3:] pkt += '\x00'*padlen if self.len is None: l = len(pkt) / 8 pkt = pkt[:1] + chr(l) + pkt[2:] return pkt + pay class _TimestampField(IntField): # Internal repr for the timestamp value is a float epoch = (1970, 1, 1, 0, 0, 0, 5, 1, 0) # our Epoch def getfield(self, pkt, s): sec, rem = s[:8], s[8:] sec_frac = struct.unpack('!H', sec[6:])[0] / 65536. sec = pkcs_os2ip(sec[:6]) i = sec + sec_frac return rem, i def addfield(self, pkt, s, val): if val is None: val = time.time() return s + pkcs_i2osp(int(val*65536), 8) def i2repr(self, pkt, x): from time import gmtime, strftime, mktime, localtime if x is None: x = localtime() delta = mktime(self.epoch) - mktime(gmtime(0)) x = x + delta t = strftime("%a, %d %b %Y %H:%M:%S +0000", gmtime(x)) return "%s (%f)" % (t, x) class ICMPv6NDOptTimestamp(_ICMPv6NDGuessPayload, Packet): name = "ICMPv6 Neighbor Discovery Option - Timestamp" fields_desc = [ ByteEnumField("type", 13, icmp6ndopts), ByteField("len", 2), StrFixedLenField("res", None, 6), _TimestampField("timestamp", None) ] class ICMPv6NDOptNonce(_ICMPv6NDGuessPayload, Packet): name = "ICMPv6 Neighbor Discovery Option - Nonce" fields_desc = [ ByteEnumField("type", 14, icmp6ndopts), FieldLenField("len", None, length_of="nonce", fmt="B", adjust = lambda pkt,x: (x+2)/8), StrLenField("nonce", "\x00"*6, length_from = lambda pkt: 8*pkt.len - 2) ] def hashret(self): return self.nonce + self.payload.hashret() def answers(self, other): tmp = other while tmp.underlayer: tmp = tmp.underlayer if ICMPv6NDOptNonce in tmp: tmp = tmp[ICMPv6NDOptNonce] return tmp.nonce == self.nonce return 0 _send_name_types = { 1: "DER Encoded X.501 Name", 2: "FQDN"} class ICMPv6NDOptTrustAnchor(_ICMPv6NDGuessPayload, Packet): name = "ICMPv6 Neighbor Discovery Option - Trust Anchor" fields_desc = [ ByteEnumField("type", 15, icmp6ndopts), ByteField("len", None), ByteEnumField("nametype", 1, _send_name_types), ByteField("padlen", None), StrLenField("name_field", None, length_from = lambda pkt: 8*pkt.len - 4 - pkt.padlen), StrLenField("pad", None, length_from = lambda pkt: pkt.padlen) ] def post_build(self, pkt, pay): if self.pad is None: padlen = 8 - (len(pkt) % 8) if padlen == 8: padlen = 0 pkt += '\x00'*padlen else: padlen = len(self.pad) if self.padlen is None: pkt = pkt[:2] + chr(padlen) + pkt[3:] if self.len is None: l = len(pkt) / 8 pkt = pkt[:1] + chr(l) + pkt[2:] return pkt + pay class CertField(StrLenField): def i2m(self, pkt, i): if i is None: i = "" return str(i) def getfield(self, pkt, s): l = self.length_from(pkt) # available length m = s try: s = Cert(s) except: pass l = len(str(s)) # we give back what we did not eat return m[l:], s def i2repr(self, pkt, i): return repr(i) _send_cert_types = { 1: "X.509v3 Certificate" } class ICMPv6NDOptCertificate(_ICMPv6NDGuessPayload, Packet): name = "ICMPv6 Neighbor Discovery Option - Certificate" fields_desc = [ ByteEnumField("type", 16, icmp6ndopts), ByteField("len", None), ByteEnumField("certtype", 1, _send_cert_types), ByteField("res", None), CertField("cert", None, length_from = lambda pkt: 8*pkt.len - 4), StrLenField("pad", None, length_from = lambda pkt: 8*pkt.len - 4 - len(str(pkt.cert))) ] def post_build(self, pkt, pay): if self.pad is None: padlen = 8 - (len(pkt) % 8) if padlen == 8: padlen = 0 pkt += '\x00'*padlen else: padlen = len(self.pad) if self.len is None: l = len(pkt) / 8 pkt = pkt[:1] + chr(l) + pkt[2:] return pkt + pay class ICMPv6SEND_CPS(_ICMPv6NDGuessPayload, _ICMPv6): name = 'ICMPv6 SEND Certification Path Solicitation' fields_desc = [ ByteEnumField("type", 148, icmp6types), ByteField("code", 0), XShortField("cksum", None), XShortField("id", None), XShortField("comp", 0xff) ] def hashret(self): return struct.pack("!H",self.id)+self.payload.hashret() class ICMPv6SEND_CPA(_ICMPv6NDGuessPayload, _ICMPv6): name = 'ICMPv6 SEND Certification Path Advertisement' fields_desc = [ ByteEnumField("type", 149, icmp6types), ByteField("code", 0), XShortField("cksum", None), XShortField("id", None), XShortField("allcomp", None), XShortField("comp", None), XShortField("res", 0x00) ] def hashret(self): return struct.pack("!H",self.id)+self.payload.hashret() # TODO: # - Implement sth to perform the check described in 5.1.2. of RFC 3971. # - answers and hashret() # - helpers for delegation in certificates # - improve implementation of "name" field in ICMPv6NDOptTrustAnchor # to deal differently with name types (DNS wire or DER encoded version) ############################################################################# ############################################################################# ### Traceroute6 ### ############################################################################# ############################################################################# class AS_resolver6(AS_resolver_riswhois): def _resolve_one(self, ip): """ overloaded version to provide a Whois resolution on the embedded IPv4 address if the address is 6to4 or Teredo. Otherwise, the native IPv6 address is passed. """ if in6_isaddr6to4(ip): # for 6to4, use embedded @ tmp = inet_pton(socket.AF_INET6, ip) addr = inet_ntop(socket.AF_INET, tmp[2:6]) elif in6_isaddrTeredo(ip): # for Teredo, use mapped address addr = teredoAddrExtractInfo(ip)[2] else: addr = ip _, asn, desc = AS_resolver_riswhois._resolve_one(self, addr) return ip,asn,desc class TracerouteResult6(TracerouteResult): def show(self): return self.make_table(lambda (s,r): (s.sprintf("%-42s,IPv6.dst%:{TCP:tcp%TCP.dport%}{UDP:udp%UDP.dport%}{ICMPv6EchoRequest:IER}"), # TODO: ICMPv6 ! s.hlim, r.sprintf("%-42s,IPv6.src% {TCP:%TCP.flags%}"+ "{ICMPv6DestUnreach:%ir,type%}{ICMPv6PacketTooBig:%ir,type%}"+ "{ICMPv6TimeExceeded:%ir,type%}{ICMPv6ParamProblem:%ir,type%}"+ "{ICMPv6EchoReply:%ir,type%}"))) def get_trace(self): trace = {} for s,r in self.res: if IPv6 not in s: continue d = s[IPv6].dst if d not in trace: trace[d] = {} t = not (ICMPv6TimeExceeded in r or ICMPv6DestUnreach in r or ICMPv6PacketTooBig in r or ICMPv6ParamProblem in r) trace[d][s[IPv6].hlim] = r[IPv6].src, t for k in trace.values(): m = filter(lambda x: k[x][1], k.keys()) if not m: continue m = min(m) for l in k.keys(): if l > m: del(k[l]) return trace def graph(self, ASres=AS_resolver6(), **kargs): TracerouteResult.graph(self, ASres=ASres, **kargs) def traceroute6(target, dport=80, minttl=1, maxttl=30, sport=RandShort(), l4 = None, timeout=2, verbose=None, **kargs): """ Instant TCP traceroute using IPv6 : traceroute6(target, [maxttl=30], [dport=80], [sport=80]) -> None """ if verbose is None: verbose = conf.verb if l4 is None: a,b = sr(IPv6(dst=target, hlim=(minttl,maxttl))/TCP(seq=RandInt(),sport=sport, dport=dport), timeout=timeout, filter="icmp6 or tcp", verbose=verbose, **kargs) else: a,b = sr(IPv6(dst=target, hlim=(minttl,maxttl))/l4, timeout=timeout, verbose=verbose, **kargs) a = TracerouteResult6(a.res) if verbose: a.display() return a,b ############################################################################# ############################################################################# ### Sockets ### ############################################################################# ############################################################################# class L3RawSocket6(L3RawSocket): def __init__(self, type = ETH_P_IPV6, filter=None, iface=None, promisc=None, nofilter=0): L3RawSocket.__init__(self, type, filter, iface, promisc) # NOTE: if fragmentation is needed, it will be done by the kernel (RFC 2292) self.outs = socket.socket(socket.AF_INET6, socket.SOCK_RAW, socket.IPPROTO_RAW) self.ins = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.htons(type)) def IPv6inIP(dst='203.178.135.36', src=None): _IPv6inIP.dst = dst _IPv6inIP.src = src if not conf.L3socket == _IPv6inIP: _IPv6inIP.cls = conf.L3socket else: del(conf.L3socket) return _IPv6inIP class _IPv6inIP(SuperSocket): dst = '127.0.0.1' src = None cls = None def __init__(self, family=socket.AF_INET6, type=socket.SOCK_STREAM, proto=0, **args): SuperSocket.__init__(self, family, type, proto) self.worker = self.cls(**args) def set(self, dst, src=None): _IPv6inIP.src = src _IPv6inIP.dst = dst def nonblock_recv(self): p = self.worker.nonblock_recv() return self._recv(p) def recv(self, x): p = self.worker.recv(x) return self._recv(p, x) def _recv(self, p, x=MTU): if p is None: return p elif isinstance(p, IP): # TODO: verify checksum if p.src == self.dst and p.proto == socket.IPPROTO_IPV6: if isinstance(p.payload, IPv6): return p.payload return p def send(self, x): return self.worker.send(IP(dst=self.dst, src=self.src, proto=socket.IPPROTO_IPV6)/x) ############################################################################# ############################################################################# ### Layers binding ### ############################################################################# ############################################################################# L3Types[ETH_P_IPV6] = IPv6 LLTypes[31] = IPv6 LLNumTypes[IPv6] = 31 bind_layers(Ether, IPv6, type = 0x86dd ) bind_layers(IPerror6, TCPerror, nh = socket.IPPROTO_TCP ) bind_layers(IPerror6, UDPerror, nh = socket.IPPROTO_UDP ) bind_layers(IPv6, TCP, nh = socket.IPPROTO_TCP ) bind_layers(IPv6, UDP, nh = socket.IPPROTO_UDP ) bind_layers(IP, IPv6, proto = socket.IPPROTO_IPV6 ) bind_layers(IPv6, IPv6, nh = socket.IPPROTO_IPV6 ) ############################################################################# ### Conf overloading ### ############################################################################# def get_working_if6(): """ try to guess the best interface for conf.iface by looking for the one used by default route if any. """ res = conf.route6.route("::/0") if res: iff, gw, addr = res return iff return get_working_if() conf.route6 = Route6() conf.iface = get_working_if6() if __name__ == '__main__': interact(mydict=globals(), mybanner="IPv6 enabled") else: import __builtin__ __builtin__.__dict__.update(globals())
{ "content_hash": "e9640fcfc9b386455f03f4e6df631fed", "timestamp": "", "source": "github", "line_count": 6811, "max_line_length": 156, "avg_line_length": 37.35060930847159, "alnum_prop": 0.5372236089545785, "repo_name": "daveti/NDprotector", "id": "1f274017b721ea481118ee96ba70b4b88e9dfd09", "size": "255901", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "scapy6send/scapy6.py", "mode": "33261", "license": "bsd-3-clause", "language": [ { "name": "Makefile", "bytes": "1085" }, { "name": "Python", "bytes": "979542" } ], "symlink_target": "" }
"""Base constants and handlers.""" import base64 import Cookie import datetime import hmac import json import logging import os import sys import time import traceback import urlparse import jinja2 import webapp2 from google.appengine.api import users from core import counters from core.domain import config_domain from core.domain import config_services from core.domain import obj_services from core.domain import rights_manager from core.domain import rte_component_registry from core.domain import user_services from core.platform import models import feconf import jinja_utils import utils current_user_services = models.Registry.import_current_user_services() (user_models,) = models.Registry.import_models([models.NAMES.user]) ONE_DAY_AGO_IN_SECS = -24 * 60 * 60 DEFAULT_CSRF_SECRET = 'oppia csrf secret' CSRF_SECRET = config_domain.ConfigProperty( 'oppia_csrf_secret', {'type': 'unicode'}, 'Text used to encrypt CSRF tokens.', DEFAULT_CSRF_SECRET) SITE_NAME = config_domain.ConfigProperty( 'site_name', {'type': 'unicode'}, 'The site name', 'SITE_NAME') BEFORE_END_HEAD_TAG_HOOK = config_domain.ConfigProperty( 'before_end_head_tag_hook', { 'type': 'unicode', 'ui_config': { 'rows': 7, }, }, 'Code to insert just before the closing </head> tag in all pages.', '') SITE_FEEDBACK_FORM_URL = config_domain.ConfigProperty( 'site_feedback_form_url', {'type': 'unicode'}, 'Site feedback form URL (leave blank if there is no such form)', '') def require_user(handler): """Decorator that checks if a user is associated to the current session.""" def test_login(self, **kwargs): """Checks if the user for the current session is logged in.""" if not self.user_id: self.redirect(current_user_services.create_login_url( self.request.uri)) return return handler(self, **kwargs) return test_login def require_moderator(handler): """Decorator that checks if the current user is a moderator.""" def test_is_moderator(self, **kwargs): """Check that the user is a moderator.""" if not self.user_id: self.redirect(current_user_services.create_login_url( self.request.uri)) return if not rights_manager.Actor(self.user_id).is_moderator(): raise self.UnauthorizedUserException( 'You do not have the credentials to access this page.') return handler(self, **kwargs) return test_is_moderator def require_fully_signed_up(handler): """Decorator that checks if the user is logged in and has completed the signup process. If any of these checks fail, an UnauthorizedUserException is raised. """ def test_registered_as_editor(self, **kwargs): """Check that the user has registered as an editor.""" if (not self.user_id or self.username in config_domain.BANNED_USERNAMES.value or not user_services.has_fully_registered(self.user_id)): raise self.UnauthorizedUserException( 'You do not have the credentials to access this page.') return handler(self, **kwargs) return test_registered_as_editor def _clear_login_cookies(response_headers): # AppEngine sets the ACSID cookie for http:// and the SACSID cookie # for https:// . We just unset both below. cookie = Cookie.SimpleCookie() for cookie_name in ['ACSID', 'SACSID']: cookie = Cookie.SimpleCookie() cookie[cookie_name] = '' cookie[cookie_name]['expires'] = ( datetime.datetime.utcnow() + datetime.timedelta(seconds=ONE_DAY_AGO_IN_SECS) ).strftime('%a, %d %b %Y %H:%M:%S GMT') response_headers.add_header(*cookie.output().split(': ', 1)) class LogoutPage(webapp2.RequestHandler): def get(self): """Logs the user out, and returns them to a specified page or the home page. """ # The str conversion is needed, otherwise an InvalidResponseError # asking for the 'Location' header value to be str instead of # 'unicode' will result. url_to_redirect_to = str(self.request.get('return_url') or '/') _clear_login_cookies(self.response.headers) if feconf.DEV_MODE: self.redirect(users.create_logout_url(url_to_redirect_to)) else: self.redirect(url_to_redirect_to) class BaseHandler(webapp2.RequestHandler): """Base class for all Oppia handlers.""" # Whether to check POST and PUT payloads for CSRF tokens prior to # processing them. Can be overridden by subclasses if this check is # not necessary. REQUIRE_PAYLOAD_CSRF_CHECK = True # Whether to redirect requests corresponding to a logged-in user who has # not completed signup in to the signup page. This ensures that logged-in # users have agreed to the latest terms. REDIRECT_UNFINISHED_SIGNUPS = True @webapp2.cached_property def jinja2_env(self): return jinja_utils.get_jinja_env(feconf.FRONTEND_TEMPLATES_DIR) def __init__(self, request, response): # pylint: disable=super-init-not-called # Set self.request, self.response and self.app. self.initialize(request, response) self.start_time = datetime.datetime.utcnow() # Initializes the return dict for the handlers. self.values = {} self.user = current_user_services.get_current_user() self.user_id = current_user_services.get_user_id( self.user) if self.user else None self.username = None self.has_seen_editor_tutorial = False self.partially_logged_in = False self.values['profile_picture_data_url'] = None self.preferred_site_language_code = None if self.user_id: email = current_user_services.get_user_email(self.user) user_settings = user_services.get_or_create_user( self.user_id, email) self.values['user_email'] = user_settings.email if (self.REDIRECT_UNFINISHED_SIGNUPS and not user_services.has_fully_registered(self.user_id)): _clear_login_cookies(self.response.headers) self.partially_logged_in = True self.user_id = None else: self.username = user_settings.username self.preferred_site_language_code = ( user_settings.preferred_site_language_code) self.values['username'] = self.username self.values['profile_picture_data_url'] = ( user_settings.profile_picture_data_url) if user_settings.last_started_state_editor_tutorial: self.has_seen_editor_tutorial = True self.is_moderator = rights_manager.Actor(self.user_id).is_moderator() self.is_admin = rights_manager.Actor(self.user_id).is_admin() self.is_super_admin = ( current_user_services.is_current_user_super_admin()) self.values['is_moderator'] = self.is_moderator self.values['is_admin'] = self.is_admin self.values['is_super_admin'] = self.is_super_admin if self.request.get('payload'): self.payload = json.loads(self.request.get('payload')) else: self.payload = None def dispatch(self): """Overrides dispatch method in webapp2 superclass.""" # If the request is to the old demo server, redirect it permanently to # the new demo server. if self.request.uri.startswith('https://oppiaserver.appspot.com'): self.redirect('https://oppiatestserver.appspot.com', True) return # In DEV_MODE, clearing cookies does not log out the user, so we # force-clear them by redirecting to the logout URL. if feconf.DEV_MODE and self.partially_logged_in: self.redirect(users.create_logout_url(self.request.uri)) return if self.payload is not None and self.REQUIRE_PAYLOAD_CSRF_CHECK: try: csrf_token = self.request.get('csrf_token') if not csrf_token: raise Exception( 'Missing CSRF token. Changes were not saved. ' 'Please report this bug.') is_csrf_token_valid = CsrfTokenManager.is_csrf_token_valid( self.user_id, csrf_token) if not is_csrf_token_valid: raise self.UnauthorizedUserException( 'Your session has expired, and unfortunately your ' 'changes cannot be saved. Please refresh the page.') except Exception as e: logging.error( '%s: payload %s', e, self.payload) return self.handle_exception(e, self.app.debug) super(BaseHandler, self).dispatch() def get(self, *args, **kwargs): # pylint: disable=unused-argument """Base method to handle GET requests.""" raise self.PageNotFoundException def post(self, *args): # pylint: disable=unused-argument """Base method to handle POST requests.""" raise self.PageNotFoundException def put(self, *args): # pylint: disable=unused-argument """Base method to handle PUT requests.""" raise self.PageNotFoundException def delete(self, *args): # pylint: disable=unused-argument """Base method to handle DELETE requests.""" raise self.PageNotFoundException def render_json(self, values): self.response.content_type = 'application/javascript; charset=utf-8' self.response.headers['Content-Disposition'] = ( 'attachment; filename="oppia-attachment.txt"') self.response.headers['Strict-Transport-Security'] = ( 'max-age=31536000; includeSubDomains') self.response.headers['X-Content-Type-Options'] = 'nosniff' json_output = json.dumps(values, cls=utils.JSONEncoderForHTML) self.response.write('%s%s' % (feconf.XSSI_PREFIX, json_output)) # Calculate the processing time of this request. duration = datetime.datetime.utcnow() - self.start_time processing_time = duration.seconds + duration.microseconds / 1E6 counters.JSON_RESPONSE_TIME_SECS.inc(increment=processing_time) counters.JSON_RESPONSE_COUNT.inc() def render_template( self, filename, values=None, iframe_restriction='DENY', redirect_url_on_logout=None): if values is None: values = self.values scheme, netloc, path, _, _ = urlparse.urlsplit(self.request.uri) values.update({ 'ALL_CATEGORIES': feconf.ALL_CATEGORIES, 'ALL_LANGUAGE_CODES': feconf.ALL_LANGUAGE_CODES, 'ASSET_DIR_PREFIX': utils.get_asset_dir_prefix(), 'BEFORE_END_HEAD_TAG_HOOK': jinja2.utils.Markup( BEFORE_END_HEAD_TAG_HOOK.value), 'CAN_SEND_ANALYTICS_EVENTS': feconf.CAN_SEND_ANALYTICS_EVENTS, 'DEFAULT_LANGUAGE_CODE': feconf.ALL_LANGUAGE_CODES[0]['code'], 'DEV_MODE': feconf.DEV_MODE, 'MINIFICATION': feconf.IS_MINIFIED, 'DOMAIN_URL': '%s://%s' % (scheme, netloc), 'ACTIVITY_STATUS_PRIVATE': ( rights_manager.ACTIVITY_STATUS_PRIVATE), 'ACTIVITY_STATUS_PUBLIC': ( rights_manager.ACTIVITY_STATUS_PUBLIC), 'ACTIVITY_STATUS_PUBLICIZED': ( rights_manager.ACTIVITY_STATUS_PUBLICIZED), # The 'path' variable starts with a forward slash. 'FULL_URL': '%s://%s%s' % (scheme, netloc, path), 'INVALID_NAME_CHARS': feconf.INVALID_NAME_CHARS, # TODO(sll): Consider including the obj_editor html directly as # part of the base HTML template? 'OBJECT_EDITORS_JS': jinja2.utils.Markup( obj_services.get_all_object_editor_js_templates()), 'RTE_COMPONENT_SPECS': ( rte_component_registry.Registry.get_all_specs()), 'SITE_FEEDBACK_FORM_URL': SITE_FEEDBACK_FORM_URL.value, 'SITE_NAME': SITE_NAME.value, 'SUPPORTED_SITE_LANGUAGES': feconf.SUPPORTED_SITE_LANGUAGES, 'SYSTEM_USERNAMES': feconf.SYSTEM_USERNAMES, 'can_create_collections': ( self.username and self.username in config_domain.WHITELISTED_COLLECTION_EDITOR_USERNAMES.value ), 'user_is_logged_in': user_services.has_fully_registered( self.user_id), 'preferred_site_language_code': self.preferred_site_language_code }) if 'meta_name' not in values: values['meta_name'] = 'Personalized Online Learning from Oppia' if 'meta_description' not in values: values['meta_description'] = ( 'Oppia is a free, open-source learning platform. Join the ' 'community to create or try an exploration today!') if redirect_url_on_logout is None: redirect_url_on_logout = self.request.uri if self.user_id: values['logout_url'] = ( current_user_services.create_logout_url( redirect_url_on_logout)) else: target_url = ( '/' if self.request.uri.endswith(feconf.SPLASH_URL) else self.request.uri) values['login_url'] = ( current_user_services.create_login_url(target_url)) # Create a new csrf token for inclusion in HTML responses. This assumes # that tokens generated in one handler will be sent back to a handler # with the same page name. values['csrf_token'] = '' if self.REQUIRE_PAYLOAD_CSRF_CHECK: values['csrf_token'] = CsrfTokenManager.create_csrf_token( self.user_id) self.response.cache_control.no_cache = True self.response.cache_control.must_revalidate = True self.response.headers['Strict-Transport-Security'] = ( 'max-age=31536000; includeSubDomains') self.response.headers['X-Content-Type-Options'] = 'nosniff' if iframe_restriction is not None: if iframe_restriction in ['SAMEORIGIN', 'DENY']: self.response.headers['X-Frame-Options'] = iframe_restriction else: raise Exception( 'Invalid X-Frame-Options: %s' % iframe_restriction) self.response.expires = 'Mon, 01 Jan 1990 00:00:00 GMT' self.response.pragma = 'no-cache' self.response.write(self.jinja2_env.get_template( filename).render(**values)) # Calculate the processing time of this request. duration = datetime.datetime.utcnow() - self.start_time processing_time = duration.seconds + duration.microseconds / 1E6 counters.HTML_RESPONSE_TIME_SECS.inc(increment=processing_time) counters.HTML_RESPONSE_COUNT.inc() def _render_exception(self, error_code, values): assert error_code in [400, 401, 404, 500] values['code'] = error_code # This checks if the response should be JSON or HTML. if self.payload is not None: self.render_json(values) else: self.values.update(values) self.render_template( 'error/error.html', iframe_restriction=None) def handle_exception(self, exception, unused_debug_mode): """Overwrites the default exception handler.""" logging.info(''.join(traceback.format_exception(*sys.exc_info()))) logging.error('Exception raised: %s', exception) if isinstance(exception, self.PageNotFoundException): logging.error('Invalid URL requested: %s', self.request.uri) self.error(404) self._render_exception(404, { 'error': 'Could not find the page %s.' % self.request.uri}) return if isinstance(exception, self.NotLoggedInException): self.redirect( current_user_services.create_login_url(self.request.uri)) return if isinstance(exception, self.UnauthorizedUserException): self.error(401) self._render_exception(401, {'error': unicode(exception)}) return if isinstance(exception, self.InvalidInputException): self.error(400) self._render_exception(400, {'error': unicode(exception)}) return if isinstance(exception, self.InternalErrorException): self.error(500) self._render_exception(500, {'error': unicode(exception)}) return self.error(500) self._render_exception(500, {'error': unicode(exception)}) class UnauthorizedUserException(Exception): """Error class for unauthorized access.""" class NotLoggedInException(Exception): """Error class for users that are not logged in (error code 401).""" class InvalidInputException(Exception): """Error class for invalid input on the user side (error code 400).""" class PageNotFoundException(Exception): """Error class for a page not found error (error code 404).""" class InternalErrorException(Exception): """Error class for an internal server side error (error code 500).""" class Error404Handler(BaseHandler): """Handles 404 errors.""" REQUIRE_PAYLOAD_CSRF_CHECK = False class CsrfTokenManager(object): """Manages page/user tokens in memcache to protect against CSRF.""" # Max age of the token (48 hours). _CSRF_TOKEN_AGE_SECS = 60 * 60 * 48 # Default user id for non-logged-in users. _USER_ID_DEFAULT = 'non_logged_in_user' @classmethod def init_csrf_secret(cls): """Verify that non-default CSRF secret exists; creates one if not.""" # Any non-default value is fine. if CSRF_SECRET.value and CSRF_SECRET.value != DEFAULT_CSRF_SECRET: return # Initialize to random value. config_services.set_property( feconf.SYSTEM_COMMITTER_ID, CSRF_SECRET.name, base64.urlsafe_b64encode(os.urandom(20))) @classmethod def _create_token(cls, user_id, issued_on): """Creates a digest (string representation) of a token.""" cls.init_csrf_secret() # The token has 4 parts: hash of the actor user id, hash of the page # name, hash of the time issued and plain text of the time issued. if user_id is None: user_id = cls._USER_ID_DEFAULT # Round time to seconds. issued_on = long(issued_on) digester = hmac.new(str(CSRF_SECRET.value)) digester.update(str(user_id)) digester.update(':') digester.update(str(issued_on)) digest = digester.digest() token = '%s/%s' % (issued_on, base64.urlsafe_b64encode(digest)) return token @classmethod def _get_current_time(cls): return time.time() @classmethod def create_csrf_token(cls, user_id): return cls._create_token(user_id, cls._get_current_time()) @classmethod def is_csrf_token_valid(cls, user_id, token): """Validate a given CSRF token with the CSRF secret in memcache.""" try: parts = token.split('/') if len(parts) != 2: return False issued_on = long(parts[0]) age = cls._get_current_time() - issued_on if age > cls._CSRF_TOKEN_AGE_SECS: return False authentic_token = cls._create_token(user_id, issued_on) if authentic_token == token: return True return False except Exception: return False
{ "content_hash": "5a593befa0d1d0a644ac779ab7274263", "timestamp": "", "source": "github", "line_count": 522, "max_line_length": 83, "avg_line_length": 38.333333333333336, "alnum_prop": 0.6165917041479261, "repo_name": "bjvoth/oppia", "id": "b13951e2f57e336463486741e7c24d88ab668c25", "size": "20615", "binary": false, "copies": "1", "ref": "refs/heads/develop", "path": "core/controllers/base.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "C", "bytes": "363" }, { "name": "CSS", "bytes": "49016" }, { "name": "HTML", "bytes": "303070" }, { "name": "JavaScript", "bytes": "1412498" }, { "name": "Python", "bytes": "1655680" }, { "name": "Shell", "bytes": "26876" } ], "symlink_target": "" }
from django.conf import settings from django.urls import re_path from django.utils.translation import ugettext_lazy as _ from horizon.browsers.views import AngularIndexView from senlin_dashboard.api import rest # noqa: F401 from senlin_dashboard.cluster.receivers import views as legacyViews if settings.ANGULAR_FEATURES.get('receivers_panel', True): title = _("Receivers") urlpatterns = [ re_path(r'^$', AngularIndexView.as_view(title=title), name='index'), ] else: urlpatterns = [ re_path(r'^$', legacyViews.IndexView.as_view(), name='index'), re_path(r'^create/$', legacyViews.CreateView.as_view(), name='create'), re_path(r'^(?P<receiver_id>[^/]+)/$', legacyViews.DetailView.as_view(), name='detail'), ]
{ "content_hash": "7d43dd8ca73b451cb56810d02f054938", "timestamp": "", "source": "github", "line_count": 23, "max_line_length": 76, "avg_line_length": 34.65217391304348, "alnum_prop": 0.6599749058971142, "repo_name": "stackforge/senlin-dashboard", "id": "fe73b0e47cb73e08752f2cf2e6bd551583955199", "size": "1338", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "senlin_dashboard/cluster/receivers/urls.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "HTML", "bytes": "10910" }, { "name": "Python", "bytes": "120599" }, { "name": "Shell", "bytes": "12998" } ], "symlink_target": "" }
import typing as t from datetime import datetime from .._internal import _to_str from ..datastructures import Accept from ..datastructures import Authorization from ..datastructures import CharsetAccept from ..datastructures import ETags from ..datastructures import Headers from ..datastructures import HeaderSet from ..datastructures import IfRange from ..datastructures import ImmutableList from ..datastructures import ImmutableMultiDict from ..datastructures import LanguageAccept from ..datastructures import MIMEAccept from ..datastructures import MultiDict from ..datastructures import Range from ..datastructures import RequestCacheControl from ..http import parse_accept_header from ..http import parse_authorization_header from ..http import parse_cache_control_header from ..http import parse_date from ..http import parse_etags from ..http import parse_if_range_header from ..http import parse_list_header from ..http import parse_options_header from ..http import parse_range_header from ..http import parse_set_header from ..urls import url_decode from ..user_agent import UserAgent from ..utils import cached_property from ..utils import header_property from .http import parse_cookie from .utils import get_current_url from .utils import get_host class Request: """Represents the non-IO parts of a HTTP request, including the method, URL info, and headers. This class is not meant for general use. It should only be used when implementing WSGI, ASGI, or another HTTP application spec. Werkzeug provides a WSGI implementation at :cls:`werkzeug.wrappers.Request`. :param method: The method the request was made with, such as ``GET``. :param scheme: The URL scheme of the protocol the request used, such as ``https`` or ``wss``. :param server: The address of the server. ``(host, port)``, ``(path, None)`` for unix sockets, or ``None`` if not known. :param root_path: The prefix that the application is mounted under. This is prepended to generated URLs, but is not part of route matching. :param path: The path part of the URL after ``root_path``. :param query_string: The part of the URL after the "?". :param headers: The headers received with the request. :param remote_addr: The address of the client sending the request. .. versionadded:: 2.0 """ #: The charset used to decode most data in the request. charset = "utf-8" #: the error handling procedure for errors, defaults to 'replace' encoding_errors = "replace" #: the class to use for `args` and `form`. The default is an #: :class:`~werkzeug.datastructures.ImmutableMultiDict` which supports #: multiple values per key. alternatively it makes sense to use an #: :class:`~werkzeug.datastructures.ImmutableOrderedMultiDict` which #: preserves order or a :class:`~werkzeug.datastructures.ImmutableDict` #: which is the fastest but only remembers the last key. It is also #: possible to use mutable structures, but this is not recommended. #: #: .. versionadded:: 0.6 parameter_storage_class: t.Type[MultiDict] = ImmutableMultiDict #: The type to be used for dict values from the incoming WSGI #: environment. (For example for :attr:`cookies`.) By default an #: :class:`~werkzeug.datastructures.ImmutableMultiDict` is used. #: #: .. versionchanged:: 1.0.0 #: Changed to ``ImmutableMultiDict`` to support multiple values. #: #: .. versionadded:: 0.6 dict_storage_class: t.Type[MultiDict] = ImmutableMultiDict #: the type to be used for list values from the incoming WSGI environment. #: By default an :class:`~werkzeug.datastructures.ImmutableList` is used #: (for example for :attr:`access_list`). #: #: .. versionadded:: 0.6 list_storage_class: t.Type[t.List] = ImmutableList user_agent_class: t.Type[UserAgent] = UserAgent """The class used and returned by the :attr:`user_agent` property to parse the header. Defaults to :class:`~werkzeug.user_agent.UserAgent`, which does no parsing. An extension can provide a subclass that uses a parser to provide other data. .. versionadded:: 2.0 """ #: Valid host names when handling requests. By default all hosts are #: trusted, which means that whatever the client says the host is #: will be accepted. #: #: Because ``Host`` and ``X-Forwarded-Host`` headers can be set to #: any value by a malicious client, it is recommended to either set #: this property or implement similar validation in the proxy (if #: the application is being run behind one). #: #: .. versionadded:: 0.9 trusted_hosts: t.Optional[t.List[str]] = None def __init__( self, method: str, scheme: str, server: t.Optional[t.Tuple[str, t.Optional[int]]], root_path: str, path: str, query_string: bytes, headers: Headers, remote_addr: t.Optional[str], ) -> None: #: The method the request was made with, such as ``GET``. self.method = method.upper() #: The URL scheme of the protocol the request used, such as #: ``https`` or ``wss``. self.scheme = scheme #: The address of the server. ``(host, port)``, ``(path, None)`` #: for unix sockets, or ``None`` if not known. self.server = server #: The prefix that the application is mounted under, without a #: trailing slash. :attr:`path` comes after this. self.root_path = root_path.rstrip("/") #: The path part of the URL after :attr:`root_path`. This is the #: path used for routing within the application. self.path = "/" + path.lstrip("/") #: The part of the URL after the "?". This is the raw value, use #: :attr:`args` for the parsed values. self.query_string = query_string #: The headers received with the request. self.headers = headers #: The address of the client sending the request. self.remote_addr = remote_addr def __repr__(self) -> str: try: url = self.url except Exception as e: url = f"(invalid URL: {e})" return f"<{type(self).__name__} {url!r} [{self.method}]>" @property def url_charset(self) -> str: """The charset that is assumed for URLs. Defaults to the value of :attr:`charset`. .. versionadded:: 0.6 """ return self.charset @cached_property def args(self) -> "MultiDict[str, str]": """The parsed URL parameters (the part in the URL after the question mark). By default an :class:`~werkzeug.datastructures.ImmutableMultiDict` is returned from this function. This can be changed by setting :attr:`parameter_storage_class` to a different type. This might be necessary if the order of the form data is important. """ return url_decode( self.query_string, self.url_charset, errors=self.encoding_errors, cls=self.parameter_storage_class, ) @cached_property def access_route(self) -> t.List[str]: """If a forwarded header exists this is a list of all ip addresses from the client ip to the last proxy server. """ if "X-Forwarded-For" in self.headers: return self.list_storage_class( parse_list_header(self.headers["X-Forwarded-For"]) ) elif self.remote_addr is not None: return self.list_storage_class([self.remote_addr]) return self.list_storage_class() @cached_property def full_path(self) -> str: """Requested path, including the query string.""" return f"{self.path}?{_to_str(self.query_string, self.url_charset)}" @property def is_secure(self) -> bool: """``True`` if the request was made with a secure protocol (HTTPS or WSS). """ return self.scheme in {"https", "wss"} @cached_property def url(self) -> str: """The full request URL with the scheme, host, root path, path, and query string.""" return get_current_url( self.scheme, self.host, self.root_path, self.path, self.query_string ) @cached_property def base_url(self) -> str: """Like :attr:`url` but without the query string.""" return get_current_url(self.scheme, self.host, self.root_path, self.path) @cached_property def root_url(self) -> str: """The request URL scheme, host, and root path. This is the root that the application is accessed from. """ return get_current_url(self.scheme, self.host, self.root_path) @cached_property def host_url(self) -> str: """The request URL scheme and host only.""" return get_current_url(self.scheme, self.host) @cached_property def host(self) -> str: """The host name the request was made to, including the port if it's non-standard. Validated with :attr:`trusted_hosts`. """ return get_host( self.scheme, self.headers.get("host"), self.server, self.trusted_hosts ) @cached_property def cookies(self) -> "ImmutableMultiDict[str, str]": """A :class:`dict` with the contents of all cookies transmitted with the request.""" wsgi_combined_cookie = ";".join(self.headers.getlist("Cookie")) return parse_cookie( # type: ignore wsgi_combined_cookie, self.charset, self.encoding_errors, cls=self.dict_storage_class, ) # Common Descriptors content_type = header_property[str]( "Content-Type", doc="""The Content-Type entity-header field indicates the media type of the entity-body sent to the recipient or, in the case of the HEAD method, the media type that would have been sent had the request been a GET.""", read_only=True, ) @cached_property def content_length(self) -> t.Optional[int]: """The Content-Length entity-header field indicates the size of the entity-body in bytes or, in the case of the HEAD method, the size of the entity-body that would have been sent had the request been a GET. """ if self.headers.get("Transfer-Encoding", "") == "chunked": return None content_length = self.headers.get("Content-Length") if content_length is not None: try: return max(0, int(content_length)) except (ValueError, TypeError): pass return None content_encoding = header_property[str]( "Content-Encoding", doc="""The Content-Encoding entity-header field is used as a modifier to the media-type. When present, its value indicates what additional content codings have been applied to the entity-body, and thus what decoding mechanisms must be applied in order to obtain the media-type referenced by the Content-Type header field. .. versionadded:: 0.9""", read_only=True, ) content_md5 = header_property[str]( "Content-MD5", doc="""The Content-MD5 entity-header field, as defined in RFC 1864, is an MD5 digest of the entity-body for the purpose of providing an end-to-end message integrity check (MIC) of the entity-body. (Note: a MIC is good for detecting accidental modification of the entity-body in transit, but is not proof against malicious attacks.) .. versionadded:: 0.9""", read_only=True, ) referrer = header_property[str]( "Referer", doc="""The Referer[sic] request-header field allows the client to specify, for the server's benefit, the address (URI) of the resource from which the Request-URI was obtained (the "referrer", although the header field is misspelled).""", read_only=True, ) date = header_property( "Date", None, parse_date, doc="""The Date general-header field represents the date and time at which the message was originated, having the same semantics as orig-date in RFC 822. .. versionchanged:: 2.0 The datetime object is timezone-aware. """, read_only=True, ) max_forwards = header_property( "Max-Forwards", None, int, doc="""The Max-Forwards request-header field provides a mechanism with the TRACE and OPTIONS methods to limit the number of proxies or gateways that can forward the request to the next inbound server.""", read_only=True, ) def _parse_content_type(self) -> None: if not hasattr(self, "_parsed_content_type"): self._parsed_content_type = parse_options_header( self.headers.get("Content-Type", "") ) @property def mimetype(self) -> str: """Like :attr:`content_type`, but without parameters (eg, without charset, type etc.) and always lowercase. For example if the content type is ``text/HTML; charset=utf-8`` the mimetype would be ``'text/html'``. """ self._parse_content_type() return self._parsed_content_type[0].lower() @property def mimetype_params(self) -> t.Dict[str, str]: """The mimetype parameters as dict. For example if the content type is ``text/html; charset=utf-8`` the params would be ``{'charset': 'utf-8'}``. """ self._parse_content_type() return self._parsed_content_type[1] @cached_property def pragma(self) -> HeaderSet: """The Pragma general-header field is used to include implementation-specific directives that might apply to any recipient along the request/response chain. All pragma directives specify optional behavior from the viewpoint of the protocol; however, some systems MAY require that behavior be consistent with the directives. """ return parse_set_header(self.headers.get("Pragma", "")) # Accept @cached_property def accept_mimetypes(self) -> MIMEAccept: """List of mimetypes this client supports as :class:`~werkzeug.datastructures.MIMEAccept` object. """ return parse_accept_header(self.headers.get("Accept"), MIMEAccept) @cached_property def accept_charsets(self) -> CharsetAccept: """List of charsets this client supports as :class:`~werkzeug.datastructures.CharsetAccept` object. """ return parse_accept_header(self.headers.get("Accept-Charset"), CharsetAccept) @cached_property def accept_encodings(self) -> Accept: """List of encodings this client accepts. Encodings in a HTTP term are compression encodings such as gzip. For charsets have a look at :attr:`accept_charset`. """ return parse_accept_header(self.headers.get("Accept-Encoding")) @cached_property def accept_languages(self) -> LanguageAccept: """List of languages this client accepts as :class:`~werkzeug.datastructures.LanguageAccept` object. .. versionchanged 0.5 In previous versions this was a regular :class:`~werkzeug.datastructures.Accept` object. """ return parse_accept_header(self.headers.get("Accept-Language"), LanguageAccept) # ETag @cached_property def cache_control(self) -> RequestCacheControl: """A :class:`~werkzeug.datastructures.RequestCacheControl` object for the incoming cache control headers. """ cache_control = self.headers.get("Cache-Control") return parse_cache_control_header(cache_control, None, RequestCacheControl) @cached_property def if_match(self) -> ETags: """An object containing all the etags in the `If-Match` header. :rtype: :class:`~werkzeug.datastructures.ETags` """ return parse_etags(self.headers.get("If-Match")) @cached_property def if_none_match(self) -> ETags: """An object containing all the etags in the `If-None-Match` header. :rtype: :class:`~werkzeug.datastructures.ETags` """ return parse_etags(self.headers.get("If-None-Match")) @cached_property def if_modified_since(self) -> t.Optional[datetime]: """The parsed `If-Modified-Since` header as a datetime object. .. versionchanged:: 2.0 The datetime object is timezone-aware. """ return parse_date(self.headers.get("If-Modified-Since")) @cached_property def if_unmodified_since(self) -> t.Optional[datetime]: """The parsed `If-Unmodified-Since` header as a datetime object. .. versionchanged:: 2.0 The datetime object is timezone-aware. """ return parse_date(self.headers.get("If-Unmodified-Since")) @cached_property def if_range(self) -> IfRange: """The parsed ``If-Range`` header. .. versionchanged:: 2.0 ``IfRange.date`` is timezone-aware. .. versionadded:: 0.7 """ return parse_if_range_header(self.headers.get("If-Range")) @cached_property def range(self) -> t.Optional[Range]: """The parsed `Range` header. .. versionadded:: 0.7 :rtype: :class:`~werkzeug.datastructures.Range` """ return parse_range_header(self.headers.get("Range")) # User Agent @cached_property def user_agent(self) -> UserAgent: """The user agent. Use ``user_agent.string`` to get the header value. Set :attr:`user_agent_class` to a subclass of :class:`~werkzeug.user_agent.UserAgent` to provide parsing for the other properties or other extended data. .. versionchanged:: 2.0 The built in parser is deprecated and will be removed in Werkzeug 2.1. A ``UserAgent`` subclass must be set to parse data from the string. """ return self.user_agent_class(self.headers.get("User-Agent", "")) # Authorization @cached_property def authorization(self) -> t.Optional[Authorization]: """The `Authorization` object in parsed form.""" return parse_authorization_header(self.headers.get("Authorization")) # CORS origin = header_property[str]( "Origin", doc=( "The host that the request originated from. Set" " :attr:`~CORSResponseMixin.access_control_allow_origin` on" " the response to indicate which origins are allowed." ), read_only=True, ) access_control_request_headers = header_property( "Access-Control-Request-Headers", load_func=parse_set_header, doc=( "Sent with a preflight request to indicate which headers" " will be sent with the cross origin request. Set" " :attr:`~CORSResponseMixin.access_control_allow_headers`" " on the response to indicate which headers are allowed." ), read_only=True, ) access_control_request_method = header_property[str]( "Access-Control-Request-Method", doc=( "Sent with a preflight request to indicate which method" " will be used for the cross origin request. Set" " :attr:`~CORSResponseMixin.access_control_allow_methods`" " on the response to indicate which methods are allowed." ), read_only=True, ) @property def is_json(self) -> bool: """Check if the mimetype indicates JSON data, either :mimetype:`application/json` or :mimetype:`application/*+json`. """ mt = self.mimetype return ( mt == "application/json" or mt.startswith("application/") and mt.endswith("+json") )
{ "content_hash": "a94a0a07dce50f87fa6457afbf84c86c", "timestamp": "", "source": "github", "line_count": 547, "max_line_length": 87, "avg_line_length": 36.88299817184644, "alnum_prop": 0.6304337050805452, "repo_name": "pallets/werkzeug", "id": "8832baafeeea4e74146fa9d24ea328542d98413a", "size": "20175", "binary": false, "copies": "3", "ref": "refs/heads/main", "path": "src/werkzeug/sansio/request.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "6078" }, { "name": "HTML", "bytes": "124" }, { "name": "JavaScript", "bytes": "10521" }, { "name": "Python", "bytes": "1095568" } ], "symlink_target": "" }
"""smarttub constants.""" DOMAIN = "smarttub" EVENT_SMARTTUB = "smarttub" SMARTTUB_CONTROLLER = "smarttub_controller" SCAN_INTERVAL = 60 POLLING_TIMEOUT = 10 API_TIMEOUT = 5 DEFAULT_MIN_TEMP = 18.5 DEFAULT_MAX_TEMP = 40 # the device doesn't remember any state for the light, so we have to choose a # mode (smarttub.SpaLight.LightMode) when turning it on. There is no white # mode. DEFAULT_LIGHT_EFFECT = "purple" # default to 50% brightness DEFAULT_LIGHT_BRIGHTNESS = 128 ATTR_LIGHTS = "lights" ATTR_PUMPS = "pumps" ATTR_REMINDERS = "reminders" ATTR_STATUS = "status"
{ "content_hash": "476146507f1fb2536fa112a8b2b8db3e", "timestamp": "", "source": "github", "line_count": 27, "max_line_length": 77, "avg_line_length": 21.333333333333332, "alnum_prop": 0.7309027777777778, "repo_name": "adrienbrault/home-assistant", "id": "23bd8bd8ec0da3b1d8e91d4bff5f8154c027ae67", "size": "576", "binary": false, "copies": "1", "ref": "refs/heads/dev", "path": "homeassistant/components/smarttub/const.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Dockerfile", "bytes": "1795" }, { "name": "Python", "bytes": "32021043" }, { "name": "Shell", "bytes": "4900" } ], "symlink_target": "" }
import logging from five import grok from zope import schema from plone.directives import dexterity, form from plone.dexterity.content import Container from vfu.events import MessageFactory as _ class IVFUMemberEvent(form.Schema): """ Event info """ available = schema.Bool(title=_(u'Available'), required=False, default=True) class VFUMemberEvent(Container): grok.implements(IVFUMemberEvent)
{ "content_hash": "ed2e9d9fe4a0bf812ba7ae3de39dbbc5", "timestamp": "", "source": "github", "line_count": 19, "max_line_length": 80, "avg_line_length": 22.263157894736842, "alnum_prop": 0.75177304964539, "repo_name": "a25kk/vfu", "id": "d1843edab3e4881c45cd8f7f1307259d9727b04a", "size": "423", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "src/vfu.events/vfu/events/member_event.py", "mode": "33261", "license": "mit", "language": [ { "name": "Brainfuck", "bytes": "5661" }, { "name": "CSS", "bytes": "624320" }, { "name": "Dockerfile", "bytes": "110" }, { "name": "HTML", "bytes": "223242" }, { "name": "JavaScript", "bytes": "366575" }, { "name": "Makefile", "bytes": "2499" }, { "name": "Python", "bytes": "85859" }, { "name": "Shell", "bytes": "3078" } ], "symlink_target": "" }
from django.conf import settings # import the settings file def display_name(request): # return the value you want as a dictionary. you may add multiple values in there. return {'DISPLAY_NAME': settings.DISPLAY_NAME} def config_installed(request): if 'config' in settings.INSTALLED_APPS: return {'CONFIG_INSTALLED': True} else: return {'CONFIG_INSTALLED': False}
{ "content_hash": "8fa4df9e6a1aa61ceecbf09a2cd7142a", "timestamp": "", "source": "github", "line_count": 12, "max_line_length": 86, "avg_line_length": 33.083333333333336, "alnum_prop": 0.707808564231738, "repo_name": "chasetb/sal", "id": "2b75b25b00110670d7fc09df612cfd43e05c503a", "size": "397", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "sal/context_processors.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "192288" }, { "name": "HTML", "bytes": "119776" }, { "name": "JavaScript", "bytes": "683793" }, { "name": "Makefile", "bytes": "2284" }, { "name": "Nginx", "bytes": "1946" }, { "name": "Python", "bytes": "346909" }, { "name": "Shell", "bytes": "1964" } ], "symlink_target": "" }