Search is not available for this dataset
text
stringlengths
75
104k
def scale(self, *args):
    """Scale the data down to the given ranges (text encoding only).

    args form: <ds1 min>, <ds1 max>, ..., <dsN min>, <dsN max>

    APIPARAM: chds
    """
    self._scale = [','.join(smart_str(arg) for arg in args)]
    return self
def dataset(self, data, series=''):
    """Set the chart's dataset (may be 2-D or contain string data)."""
    self._dataset = data
    self._series = series
    return self
def marker(self, *args):
    """Append one marker definition.

    args form: <type>, <color>, <dataset idx>, <data point>, <size>, <priority>

    APIPARAM: chm
    """
    mtype = args[0]
    # Single-character types are validated against the known marker set.
    if len(mtype) == 1:
        assert mtype in MARKERS, 'Invalid marker type: %s'%mtype
    assert len(args) <= 6, 'Incorrect arguments %s'%str(args)
    colored = color_args(args, 1)
    self.markers.append(','.join(str(a) for a in colored))
    return self
def margin(self, left, right, top, bottom, lwidth=0, lheight=0):
    """Set chart-area margins and optional legend size.

    APIPARAM: chma
    """
    spec = '%d,%d,%d,%d' % (left, right, top, bottom)
    if lwidth or lheight:
        spec += '|%d,%d' % (lwidth, lheight)
    self['chma'] = spec
    return self
def line(self, *args):
    """Append one dataset's line style: <thickness>, <segment>, <blank>.

    APIPARAM: chls
    """
    self.lines.append(','.join('%.1f' % float(a) for a in args))
    return self
def fill(self, *args):
    """Append a solid/gradient fill: <type c|bg|a>, <style s|lg|ls>, ...

    APIPARAM: chf
    """
    kind, style = args[:2]
    assert kind in ('c','bg','a'), 'Fill type must be bg/c/a not %s'%kind
    assert style in ('s','lg','ls'), 'Fill style must be s/lg/ls not %s'%style
    # Solid fills carry the color at index 2; gradients at 3 and 5.
    if len(args) == 3:
        args = color_args(args, 2)
    else:
        args = color_args(args, 3, 5)
    self.fills.append(','.join(str(a) for a in args))
    return self
def grid(self, *args):
    """Apply a grid to the chart.

    args form: <x step>, <y step>, <line seg len>, <blank seg len>,
    <x offset>, <y offset>. A ``None`` argument becomes an empty field.

    APIPARAM: chg
    """
    # FIX: the original ran float() over every arg and then stripped the
    # string 'None' from the output -- but float(None) raises TypeError,
    # so None (empty field) arguments never worked. Map None to '' first.
    parts = ['' if a is None else str(float(a)) for a in args]
    self['chg'] = ','.join(parts)
    return self
def color(self, *args):
    """Set one color per dataset: <color 1>, ..., <color n>.

    APIPARAM: chco
    """
    colored = color_args(args, *range(len(args)))
    self['chco'] = ','.join(colored)
    return self
def label(self, *args):
    """Set simple labels; QR charts concatenate, others pipe-join.

    APIPARAM: chl
    """
    separator = '' if self['cht'] == 'qr' else '|'
    self['chl'] = separator.join(str(a) for a in args)
    return self
def legend_pos(self, pos):
    """Set the legend position.

    APIPARAM: chdlp
    """
    assert pos in LEGEND_POSITIONS, 'Unknown legend position: %s'%pos
    self['chdlp'] = str(pos)
    return self
def title(self, title, *args):
    """Set the chart title; optional style args are <color>, <font size>.

    APIPARAMS: chtt,chts
    """
    self['chtt'] = title
    if args:
        styled = color_args(args, 0)
        self['chts'] = ','.join(str(s) for s in styled)
    return self
def size(self, *args):
    """Set chart size; accepts (w, h) or a single (w, h) tuple.

    APIPARAM: chs
    """
    if len(args) == 2:
        w, h = map(int, args)
    else:
        w, h = map(int, args[0])
    self.check_size(w, h)
    self['chs'] = '%dx%d' % (w, h)
    return self
def render(self):
    """ Renders the chart context and axes into the dict data """
    # Merge in the axis parameters first; later keys may override them.
    self.update(self.axes.render())
    encoder = Encoder(self._encoding, None, self._series)
    # Default size if none was set; otherwise validate the WxH format.
    if not 'chs' in self:
        self['chs'] = '300x150'
    else:
        size = self['chs'].split('x')
        assert len(size) == 2, 'Invalid size, must be in the format WxH'
        self.check_size(*map(int,size))
    assert 'cht' in self, 'No chart type defined, use type method'
    self['cht'] = self.check_type(self['cht'])
    # numpy-style arrays expose .any(); plain sequences are truth-tested.
    if ('any' in dir(self._dataset) and self._dataset.any()) or self._dataset:
        self['chd'] = encoder.encode(self._dataset)
    elif not 'choe' in self:
        assert 'chd' in self, 'You must have a dataset, or use chd'
    # chds scaling only applies to the text encoding ('t...').
    if self._scale:
        assert self['chd'].startswith('t'),\
            'You must use text encoding with chds'
        self['chds'] = ','.join(self._scale)
    # Map charts: geographical area and country/state codes go together.
    if self._geo and self._ld:
        self['chtm'] = self._geo
        self['chld'] = self._ld
    if self.lines:
        self['chls'] = '|'.join(self.lines)
    if self.markers:
        self['chm'] = '|'.join(self.markers)
    if self.fills:
        self['chf'] = '|'.join(self.fills)
def check_type(self, type):
    """Check that *type* is a known chart type or a friendly alias.

    :param type: raw chart type or alias ('line', 'bar', ...)
    :return: the proper API chart-type code
    """
    if type in TYPES:
        return type
    tdict = dict(zip(TYPES, TYPES))
    tdict.update({
        'line': 'lc',
        'bar': 'bvs',
        'pie': 'p',
        'venn': 'v',
        'scater': 's',   # historical misspelling, kept for compatibility
        'scatter': 's',  # FIX: also accept the correct spelling
        'radar': 'r',
        'meter': 'gom',
    })
    assert type in tdict, 'Invalid chart type: %s'%type
    return tdict[type]
def url(self):
    """Return the fully rendered chart URL."""
    self.render()
    query = '&'.join(self._parts()).replace(' ', '+')
    return self._apiurl + query
def show(self, *args, **kwargs):
    """Open the chart URL in a web browser.

    Extra arguments are forwarded to webbrowser.open.
    """
    import webbrowser
    return webbrowser.open(str(self), *args, **kwargs)
def save(self, fname=None):
    """Download the chart PNG to *fname* (defaults to the chart title)."""
    fname = fname if fname else self.getname()
    assert fname != None, 'You must specify a filename to save to'
    if not fname.endswith('.png'):
        fname += '.png'
    try:
        urlretrieve(self.url, fname)
    except Exception:
        raise IOError('Problem saving %s to file'%fname)
    return fname
def img(self, **kwargs):
    """Return an XHTML <img/> tag for the chart.

    kwargs are img attributes (strictly validated). The URL is
    strictly escaped for XHTML.
    """
    escaped = self.url.replace('&', '&amp;').replace('<', '&lt;') \
        .replace('>', '&gt;').replace('"', '&quot;').replace("'", '&#39;')
    attrs = 'src="%s" ' % escaped
    for name, value in kwargs.items():
        if name not in IMGATTRS:
            raise AttributeError('Invalid img tag attribute: %s'%name)
        attrs += '%s="%s" ' % (name, value)
    return '<img %s/>'%attrs
def urlopen(self):
    """Return a readable file pointer to the chart PNG."""
    req = Request(str(self))
    try:
        # Resolves to the module-level urlopen, not this method.
        return urlopen(req)
    except HTTPError:
        _print('The server couldn\'t fulfill the request.')
    except URLError:
        _print('We failed to reach a server.')
def image(self):
    """Return a PngImageFile instance of the chart.

    Requires PIL/Pillow.
    """
    try:
        try:
            import Image
        except ImportError:
            from PIL import Image
    except ImportError:
        raise ImportError('You must install PIL to fetch image objects')
    # FIX: the original cStringIO/StringIO fallback chain fails on
    # Python 3 where neither module exists; io.BytesIO works on both.
    from io import BytesIO
    return Image.open(BytesIO(self.urlopen().read()))
def write(self, fp):
    """Write PNG image data in chunks to file pointer *fp*.

    fp must support w or wb.
    """
    urlfp = self.urlopen().fp
    while 1:
        try:
            # FIX: urlfp.next() is Python-2-only; the next() builtin
            # works on both Python 2 and 3 iterators.
            fp.write(next(urlfp))
        except StopIteration:
            return
def checksum(self):
    """Return the SHA1 hexdigest of the sorted chart URL parts.

    Good for unittesting...
    """
    self.render()
    joined = ''.join(sorted(self._parts()))
    return new_sha(joined).hexdigest()
def get_codes():
    """Return rows from the bundled geonames countryInfo.txt file.

    Columns: ISO ISO3 ISO-Numeric fips Country Capital Area(in sq km)
    Population Continent tld CurrencyCode CurrencyName Phone
    Postal Code Format Postal Code Regex Languages geonameid
    neighbours EquivalentFipsCode

    :return: list of tab-split rows (comment lines skipped)
    """
    cache_filename = os.path.join(os.path.dirname(__file__), 'data',
                                  'countryInfo.txt')
    data = []
    # FIX: use a context manager -- the original left the file handle open.
    with open(cache_filename, 'r') as fh:
        for line in fh:
            if not line.startswith('#'):
                data.append(line.split('\t'))
    return data
def amount(min=1, max=sys.maxsize, decimal_places=2):
    """Return a random Decimal quantized to *decimal_places*.

    :param min: minimum value
    :param max: maximum value
    :param decimal_places: number of decimal places
    :return: decimal.Decimal
    """
    # FIX: '%' binds tighter than '*', so the original evaluated
    # ('.%s1' % '0') * (n-1) == '.01' * (n-1), which is only valid for
    # n == 2 (e.g. '.01.01' for n == 3). Parenthesize the repetition.
    q = '.%s1' % ('0' * (decimal_places - 1))
    return decimal.Decimal(uniform(min, max)).quantize(decimal.Decimal(q))
def entity_name_decorator(top_cls):
    """Assign an entity name based on the class immediately inheriting Base.

    The name is frozen at decoration time so subclasses defined outside
    this module keep the decorated class's entity name (e.g. a Project2
    subclass of Project still reports "project").
    """
    fixed_name = inflection.underscore(top_cls.__name__).lower()

    def entity_name(cls):
        return fixed_name

    top_cls.entity_name = classmethod(entity_name)
    return top_cls
def eval(self, orig):
    """Apply the less-or-equal algorithm on the ordered metadata statements.

    :param orig: Start values
    """
    accepted = {}
    errors = []
    for claim, value in self.sup_items():
        if claim in DoNotCompare:
            continue
        if claim not in orig:
            accepted[claim] = value
        elif is_lesser(orig[claim], value):
            accepted[claim] = orig[claim]
        else:
            errors.append({'claim': claim, 'policy': orig[claim],
                           'err': value, 'signer': self.iss})
    # Claims only present at this level pass straight through.
    for claim, value in orig.items():
        if claim in DoNotCompare:
            continue
        accepted.setdefault(claim, value)
    self.le = accepted
    self.err = errors
def unprotected_and_protected_claims(self):
    """Return combined verified and self-asserted information.

    Verified information beats self-asserted: when a claim has both,
    only the verified value is returned.
    """
    if not self.sup:
        return self.le
    merged = {}
    for claim, own_value in self.le.items():
        merged[claim] = self.sup.le.get(claim, own_value)
    return merged
def signing_keys_as_jwks(self):
    """Build a JWKS from the self signer's signing keys.

    :return: Dictionary with a 'keys' list
    """
    keyjar = self.self_signer.keyjar
    keys = [k.serialize() for k in keyjar.get_signing_key()]
    if not keys:
        # Fall back to keys registered under this issuer's own ID.
        keys = [k.serialize() for k in keyjar.get_signing_key(owner=self.iss)]
    return {'keys': keys}
def _unpack(self, ms_dict, keyjar, cls, jwt_ms=None, liss=None):
    """
    :param ms_dict: Metadata statement as a dictionary
    :param keyjar: A keyjar with the necessary FO keys
    :param cls: What class to map the metadata into
    :param jwt_ms: Metadata statement as a JWS
    :param liss: List of FO issuer IDs
    :return: ParseInfo instance
    """
    if liss is None:
        liss = []
    _pr = ParseInfo()
    _pr.input = ms_dict
    ms_flag = False
    # Inline metadata statements: recurse into each FO's statement.
    if 'metadata_statements' in ms_dict:
        ms_flag = True
        for iss, _ms in ms_dict['metadata_statements'].items():
            if liss and iss not in liss:
                continue
            _pr = self._ums(_pr, _ms, keyjar)
    # Statements referenced by URI: fetch each and recurse.
    if 'metadata_statement_uris' in ms_dict:
        ms_flag = True
        if self.httpcli:
            for iss, url in ms_dict['metadata_statement_uris'].items():
                if liss and iss not in liss:
                    continue
                rsp = self.httpcli(method='GET', url=url,
                                   verify=self.verify_ssl)
                if rsp.status_code == 200:
                    _pr = self._ums(_pr, rsp.text, keyjar)
                else:
                    raise ParseError(
                        'Could not fetch jws from {}'.format(url))
    # Import any signing keys carried by the parsed sub-statements.
    for _ms in _pr.parsed_statement:
        if _ms:  # can be None
            loaded = False
            try:
                keyjar.import_jwks_as_json(_ms['signing_keys'],
                                           ms_dict['iss'])
            except KeyError:
                # No signing keys in this sub-statement.
                pass
            except TypeError:
                # Already a dict, not JSON text -- retry the dict API.
                try:
                    keyjar.import_jwks(_ms['signing_keys'], ms_dict['iss'])
                except Exception as err:
                    logger.error(err)
                    raise
                else:
                    loaded = True
            else:
                loaded = True
            if loaded:
                logger.debug(
                    'Loaded signing keys belonging to {} into the '
                    'keyjar'.format(ms_dict['iss']))
    # Sub-statements were expected but none parsed: give up early.
    if ms_flag is True and not _pr.parsed_statement:
        return _pr
    if jwt_ms:
        logger.debug("verifying signed JWT: {}".format(jwt_ms))
        try:
            _pr.result = cls().from_jwt(jwt_ms, keyjar=keyjar)
        except MissingSigningKey:
            # Possibly a self-signed statement carrying its own keys.
            if 'signing_keys' in ms_dict:
                try:
                    _pr.result = self.self_signed(ms_dict, jwt_ms, cls)
                except MissingSigningKey as err:
                    logger.error('Encountered: {}'.format(err))
                    _pr.error[jwt_ms] = err
        except (JWSException, BadSignature, KeyError) as err:
            logger.error('Encountered: {}'.format(err))
            _pr.error[jwt_ms] = err
    else:
        _pr.result = ms_dict
    # Replace the raw sub-statements with the parsed, FO-keyed Message.
    if _pr.result and _pr.parsed_statement:
        _prr = _pr.result  # NOTE(review): assigned but unused below
        _res = {}
        for x in _pr.parsed_statement:
            if x:
                _res[get_fo(x)] = x
        _msg = Message(**_res)
        logger.debug('Resulting metadata statement: {}'.format(_msg))
        _pr.result['metadata_statements'] = _msg
    return _pr
def unpack_metadata_statement(self, ms_dict=None, jwt_ms='', keyjar=None,
                              cls=ClientMetadataStatement, liss=None):
    """
    Starting with a signed JWT or a JSON document unpack and verify all
    the separate metadata statements.

    :param ms_dict: Metadata statement as a dictionary
    :param jwt_ms: Metadata statement as JWT
    :param keyjar: Keys that should be used to verify the signature of the
        document
    :param cls: What type (Class) of metadata statement this is
    :param liss: list of FO identifiers that matters. The rest will be
        ignored
    :return: A ParseInfo instance
    """
    # Default to the bundle's keys, or an empty jar if there is no bundle.
    if not keyjar:
        if self.jwks_bundle:
            keyjar = self.jwks_bundle.as_keyjar()
        else:
            keyjar = KeyJar()
    # A JWT takes precedence: decode it into the dict form first.
    if jwt_ms:
        try:
            ms_dict = unfurl(jwt_ms)
        except JWSException as err:
            logger.error('Could not unfurl jwt_ms due to {}'.format(err))
            raise
    if ms_dict:
        return self._unpack(ms_dict, keyjar, cls, jwt_ms, liss)
    else:
        raise AttributeError('Need one of ms_dict or jwt_ms')
def pack_metadata_statement(self, metadata, receiver='', iss='', lifetime=0,
                            sign_alg=''):
    """Create a signed JWT from a MetadataStatement instance.

    :param metadata: MetadataStatement instance to sign
    :param receiver: Receiver (audience) of the JWT
    :param iss: Issuer ID if different from default
    :param lifetime: JWT signature life time
    :param sign_alg: JWT signature algorithm
    :return: A JWT
    """
    return self.self_signer.sign(metadata, receiver=receiver, iss=iss,
                                 lifetime=lifetime, sign_alg=sign_alg)
def evaluate_metadata_statement(self, metadata, keyjar=None): """ Computes the resulting metadata statement from a compounded metadata statement. If something goes wrong during the evaluation an exception is raised :param metadata: The compounded metadata statement as a dictionary :return: A list of :py:class:`fedoidc.operator.LessOrEqual` instances, one per FO. """ # start from the innermost metadata statement and work outwards res = dict([(k, v) for k, v in metadata.items() if k not in IgnoreKeys]) les = [] if 'metadata_statements' in metadata: for fo, ms in metadata['metadata_statements'].items(): if isinstance(ms, str): ms = json.loads(ms) for _le in self.evaluate_metadata_statement(ms): if isinstance(ms, Message): le = LessOrEqual(sup=_le, **ms.to_dict()) else: # Must be a dict le = LessOrEqual(sup=_le, **ms) if le.is_expired(): logger.error( 'This metadata statement has expired: {}'.format(ms) ) logger.info('My time: {}'.format(utc_time_sans_frac())) continue le.eval(res) les.append(le) return les else: # this is the innermost try: _iss = metadata['iss'] except: le = LessOrEqual() le.eval(res) else: le = LessOrEqual(iss=_iss, exp=metadata['exp']) le.eval(res) les.append(le) return les
def correct_usage(self, metadata, federation_usage):
    """
    Remove MS paths that are marked to be used for another usage.

    :param metadata: Metadata statement as dictionary
    :param federation_usage: In which context this is expected to be used.
    :return: Filtered Metadata statement, or None if nothing matches.
    """
    if 'metadata_statements' in metadata:
        _msl = {}
        # Recursively keep only the branches that survive the filter.
        for fo, ms in metadata['metadata_statements'].items():
            if not isinstance(ms, Message):
                ms = json.loads(ms)
            if self.correct_usage(ms, federation_usage=federation_usage):
                _msl[fo] = ms
        if _msl:
            metadata['metadata_statements'] = Message(**_msl)
            return metadata
        else:
            return None
    else:  # this is the innermost
        try:
            assert federation_usage == metadata['federation_usage']
        except KeyError:
            # No usage restriction declared: statement is acceptable.
            pass
        except AssertionError:
            # Marked for a different usage: drop this path.
            return None
        return metadata
def extend_with_ms(self, req, sms_dict):
    """Add signed metadata statements to a request.

    :param req: The request
    :param sms_dict: FO IDs mapped to signed metadata statements (sms)
        or to URIs pointing at sms.
    :return: The updated request
    """
    by_value = {}
    by_uri = {}
    for fo, sms in sms_dict.items():
        if sms.startswith(('http://', 'https://')):
            by_uri[fo] = sms
        else:
            by_value[fo] = sms
    if by_value:
        req['metadata_statements'] = Message(**by_value)
    if by_uri:
        req['metadata_statement_uris'] = Message(**by_uri)
    return req
def parse_args():
    """Parse command line args using the argparse library."""
    argparser = argparse.ArgumentParser(
        usage="Usage: create_concordance <infile> [<outfile>]",
        description="Simple Concordance Generator")
    argparser.add_argument(
        'infile', type=argparse.FileType('r'),
        help="File read in to create concordance")
    argparser.add_argument(
        'outfile', nargs='?', type=argparse.FileType('w'),
        default=sys.stdout,
        help="File to write concordance to. Default is stdout")
    argparser.add_argument(
        '--word', nargs="?", const=str,
        help="Display a word in concordance")
    return argparser.parse_args()
def addCommandLineArgs(arg_parser):
    """Add logging option to an ArgumentParser."""
    # Register the custom actions under string names so add_argument
    # can reference them via action="...".
    arg_parser.register("action", "log_levels", LogLevelAction)
    arg_parser.register("action", "log_files", LogFileAction)
    arg_parser.register("action", "log_help", LogHelpAction)
    group = arg_parser.add_argument_group("Logging options")
    group.add_argument(
        "-l", "--log-level", dest="log_levels", action="log_levels",
        metavar="LOGGER:LEVEL", default=[],
        help="Set log levels for individual loggers. See --help-logging for "
        "complete details.")
    group.add_argument(
        "-L", "--log-file", dest="log_files", action="log_files",
        metavar="LOGGER:FILE", default=[],
        help="Set log the output file for individual loggers. "
        " See --help-logging for complete details.")
    # Hidden option: prints extended logging help then exits.
    group.add_argument("--help-logging", action="log_help",
                       help=argparse.SUPPRESS)
def applyLoggingOpts(log_levels, log_files):
    """Apply logging options produced by LogLevelAction and LogFileAction.

    More often than not this function is not needed -- the actions already
    applied the options during parsing -- but it can be used when they must
    be applied again (e.g. command line opts take precedence but were
    overridden by a fileConfig).

    :param log_levels: iterable of (logger, level) pairs
    :param log_files: iterable of (logger, handler) pairs
    """
    for logger, level in log_levels:
        logger.setLevel(level)
    for logger, new_handler in log_files:
        # FIX: iterate over a copy -- removeHandler() mutates
        # logger.handlers, and mutating the list while iterating it
        # skips every other handler.
        for handler in list(logger.handlers):
            logger.removeHandler(handler)
        logger.addHandler(new_handler)
def verbose(self, msg, *args, **kwargs):
    """Log msg at the custom 'verbose' level (debug < verbose < info).

    Assumes logging.VERBOSE has been registered elsewhere in the module.
    """
    self.log(logging.VERBOSE, msg, *args, **kwargs)
def _aodata(echo, columns, xnxq=None, final_exam=False): """ 生成用于post的数据 :param echo: a int to check is response is write :type echo: int :param columns: 所有columns列名组成的list :type columns: list :param xnxq: str :type xnxq: string :param final_exam: 是否期末考试 :rtype: bool :return: a valid data for post to get data """ ao_data = [{"name": "sEcho", "value": echo}, {"name": "iColumns", "value": len(columns)}, {"name": "sColumns", "value": ""}, {"name": "iDisplayStart", "value": 0}, {"name": "iDisplayLength", "value": -1}, ] if xnxq: if final_exam: ao_data.append( {"name": "ksrwid", "value": "000000005bf6cb6f015bfac609410d4b"}) ao_data.append({"name": "xnxq", "value": xnxq}) for index, value in enumerate(columns): ao_data.append( {"name": "mDataProp_{}".format(index), "value": value}) ao_data.append( {"name": "bSortable_{}".format(index), "value": False}) return urlencode({"aoData": ao_data})
def login(self):
    """
    Log in to the system and return a requests session object.

    :return: session with login cookies
    :rtype: requests.sessions.Session
    :raises AuthFailure: when the server does not answer '"success"'
    """
    # NOTE(review): indentation reconstructed from a collapsed source
    # line -- presumably only the timestamp update is guarded and a
    # fresh session is always created; verify against upstream history.
    if not hasattr(self, 'session'):
        self.last_connect = time.time()
    s = requests.session()
    # Prime cookies before posting credentials.
    s.get('http://bkjws.sdu.edu.cn')
    data = {
        'j_username': self.student_id,
        'j_password': self.password_md5
    }
    r6 = s.post('http://bkjws.sdu.edu.cn/b/ajaxLogin',
                headers={'user-agent': self._ua}, data=data)
    if r6.text == '"success"':
        return s
    else:
        s.close()
        raise AuthFailure(r6.text)
def get_lesson(self):
    """Fetch the current-term timetable.

    :return: list of lesson dicts (also cached on self._lessons)
    :rtype: list[dict]
    """
    html = self._get('http://bkjws.sdu.edu.cn/f/xk/xs/bxqkb')
    soup = BeautifulSoup(html, "html.parser")
    table = soup.find('table', attrs={
        "class": "table table-striped table-bordered table-hover",
        "id": "ysjddDataTableId"})
    rows = table.find_all('tr')
    result = []
    # Skip the header row; map the remaining cells by position.
    for row in rows[1:]:
        cells = row.find_all('td')
        result.append({
            "lesson_num_long": cells[1].text,
            "lesson_name": cells[2].text,
            "lesson_num_short": cells[3].text,
            "credit": cells[4].text,
            "school": cells[6].text,
            "teacher": cells[7].text,
            "weeks": cells[8].text,
            "days": cells[9].text,
            "times": cells[10].text,
            "place": cells[11].text,
        })
    self._lessons = result
    return result
def lessons(self):
    """Lesson list; lazily calls get_lesson() on first access.

    :rtype: list
    """
    if not hasattr(self, '_lessons'):
        self.get_lesson()
    return self._lessons
def detail(self):
    """Student information; lazily calls get_detail() on first access.

    :rtype: dict
    """
    if not hasattr(self, '_detail'):
        self.get_detail()
    return self._detail
def get_detail(self):
    """Fetch student information and cache it on self._detail.

    :return: information of the student
    :rtype: dict
    """
    response = self._post("http://bkjws.sdu.edu.cn/b/grxx/xs/xjxx/detail",
                          data=None)
    if response['result'] != 'success':
        self._unexpected(response)
    self._detail = response['object']
    return self._detail
def get_raw_past_score(self):
    """Return the raw response of the past-score query.

    Prefer get_past_score() for the parsed form.

    :rtype: dict
    """
    echo = self._echo
    payload = self._aodata(
        echo,
        columns=["xnxq", "kcm", "kxh", "xf", "kssj", "kscjView",
                 "wfzjd", "wfzdj", "kcsx"])
    response = self._post("http://bkjws.sdu.edu.cn/b/cj/cjcx/xs/lscx",
                          data=payload)
    if self._check_response(response, echo):
        self._raw_past_score = response
        return self._raw_past_score
    else:
        self._unexpected(response)
def get_comment_lesson_info(self):  # fetch course list
    """Return all courses that still need teaching evaluation.

    :rtype: list
    """
    echo = self._echo
    response = self._post(
        'http://bkjws.sdu.edu.cn/b/pg/xs/list',
        data=self._aodata(echo, ['kch', 'kcm', 'jsm', 'function',
                                 'function']))
    if self._check_response(response, echo=echo):
        return response['object']['aaData']
    self._unexpected(response)
def get_exam_time(self, xnxq):
    """Return exam times for a term.

    :param xnxq: term string ``<start year>-<end year>-{1|2|3}``
        (3 is summer school), e.g. ``2016-2017-2``
    :type xnxq: str
    :rtype: list
    """
    echo = self._echo
    response = self._post(
        'http://bkjws.sdu.edu.cn/b/ksap/xs/vksapxs/pageList',
        data=self._aodata(
            echo, xnxq=xnxq,
            columns=["function", 'ksmc', 'kcm', 'kch', 'xqmc', 'jxljs',
                     'sjsj', "ksfsmc", "ksffmc", "ksbz"]))
    if self._check_response(response, echo):
        return response['object']['aaData']
    self._unexpected(response)
def _letter_map(word): """Creates a map of letter use in a word. Args: word: a string to create a letter map from Returns: a dictionary of {letter: integer count of letter in word} """ lmap = {} for letter in word: try: lmap[letter] += 1 except KeyError: lmap[letter] = 1 return lmap
def anagrams_in_word(word, sowpods=False, start="", end=""):
    """Finds anagrams in word.

    Args:
        word: the string to base our search off of
        sowpods: boolean to declare TWL or SOWPODS words file
        start: a string of starting characters to find anagrams based on
        end: a string of ending characters to find anagrams based on

    Yields:
        a tuple of (word, score) that can be made with the input_word
    """
    input_letters, blanks, questions = blank_tiles(word)
    # Required prefix/suffix letters also count as available tiles.
    for tile in start + end:
        input_letters.append(tile)
    for word in word_list(sowpods, start, end):
        lmap = _letter_map(input_letters)
        used_blanks = 0
        for letter in word:
            if letter in lmap:
                lmap[letter] -= 1
                # Going negative means this letter must come from a blank.
                if lmap[letter] < 0:
                    used_blanks += 1
                    if used_blanks > (blanks + questions):
                        break
            else:
                used_blanks += 1
                if used_blanks > (blanks + questions):
                    break
        else:
            # for/else: only yield when the letter loop did NOT break,
            # i.e. the word is makeable with the available tiles.
            yield (word, word_score(word, input_letters, questions))
def asAMP(cls):
    """Return the exception's name in an AMP Command friendly format.

    E.g. a class named ``ExampleExceptionClass`` yields
    ``(cls, "EXAMPLE_EXCEPTION_CLASS")``.
    """
    parts = groupByUpperCase(cls.__name__)
    amp_name = "_".join(p.upper() for p in parts)
    return cls, amp_name
def transform_timeseries_data(timeseries, start, end=None):
    """Transform a Go Metrics API result into a list of values for a window.

    start and end are expected to be Unix timestamps in microseconds.
    """
    values = []
    collecting = False
    for points in timeseries.values():
        for point in points:
            if point['x'] == start:
                collecting = True
            if collecting:
                values.append(point['y'])
            if end is not None and point['x'] == end:
                return values
    return values
def get_last_value_from_timeseries(timeseries):
    """Return the most recent non-zero value of a .last metric.

    Returns zero for empty data. Only the first metric is examined.
    """
    if not timeseries:
        return 0
    points = next(iter(timeseries.values()))
    return next((p['y'] for p in reversed(points) if p['y'] > 0), 0)
def validate_page_number(number):
    """Validate the given 1-based page number and return it as an int."""
    try:
        value = int(number)
    except (TypeError, ValueError):
        raise PageNotAnInteger('That page number is not an integer')
    if value < 1:
        raise EmptyPage('That page number is less than 1')
    return value
def get_page_of_iterator(iterator, page_size, page_number):
    """Get one page from an iterator.

    Invalid page numbers fall back to the first page.
    """
    try:
        page_number = validate_page_number(page_number)
    except (PageNotAnInteger, EmptyPage):
        page_number = 1
    start = (page_number - 1) * page_size
    # Take one extra item so we can tell whether another page exists.
    end = page_number * page_size + 1
    skipped_items = list(islice(iterator, start))
    items = list(islice(iterator, end))
    if not items and page_number != 1:
        # Ran past the end: fall back to the items we skipped (page 1).
        items = skipped_items
        page_number = 1
    has_next = len(items) > page_size
    return NoCountPage(items[:page_size], page_number, page_size, has_next)
def sh(cmd, escape=True):
    """Execute *cmd* in a shell.

    Returns a 2-tuple of (returncode (int), output (bytes/str)).
    """
    if escape:
        cmd = quote(cmd)
    proc = Popen(cmd, stdout=PIPE, stderr=STDOUT, shell=True)
    output, _unused = proc.communicate()
    return (proc.poll(), output)
def gzip(filename):
    """Gzip a file in place via the system gzip binary.

    Returns a 3-tuple of (returncode (int), terminal output (string),
    new filename).
    """
    retcode, output = sh('gzip %s' % filename)
    return (retcode, output, filename + '.gz')
def tar(filename, dirs=[], gzip=False):
    """Create a tar (or tar.gz) archive at *filename*.

    :param dirs: directory or list of directories to archive
    :param gzip: if True, gzip the archive (default False)
    :return: 3-tuple of (returncode (int), terminal output (string),
        the new filename)
    """
    cmd = 'tar czvf %s ' % filename if gzip else 'tar cvf %s ' % filename
    # FIX: the original wrote ``type(dirs) != 'list'`` -- comparing a type
    # object to the *string* 'list' is always True, so a real list was
    # wrapped again and str()-ed into "['a', 'b']". Use isinstance.
    if not isinstance(dirs, (list, tuple)):
        dirs = [dirs]
    cmd += ' '.join(str(d) for d in dirs)
    retcode, output = sh(cmd)
    return (retcode, output, filename)
def chown(path, uid, guid, recursive=True):
    """Alternative to os.chown; wraps the unix chown binary.

    Example: chown('/tmp/test/', bob, bob)

    :return: 2-tuple of (exitcode, terminal output)
    """
    flag = '-R ' if recursive else ''
    return sh('chown %s%s:%s %s' % (flag, uid, guid, path))
def chmod(path, mode, recursive=True):
    """Alternative to os.chmod; wraps the unix chmod binary.

    :return: 2-tuple of (exitcode, terminal output)
    """
    flag = '-R ' if recursive else ''
    return sh('chmod %s%s %s' % (flag, mode, path))
def handle_exception(self, exc_info=None, state=None, tags=None,
                     return_feedback_urls=False, dry_run=False):
    """Report the currently handled exception to Stack Sentinel.

    Call this from within a try/except clause.

    :param exc_info: Return value of sys.exc_info(); if None,
        sys.exc_info() is called here.
    :param state: Dict of state associated with the error (form data,
        cookies, ...). 'sys' and 'machine' are added if missing.
    :param tags: A tag or list of string tags for the report.
    :param return_feedback_urls: If True, the API returns feedback URLs
        you can present to the user for extra debugging information.
    :param dry_run: If True, do not contact the API; return the payload
        that would have been sent (used in unittests).
    """
    if not exc_info:
        exc_info = sys.exc_info()
    if exc_info is None:
        raise StackSentinelError(
            "handle_exception called outside of exception handler")
    (etype, value, tb) = exc_info
    # FIX: check for None *before* the isinstance test. The original
    # wrapped tags=None into [None], making its later None-check dead
    # and leaking None into the reported tags.
    if tags is None:
        tags = []
    elif not isinstance(tags, list):
        tags = [tags]
    # Walk the traceback into a list of {line, module, method} dicts.
    new_tb = []
    while tb is not None:
        frame = tb.tb_frame
        code = frame.f_code
        new_tb.append({'line': tb.tb_lineno,
                       'module': code.co_filename,
                       'method': code.co_name})
        tb = tb.tb_next
    if state is None:
        state = {}
    if 'sys' not in state:
        try:
            state['sys'] = self._get_sys_info()
        except Exception as e:
            state['sys'] = '<Unable to get sys: %r>' % e
    if 'machine' not in state:
        try:
            state['machine'] = self._get_machine_info()
        except Exception as e:
            # FIX: the original used '%e' (a float format) which raises
            # TypeError on an exception instance; use %r as for 'sys'.
            state['machine'] = '<Unable to get machine: %r>' % e
    # The joy of Unicode: py2 needs explicit unicode(), py3 str is unicode.
    if sys.version_info.major > 2:
        error_type = str(etype.__name__)
        error_message = str(value)
    else:
        error_type = unicode(etype.__name__)
        error_message = unicode(value)
    send_error_args = dict(error_type=error_type,
                           error_message=error_message,
                           traceback=new_tb,
                           environment=self.environment,
                           state=state,
                           tags=self.tags + tags,
                           return_feedback_urls=return_feedback_urls)
    if dry_run:
        return send_error_args
    return self.send_error(**send_error_args)
def send_error(self, error_type, error_message, traceback, environment,
               state, tags=None, return_feedback_urls=False):
    """Send an error payload to the Stack Sentinel API.

    :param error_type: Type of error generated (e.g. "TypeError")
    :param error_message: Message of the error
    :param traceback: List of dicts with "line", "method", "module" keys
    :param environment: Environment the error occurred in (e.g. "devel")
    :param state: Application state when the error happened
    :param tags: Arbitrary tags (list) associated with the error
    :param return_feedback_urls: If True, the response offers URLs for
        collecting additional user feedback.
    :return: Parsed (dict/list) JSON return value from the API
    """
    (request, payload) = self._generate_request(
        environment, error_message, error_type, return_feedback_urls,
        state, tags, traceback)
    try:
        response = urlopen(request)
    except HTTPError as e:
        if e.code == 400:
            raise StackSentinelError(e.read())
        else:
            raise
    if sys.version_info.major > 2:
        text_response = response.read().decode(
            response.headers.get_content_charset() or 'utf8')
    else:
        encoding = response.headers.get(
            'content-type', '').split('charset=')[-1].strip()
        # FIX: the original branches were swapped -- it decoded with
        # 'utf8' when a charset WAS declared and with the empty string
        # (a LookupError) when none was. Use the declared charset when
        # present, otherwise permissive utf8.
        if encoding:
            text_response = response.read().decode(encoding)
        else:
            text_response = response.read().decode('utf8', 'replace')
    return json.loads(text_response)
def make_internal_signing_service(config, entity_id):
    """Initiate an InternalSigningService from configuration.

    :param config: The signing service configuration
    :param entity_id: The entity identifier
    :return: An InternalSigningService instance
    """
    kj_args = {k: v for k, v in config.items() if k in KJ_SPECS}
    return InternalSigningService(entity_id, init_key_jar(**kj_args))
def make_signing_service(config, entity_id):
    """Initiate a SigningService instance from configuration.

    :param config: The signing service configuration
    :param entity_id: The entity identifier
    :return: A SigningService instance
    :raises ValueError: on an unknown config['type']
    """
    kj_args = {k: v for k, v in config.items() if k in KJ_SPECS}
    keyjar = init_key_jar(**kj_args)
    service_type = config['type']
    if service_type == 'internal':
        return InternalSigningService(entity_id, keyjar)
    if service_type == 'web':
        # Re-home the anonymous keys under the remote issuer's ID.
        keyjar.issuer_keys[config['iss']] = keyjar.issuer_keys['']
        del keyjar.issuer_keys['']
        return WebSigningServiceClient(config['iss'], config['url'],
                                       entity_id, keyjar)
    raise ValueError('Unknown signer type: {}'.format(service_type))
def sign(self, req, receiver='', iss='', lifetime=0, sign_alg='', aud=None):
    """Create a signed JWT.

    :param req: MetadataStatement instance to sign
    :param receiver: Intended audience for the JWS
    :param iss: Issuer of the JWT
    :param lifetime: Lifetime of the signature
    :param sign_alg: Signature algorithm; auto-picked from available
        keys (RSA -> RS256, EC -> ES256) when empty.
    :param aud: The audience, a list of receivers.
    :return: A signed JWT
    """
    alg = sign_alg
    if not alg:
        for key_type, candidate in (('RSA', 'RS256'), ('EC', 'ES256')):
            if self.keyjar.get_signing_key(key_type=key_type):
                alg = candidate
                break
        else:
            raise NoSigningKeys('Could not find any signing keys')
    return self.pack(req=req, receiver=receiver, iss=iss, lifetime=lifetime,
                     sign=True, encrypt=False, sign_alg=alg)
def create(self, req, **kwargs):
    """
    POST a first metadata statement signing request to a signing service.

    :param req: The metadata statement that the entity wants signed
    :return: returns a dictionary with 'sms' and 'loc' as keys.
    """
    resp = requests.post(self.url, json=req, **self.req_args())
    return self.parse_response(resp)
def update_metadata_statement(self, location, req):
    """
    PUT an update for an earlier accepted and signed metadata statement.

    :param location: A URL to which the update request is sent
    :param req: The diff between what is registered with the signing
        service and what it should be.
    :return: returns a dictionary with 'sms' and 'loc' as keys.
    """
    resp = requests.put(location, json=req, **self.req_args())
    return self.parse_response(resp)
def update_signature(self, location):
    """
    GET a newly signed metadata statement.

    :param location: A URL to which the request is sent
    :return: returns a dictionary with 'sms' and 'loc' as keys.
    """
    resp = requests.get(location, **self.req_args())
    return self.parse_response(resp)
def _yield_bundle_contents(self, data):
    """Yield bundle contents from the given definition.

    Each yielded item is either a file-path string or a Bundle built
    from a nested dict definition."""
    contents = data if isinstance(data, list) else data.get('contents', [])
    if isinstance(contents, six.string_types):
        # Wrap a lone path string so it iterates as a single item.
        contents = (contents,)
    for item in contents:
        if isinstance(item, dict):
            item = self._create_bundle(item)
        yield item
def _create_bundle(self, data):
    """Return a Bundle initialised from the given definition.

    :param data: either a list of contents or a dict carrying contents
        plus optional bundle keyword options.
    :return: the bundle, passed through self._auto_filter_bundle.
    """
    kwargs = {}
    # Removed unused local `filters = None` (it was shadowed by the
    # kwargs.update below and never read).
    if isinstance(data, dict):
        kwargs.update(
            filters=data.get('filters', None),
            output=data.get('output', None),
            debug=data.get('debug', None),
            extra=data.get('extra', {}),
            config=data.get('config', {}),
            depends=data.get('depends', None))
    bundle = Bundle(*list(self._yield_bundle_contents(data)), **kwargs)
    return self._auto_filter_bundle(bundle)
def urls_for(self, asset_type, *args, **kwargs):
    """Return every URL needed to include all assets of asset_type:
    dependency URLs first, then this bundle's own URLs."""
    depend_urls = self.urls_for_depends(asset_type, *args, **kwargs)
    own_urls = self.urls_for_self(asset_type, *args, **kwargs)
    return depend_urls + own_urls
def html_tags_for(self, asset_type, *args, **kwargs):
    """Return html tags for urls of asset_type, dependencies first."""
    fragments = [self._ref(ref).html_tags_for(asset_type, *args, **kwargs)
                 for ref in self.depends]
    if asset_type in self.typed_bundles:
        fragments.append(render_asset_html_tags(
            asset_type, self.urls_for_self(asset_type, *args, **kwargs)))
    return "\n".join(fragments)
def html_tags(self, *args, **kwargs):
    """Return all html tags for every registered asset type."""
    return "\n".join(self.html_tags_for(asset_type.name, *args, **kwargs)
                     for asset_type in list_asset_types())
def find_version(filename):
    """Extract the value assigned to __version__ in filename via regex.

    Returns "0.0-version-unknown" when no assignment is found."""
    version_pattern = r'^__version__ = [\'"]([^\'"]*)[\'"]'
    with io.open(filename, encoding="utf-8") as fh:
        match = re.search(version_pattern, fh.read(), re.M)
    return match.group(1) if match else "0.0-version-unknown"
def protocolise(url):
    """
    Ensure url carries a protocol: when neither a URL scheme nor a
    protocol-like prefix is present, prepend 'http://'.
    """
    # Also matches sloppy variants like "http//localhost/something",
    # which are deliberately left untouched.
    proto_pattern = re.compile(r'https?:{0,1}/{1,2}')
    has_scheme = bool(urlparse.urlparse(url).scheme)
    if not has_scheme and not proto_pattern.search(url):
        url = 'http://{0}'.format(url)
    return url
def find_links(url):
    """
    Find the href destinations of all links at URL.

    Arguments:
    - `url`: str

    Return: list[str]
    Exceptions: None
    """
    url = protocolise(url)
    page = requests.get(url).content
    root = html.parse(StringIO(page)).getroot()
    hrefs = [anchor.attrib['href'] for anchor in root.cssselect('a')]
    # !!! This does the wrong thing for bbc.co.uk/index.html
    return [h if h.startswith('http') else '/'.join([url, h]) for h in hrefs]
def _connected(client):
    """
    Handle a fresh connection to the AMP server: start listening locally
    and hand the local proxying factory a reference to the AMP client.
    """
    log.msg("Connected to AMP server, starting to listen locally...")
    local_factory = multiplexing.ProxyingFactory(client, "hello")
    return listeningEndpoint.listen(local_factory)
def getlist(self, section, option, *, raw=False, vars=None, fallback=None):
    """Return the [section] option values as a list.

    The list items must be delimited with commas and/or newlines.
    """
    raw_value = self.get(section, option, raw=raw, vars=vars, fallback=fallback)
    if not raw_value:
        return []
    return [item.strip()
            for line in raw_value.split("\n")
            for item in line.split(",")]
def get_modules(self):
    """Collect module names from the configured scan packages.

    Traverses every package directory (resolved from project_abspath and
    packages_scan set by the customer) and records all module names found.

    :raises TypeError: when project_abspath is empty.
    """
    if not self.project_abspath:
        raise TypeError("project_abspath can not be empty.")
    for package_path in self.get_package_abspath():
        self.get_module_name(package_path)
    return self._modules
def import_modules(self):
    """Import every discovered service module, wrapping import failures
    in ImportModulesError."""
    modules = self.get_modules()
    log.info("import service modules: " + str(modules))
    try:
        for module_name in modules:
            __import__(module_name)
    except ImportError as error:
        raise ImportModulesError(error.msg)
def to_dates(param):
    """
    Convert a date string in various formats to a normalized and validated
    date range: a 2-tuple of (lower boundary, upper boundary).

    Valid inputs are, for example:

    2012              => Jan 1 2012 - Dec 31 2012 (whole year)
    201201            => Jan 1 2012 - Jan 31 2012 (whole month)
    20120101          => Jan 1 2012 - Jan 1 2012 (whole day)
    2011-2011         => same as "2011", the whole year 2011
    2011-2012         => Jan 1 2011 - Dec 31 2012 (two years)
    201104-2012       => Apr 1 2011 - Dec 31 2012
    201104-201203     => Apr 1 2011 - March 31 2012
    20110408-2011     => Apr 8 2011 - Dec 31 2011
    20110408-201105   => Apr 8 2011 - May 31 2011
    20110408-20110507 => Apr 8 2011 - May 07 2011
    2011-             => Jan 1 2011 - Dec 31 9999 (unlimited)
    201104-           => Apr 1 2011 - Dec 31 9999 (unlimited)
    20110408-         => Apr 8 2011 - Dec 31 9999 (unlimited)
    -2011             => Jan 1 MINYEAR - Dec 31 2011
    -201104           => Jan 1 MINYEAR - Apr 30 2011
    -20110408         => Jan 1 MINYEAR - Apr 8 2011
    """
    if param.find('-') == -1:
        # No separator: the same value bounds the range on both sides.
        lower, upper = param, param
    else:
        lower, upper = param.split('-')
    return (expand_date_param(lower, 'lower'),
            expand_date_param(upper, 'upper'))
def expand_date_param(param, lower_upper):
    """
    Expand a (possibly) incomplete date string to either the lowest or
    highest possible contained date and return a datetime.datetime.

    0753   (lower) => 0753-01-01
    2012   (upper) => 2012-12-31
    2012   (lower) => 2012-01-01
    201208 (upper) => 2012-08-31
    etc.

    :raises ValueError: for date strings of unsupported length.
    """
    if lower_upper == 'upper':
        # [year, month, day, hour, minute, second] -- latest possible values
        parts = [datetime.MAXYEAR, 12, 31, 23, 59, 59]
    else:
        # earliest possible values
        parts = [datetime.MINYEAR, 1, 1, 0, 0, 0]

    length = len(param)
    if length not in (0, 4, 6, 8, 10, 12, 14):
        raise ValueError('Bad date string provided. Use YYYY, YYYYMM or YYYYMMDD.')

    # Overwrite defaults with whatever components the string provides.
    if length >= 4:
        parts[0] = int(param[0:4])
    if length >= 6:
        parts[1] = int(param[4:6])
        if length == 6 and lower_upper == 'upper':
            # Month given but no day: clamp to that month's last day.
            parts[2] = monthrange(parts[0], parts[1])[1]
    if length >= 8:
        parts[2] = int(param[6:8])
    if length >= 10:
        parts[3] = int(param[8:10])
    if length >= 12:
        parts[4] = int(param[10:12])
    if length >= 14:
        parts[5] = int(param[12:14])

    # force the year into the representable range
    parts[0] = min(datetime.MAXYEAR, max(datetime.MINYEAR, parts[0]))
    return datetime.datetime(year=parts[0], month=parts[1], day=parts[2],
                             hour=parts[3], minute=parts[4], second=parts[5])
def select_fields(doc, field_list):
    '''
    Build a new doc from 'doc' using only the keys named in 'field_list'.
    Supports referencing fields using dotted notation "a.b.c" so we can
    parse nested fields the way MongoDB does (via the Nested_Dict helper).
    '''
    if not field_list:
        return doc

    source = Nested_Dict(doc)
    selected = Nested_Dict({})
    for field in field_list:
        if source.has_key(field):
            selected.set_value(field, source.get_value(field))
    return selected.dict_value()
def date_map(doc, datemap_list, time_format=None):
    '''
    For each field name in "datemap_list", find that key in doc and map
    the datetime object to a strftime string so pprint and others print
    readable datetimes.

    :param doc: the document to transform
    :param datemap_list: field names whose values are datetimes
    :param time_format: optional strftime format passed through
    :return: the (possibly transformed) document
    '''
    if datemap_list:
        for field in datemap_list:
            # BUG FIX: previously guarded by isinstance(field, datetime),
            # which is always False for string field names and made this a
            # no-op. Apply the mapping for every listed field instead;
            # date_map_field is presumably tolerant of missing keys -- TODO
            # confirm against CursorFormatter.date_map_field.
            doc = CursorFormatter.date_map_field(doc, field, time_format=time_format)
    return doc
def printCursor(self, fieldnames=None, datemap=None, time_format=None):
    '''
    Print the cursor in the configured output format (CSV when
    self._format == 'csv', JSON otherwise) and return the record count.
    '''
    if self._format == 'csv':
        return self.printCSVCursor(fieldnames, datemap, time_format)
    return self.printJSONCursor(fieldnames, datemap, time_format)
def output(self, fieldNames=None, datemap=None, time_format=None):
    '''
    Output all fields using the fieldNames list; fields listed in datemap
    are formatted as dates.

    :return: the record count reported by printCursor.
    '''
    # BUG FIX: self._cursor was passed as the first positional argument,
    # shifting every parameter of printCursor (which takes only
    # fieldnames/datemap/time_format and reads the cursor from self) one
    # place to the right. Also return the count instead of discarding it.
    count = self.printCursor(fieldNames, datemap, time_format)
    return count
def get_tasks(do_tasks, dep_graph):
    """Given a list of tasks to perform and a dependency graph, return the
    tasks that must be performed, in the correct order"""
    #XXX: Is it important that if a task has "foo" before "bar" as a dep,
    #     that foo executes before bar? Why? ATM this may not happen.
    # Each user-specified task gets its own execution graph.
    task_graphs = []
    for task in do_tasks:
        execution_graph = DiGraph()
        execution_graph.add_node(task)
        _get_deps(task, execution_graph, dep_graph)
        task_graphs.append(execution_graph)
    return flatten(reversed(topological_sort(graph)) for graph in task_graphs)
def rotate(filename, targetdir, max_versions=None, archive_dir=None):
    """
    Rotates a file.

    moves original file.ext to targetdir/file-YYYY-MM-DD-THH:MM:SS.ext

    deletes all older files matching the same pattern in targetdir that
    exceed the amount of max_versions.
    if max_versions = None, no old versions are deleted.

    if archive_dir is set, old versions are not deleted but moved to
    archive_dir

    :return: True on success.
    """
    dtimeformat = '%Y-%m-%d-%H:%M:%S'
    now = datetime.now().strftime(dtimeformat)
    old_path, old_filename = os.path.split(filename)
    fileroot, ext = old_filename.split(os.extsep, 1)
    new_filename = fileroot + '-' + now + '.' + ext
    new_filepath = os.path.join(targetdir, new_filename)

    if max_versions:
        # Compile once outside the listdir loop; escape root/ext so regex
        # metacharacters in filenames can't corrupt the match, and match a
        # literal dot before the extension.
        pattern = re.compile(
            r'^%s-(?P<date>\d{4}-\d{2}-\d{2}-\d{2}:\d{2}:\d{2})\.%s'
            % (re.escape(fileroot), re.escape(ext)))
        # Map timestamp -> filename for all existing rotated versions.
        old_files = {}
        for candidate in os.listdir(targetdir):
            match = pattern.match(candidate)
            if match:
                old_files[match.group('date')] = candidate

        # BUG FIX: dict.keys() is a view in Python 3 and has no .sort() /
        # slice-deletion -- build a sorted list instead (newest first).
        delkeys = sorted(old_files,
                         key=lambda d: datetime.strptime(d, dtimeformat),
                         reverse=True)
        # Keep the newest (max_versions - 1) old files; together with the
        # freshly rotated file that leaves max_versions in targetdir.
        del delkeys[0:max_versions - 1]

        for key in delkeys:
            fname = old_files[key]
            fpath = os.path.join(targetdir, fname)
            if archive_dir:
                shutil.move(fpath, os.path.join(archive_dir, fname))
            else:
                os.remove(fpath)

    shutil.move(filename, new_filepath)
    return True
def api_request(methods=None, require_token=True):
    """
    View decorator that handles JSON based API requests and responses consistently.

    :param methods: A list of allowed methods
    :param require_token: Whether API token is checked automatically or not
    """
    def decorator(view_func):
        @wraps(view_func, assigned=available_attrs(view_func))
        def _wrapped_view(request, *args, **kwargs):
            # Resolved lazily so the decorator can be applied at import time.
            ApiToken = apps.get_model('api', 'ApiToken')
            m = methods if methods is not None else DEFAULT_API_METHODS
            if request.method not in m:
                response = ApiResponse(False, 'Method not supported', status=405)
                # BUG FIX: joined `methods`, which is None when the default
                # method list is in effect (TypeError); join the effective
                # list `m` instead.
                response['Allow'] = ', '.join(m)
                return response

            try:
                data = json.loads(request.body.decode('utf-8')) if request.body else {}
                if require_token:
                    # GET carries the token as a query parameter, other
                    # methods inside the JSON body.
                    token_string = request.GET['token'] if request.method == 'GET' else data['token']
                    try:
                        token = ApiToken.objects.get(token=token_string)
                        token.save()  # Update the last_seen field
                        data['token'] = token
                    except ApiToken.DoesNotExist:
                        logger.exception('Valid token required, "{0}" supplied'.format(token_string))
                        return ApiResponse(False, 'Valid token required', status=403)
                return ApiResponse(data=view_func(request, data=data, *args, **kwargs))
            except Exception as e:
                # Duck-typed check so any model's DoesNotExist maps to 404.
                if e.__class__.__name__ == 'DoesNotExist':
                    logger.exception('Not found while handling ajax request')
                    return ApiResponse(False, 'Exception: {0}'.format(e), status=404)
                else:
                    logger.exception('Error handling ajax request')
                    return ApiResponse(False, 'Exception: {0}'.format(e), status=500)
        return _wrapped_view
    return decorator
def add_default_deps(project):
    """Add or create the default departments for the given project

    :param project: the project that needs default departments
    :type project: :class:`muke.models.Project`
    :returns: None
    :rtype: None
    :raises: None
    """
    for name, short, order, assetflag in DEFAULT_DEPARTMENTS:
        dep, _ = Department.objects.get_or_create(
            name=name, short=short, ordervalue=order, assetflag=assetflag)
        dep.projects.add(project)
        dep.full_clean()
        dep.save()
def add_default_atypes(project):
    """Add or create the default assettypes for the given project

    :param project: the project that needs default assettypes
    :type project: :class:`muke.models.Project`
    :returns: None
    :rtype: None
    :raises: None
    """
    for name, desc in DEFAULT_ASSETTYPES:
        atype, _ = Atype.objects.get_or_create(
            name=name, defaults={'description': desc})
        atype.projects.add(project)
        atype.full_clean()
        atype.save()
def add_default_sequences(project):
    """Add or create the default sequences for the given project

    :param project: the project that needs default sequences
    :type project: :class:`muke.models.Project`
    :returns: None
    :rtype: None
    :raises: None
    """
    default_seqs = (
        (GLOBAL_NAME, 'global sequence for project %s' % project.name),
        (RNDSEQ_NAME, 'research and development sequence for project %s' % project.name),
    )
    for name, desc in default_seqs:
        Sequence.objects.get_or_create(name=name, project=project,
                                       defaults={'description': desc})
def add_userrnd_shot(project):
    """Add a rnd shot for every user in the project

    :param project: the project that needs its rnd shots updated
    :type project: :class:`muke.models.Project`
    :returns: None
    :rtype: None
    :raises: None
    """
    rndseq = project.sequence_set.get(name=RNDSEQ_NAME)
    for user in project.users.all():
        shot, _ = Shot.objects.get_or_create(
            name=user.username, project=project, sequence=rndseq,
            defaults={'description': 'rnd shot for user %s' % user.username})
        # Make sure the user is assigned to every task of their rnd shot.
        for task in shot.tasks.all():
            task.users.add(user)
            task.full_clean()
            task.save()
def prj_post_save_handler(sender, **kwargs):
    """ Post save receiver for when a Project is saved.

    On updates, creates a rnd shot for every user.
    On creation does:

      1. create all default departments
      2. create all default assettypes
      3. create all default sequences

    :param sender: the project class
    :type sender: :class:`muke.models.Project`
    :returns: None
    :raises: None
    """
    project = kwargs['instance']
    if kwargs['created']:
        add_default_deps(project)
        add_default_atypes(project)
        add_default_sequences(project)
    else:
        add_userrnd_shot(project)
def seq_post_save_handler(sender, **kwargs):
    """ Post save receiver for when a sequence is saved.

    Creates a global shot for newly created sequences (except the rnd one).

    :param sender: the sequence class
    :type sender: :class:`muke.models.Sequence`
    :returns: None
    :raises: None
    """
    if not kwargs['created']:
        return
    seq = kwargs['instance']
    if seq.name == RNDSEQ_NAME:
        # The rnd sequence gets per-user shots instead of a global one.
        return
    Shot.objects.create(name=GLOBAL_NAME, project=seq.project, sequence=seq,
                        description="Global shot for sequence %s" % seq.name)
def create_all_tasks(element):
    """Create all tasks for the element

    :param element: The shot or asset that needs tasks
    :type element: :class:`muke.models.Shot` | :class:`muke.models.Asset`
    :returns: None
    :rtype: None
    :raises: None
    """
    project = element.project
    # Assets get tasks from asset departments, shots from shot departments.
    assetflag = isinstance(element, Asset)
    for department in project.department_set.filter(assetflag=assetflag):
        task = Task(project=project, department=department, element=element)
        task.full_clean()
        task.save()
def path(self):
    """Return the normalized path.

    A trailing separator is appended to bare drive specifiers (paths
    ending in ':') so they stay unambiguous.

    :returns: path
    :rtype: str
    :raises: None
    """
    normalized = os.path.normpath(self._path)
    if normalized.endswith(':'):
        normalized += os.path.sep
    return normalized
def path(self, value):
    """Set path, normalising backslashes to posix separators.

    A trailing separator is appended to bare drive specifiers
    (values ending in ':').

    :param value: The value for path
    :type value: str
    :raises: None
    """
    posix_value = value.replace('\\', '/')
    self._path = posixpath.normpath(posix_value)
    if self._path.endswith(':'):
        self._path += posixpath.sep
def clean(self, ):
    """Reimplemented from :class:`models.Model`.

    Validate that the shot does not start after it ends.

    :returns: None
    :rtype: None
    :raises: ValidationError
    """
    if self.startframe > self.endframe:
        raise ValidationError(
            "Shot starts before it ends: Framerange(%s - %s)" % (self.startframe, self.endframe))
def path(self, value):
    """Set path, converting backslashes to forward slashes and
    normalising the result.

    :param value: The value for path
    :type value: str
    :raises: None
    """
    self._path = posixpath.normpath(value.replace('\\', '/'))
def register_type(self, typename):
    """ Registers a type name so that it may be used to send and receive
    packages.

    :param typename: Name of the packet type. A method with the same name
                     and a "on_" prefix should be added to handle incomming
                     packets.
    :raises ValueError: If there is a hash code collision.
    """
    # Registering on the dummy protocol first surfaces hash collisions
    # before the name is recorded.
    self._dummy_protocol.register_type(typename)
    self._typenames.add(typename)