#!/usr/bin/env python


def McNuggets(n):
    # True if n can be written as 6*a + 9*b + 20*c with non-negative integers (the McNuggets problem)
    res = False
    for a in range(n/6+1):
        for b in range(n/9+1):
            for c in range(n/20+1):
                if (6*a + 9*b + 20*c) == n:
                    res = True
    return res


print McNuggets(15)
print McNuggets(16)
print McNuggets(6)
print McNuggets(9)
print McNuggets(20)
print McNuggets(35)
#print McNuggets(16)
#!/usr/bin/env python
"""
pyjld.system: various system level utilities
(e.g. daemon, cross-platform registry, command-line tools)

This package contains various system level utilities.

=========
Changelog
=========

*0.5*

* command.ui: ref_options are now optional

*0.4*

* Added ''proxy'' module
* Corrected "cmd_restart" from daemon module
* Added status return code to ui.handleError method

*0.3*

* Modified BaseCmdUI.handleArgument: integrates the templating for usage message

*0.2*

* Added command-line related utilities
* Added a cross-platform ``registry`` utility
"""
__author__ = "Jean-Lou Dupont"
__email = "python (at) jldupont.com"
__version__ = "0.5"
__fileid = "$Id$"

__classifiers = [
    'Development Status :: 4 - Beta',
    'Intended Audience :: Developers',
    'License :: Public Domain',
    'Programming Language :: Python',
    'Topic :: Software Development :: Libraries :: Python Modules',
    'Operating System :: Microsoft :: Windows',
    'Operating System :: POSIX',
]

__dependencies = ['python_daemon',]
import logging
import sys

import inject

sys.path.insert(0, '../../../python')

from model.config import Config

logging.getLogger().setLevel(logging.DEBUG)

from autobahn.asyncio.wamp import ApplicationSession
from asyncio import coroutine

'''
python3 getOvertimeRequestsByState.py userId
    # returns the requests of all users
python3 getOvertimeRequestsByState.py userId state1 state2 state3 ...
    # returns the requests of the users with the id passed as a parameter
python3 getOvertimeRequestsByState.py e43e5ded-e271-4422-8e85-9f1bc0a61235
    # returns the requests of the users with the id passed as a parameter
python3 getOvertimeRequestsByState.py e43e5ded-e271-4422-8e85-9f1bc0a61235 APPROVED
    # returns the requests of the users with the id passed as a parameter
'''


def config_injector(binder):
    binder.bind(Config, Config('server-config.cfg'))


inject.configure(config_injector)
config = inject.instance(Config)


class WampMain(ApplicationSession):

    def __init__(self, config=None):
        logging.debug('instantiating WampMain')
        ApplicationSession.__init__(self, config)

    @coroutine
    def onJoin(self, details):
        logging.info("********** OVERTIME REQUESTS OF THE USERS **********")
        if len(sys.argv) < 2:
            sys.exit("Parameter error")

        user = sys.argv[1]
        logging.info("********** USER: " + user + " **********")
        users = [user]

        states = []
        if len(sys.argv) > 2:
            for i in range(2, len(sys.argv)):
                if sys.argv[i] != 'APPROVED' and sys.argv[i] != 'PENDING' and sys.argv[i] != 'REJECTED':
                    sys.exit("Parameter error")
                states.append(sys.argv[i])

        requests = yield from self.call('overtime.getOvertimeRequests', users, states)
        for request in requests:
            logging.info(request)


if __name__ == '__main__':
    from autobahn.asyncio.wamp import ApplicationRunner
    from autobahn.wamp.serializer import JsonSerializer

    url = config.configs['server_url']
    realm = config.configs['server_realm']
    debug = config.configs['server_debug']

    json = JsonSerializer()
    runner = ApplicationRunner(url=url, realm=realm, debug=debug,
                               debug_wamp=debug, debug_app=debug, serializers=[json])
    runner.run(WampMain)
def validate_row(row):
    return len(row) == 9 and len(set(row)) == 9 and all(1 <= e <= 9 for e in row)


def validate_rows(matrix):
    result = True
    for row in matrix:
        result = result and validate_row(row)
    return result


def validate_columns(matrix):
    return validate_rows(list(zip(*matrix)))


def validate_tiles(matrix):
    length = int(len(matrix) / 3)
    row_start = 0
    for row_end in range(length, len(matrix) + 1, length):
        col_start = 0
        for col_end in range(length, len(matrix) + 1, length):
            m = list()
            for row in range(row_start, row_end):
                for col in range(col_start, col_end):
                    m.append(matrix[row][col])
            if not validate_row(m):
                return False
            col_start = col_end
        row_start = row_end
    return True


def validate(matrix):
    return validate_rows(matrix) and validate_columns(matrix) and validate_tiles(matrix)
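A quick sanity check of the validator above, shown only as an illustration: the board below is the familiar solved example grid and is not part of the original file.

board = [
    [5, 3, 4, 6, 7, 8, 9, 1, 2],
    [6, 7, 2, 1, 9, 5, 3, 4, 8],
    [1, 9, 8, 3, 4, 2, 5, 6, 7],
    [8, 5, 9, 7, 6, 1, 4, 2, 3],
    [4, 2, 6, 8, 5, 3, 7, 9, 1],
    [7, 1, 3, 9, 2, 4, 8, 5, 6],
    [9, 6, 1, 5, 3, 7, 2, 8, 4],
    [2, 8, 7, 4, 1, 9, 6, 3, 5],
    [3, 4, 5, 2, 8, 6, 1, 7, 9],
]
print(validate(board))  # expected: True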
# Driver code
my_list = [56, 345, 78, 23, 98, 34, 65, 85]

for i in range(1, len(my_list)):
    # The first value after the sorted array
    key = my_list[i]
    # Values that are greater than the key value ...
    # .. move one index forward
    j = i - 1
    while j >= 0 and key <= my_list[j]:
        my_list[j + 1] = my_list[j]
        j -= 1
    my_list[j + 1] = key

# Prints Array
print("Sorted array:", my_list)
# -*- coding: utf-8 -*-
# @Time : 2019/10/14 12:25
# @Author : Weiyang
# @File : Segmentation.py
#==================================================================================================================
# Word segmenter: two approaches, a hidden Markov model and dictionary matching;
# the HMM is further split into a supervised model and an unsupervised model
#==================================================================================================================

from HMM import HMM
from dict_match import dict_match
import numpy as np


class Segmentation(object):
    '''Word segmenter'''

    def __init__(self, model='S'):
        self.model = model  # segmentation mode: unsupervised HMM, supervised HMM, or dictionary matching,
                            # selected with the values 'U', 'S' and 'D' respectively
        self.char2code = dict()  # mapping from character to code
        self.code2char = dict()  # mapping from code to character
        self.code2latentState = dict()  # mapping from code to latent state
        self.init_prob_dist = None  # initial probability matrix
        self.state_trans_matrix = None  # state transition probability matrix
        self.emission_matrix = None  # emission (observation) probability matrix
        self.words_dict = []  # the segmentation dictionary, used for dictionary-matching segmentation
        self._readParameter()

    def _readParameter(self):
        # use the parameters learned by supervised training
        if self.model == 'S':
            with open('../matrix/S_char_code.txt', 'r', encoding='utf-8') as fi:
                for line in fi:
                    # handle lines whose character is itself a ':' symbol
                    if line.count(':') > 1:
                        _, _, code = line.strip().split(':')
                        ch = ':'
                        code = int(code)
                    else:
                        ch, code = line.strip().split(':')
                        code = int(code)
                    self.char2code[ch] = code
                    self.code2char[code] = ch
            with open('../matrix/S_latent_state_code.txt', 'r', encoding='utf-8') as fi:
                for line in fi:
                    # handle lines whose character is itself a ':' symbol
                    if line.count(':') > 1:
                        _, _, code = line.strip().split(':')
                        ch = ':'
                        code = int(code)
                    else:
                        ch, code = line.strip().split(':')
                        code = int(code)
                    self.code2latentState[code] = ch
            matrix = np.load('../matrix/S_init_prob_dist.npz')
            self.init_prob_dist = matrix['init_prob_dist']
            matrix = np.load('../matrix/S_state_trans_matrix.npz')
            self.state_trans_matrix = matrix['state_trans_matrix']
            matrix = np.load('../matrix/S_emission_matrix.npz')
            self.emission_matrix = matrix['emission_matrix']
        # use the parameters learned by unsupervised training
        elif self.model == 'U':
            with open('../matrix/U_char_code.txt', 'r', encoding='utf-8') as fi:
                for line in fi:
                    # handle lines whose character is itself a ':' symbol
                    if line.count(':') > 1:
                        _, _, code = line.strip().split(':')
                        ch = ':'
                        code = int(code)
                    else:
                        ch, code = line.strip().split(':')
                        code = int(code)
                    self.char2code[ch] = code
                    self.code2char[code] = ch
            with open('../matrix/U_latent_state_code.txt', 'r', encoding='utf-8') as fi:
                for line in fi:
                    # handle lines whose character is itself a ':' symbol
                    if line.count(':') > 1:
                        _, _, code = line.strip().split(':')
                        ch = ':'
                        code = int(code)
                    else:
                        ch, code = line.strip().split(':')
                        code = int(code)
                    self.code2latentState[code] = ch
            matrix = np.load('../matrix/U_init_prob_dist.npz')
            self.init_prob_dist = matrix['init_prob_dist']
            matrix = np.load('../matrix/U_state_trans_matrix.npz')
            self.state_trans_matrix = matrix['state_trans_matrix']
            matrix = np.load('../matrix/U_emission_matrix.npz')
            self.emission_matrix = matrix['emission_matrix']
        # load the word dictionary for dictionary matching
        elif self.model == 'D':
            with open('../data/word_dict.txt', 'r', encoding='utf-8') as fi:
                for line in fi:
                    self.words_dict += line.strip().split()

    def cut(self, inputs):
        '''inputs is the input data, a sequence of Chinese characters, e.g. 我今天很高兴!哈哈。。。。'''
        if self.model == 'D':
            result = dict_match(inputs, self.words_dict)
        else:
            model = HMM(num_latent_states=len(self.code2latentState),
                        num_observation_states=len(self.char2code),
                        init_prob_dist=self.init_prob_dist,
                        state_trans_matrix=self.state_trans_matrix,
                        emission_matrix=self.emission_matrix)
            # convert the input data into a list of integer codes
            new_input = [self.char2code[ch] for ch in inputs]
            result = model.viterbi([new_input])
            result = [self.code2latentState[code] for code in result[0]]
            result = self.formatResult(inputs, result)
        return result

    def formatResult(self, raw_input, result):
        '''
        raw_input: a character sequence, i.e. a string;
        result: a latent-state sequence, e.g. ['B','M',...];
        this function outputs the final segmentation result
        '''
        words = []
        word = ''  # temporary buffer for characters
        for i, (ch, flag) in enumerate(zip(raw_input, result)):
            if flag == 'B':
                # on a 'B', flush the word built from the previous characters and reset the buffer
                if word != '':
                    words.append(word)
                    word = ''
                word += ch
            elif flag == 'M':
                word += ch
            elif flag == 'E':
                word += ch
                if word != '':
                    words.append(word)
                    word = ''
            elif flag == 'S':
                if word != '':
                    words.append(word)
                words.append(ch)
                word = ''
            else:
                if ch != '':
                    words.append(ch)
        return words


if __name__ == '__main__':
    print('Segmentation result of the supervised HMM model:')
    model = Segmentation(model='S')
    result = model.cut('1997年,是中国发展历史上非常重要的很不平凡的一年。')
    print('\t', result)
    print()
    result = model.cut('据环球时报了解,特朗普总统星期五会见刘鹤副总理时说,对美墨加达成贸易协定,'
                       '市场没什么反应,但是美中谈判一有积极进展,股市立刻上涨,市场反响强烈,这一次又是这样。')
    print('\t', result)
    print()
    #+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
    print('Segmentation result of maximum forward dictionary matching:')
    model = Segmentation(model='D')
    result = model.cut('1997年,是中国发展历史上非常重要的很不平凡的一年。')
    print('\t', result)
    print()
    result = model.cut('据环球时报了解,特朗普总统星期五会见刘鹤副总理时说,对美墨加达成贸易协定,'
                       '市场没什么反应,但是美中谈判一有积极进展,股市立刻上涨,市场反响强烈,这一次又是这样。')
    print('\t', result)
# Copyright 2023 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). from __future__ import annotations import argparse import logging import os import shutil import subprocess from dataclasses import dataclass from textwrap import dedent from typing import Generic, Sequence, Type, TypeVar, cast from pants.backend.cc.lint.clangformat.subsystem import ClangFormat from pants.backend.codegen.avro.java.subsystem import AvroSubsystem from pants.backend.codegen.protobuf.java.subsystem import JavaProtobufGrpcSubsystem from pants.backend.codegen.protobuf.python.python_protobuf_subsystem import PythonProtobufMypyPlugin from pants.backend.codegen.protobuf.scala.subsystem import ScalaPBSubsystem from pants.backend.codegen.thrift.scrooge.subsystem import ScroogeSubsystem from pants.backend.docker.subsystems.dockerfile_parser import DockerfileParser from pants.backend.helm.subsystems.k8s_parser import HelmKubeParserSubsystem from pants.backend.helm.subsystems.post_renderer import HelmPostRendererSubsystem from pants.backend.java.lint.google_java_format.subsystem import GoogleJavaFormatSubsystem from pants.backend.java.subsystems.junit import JUnit from pants.backend.kotlin.lint.ktlint.subsystem import KtlintSubsystem from pants.backend.python.goals.coverage_py import CoverageSubsystem from pants.backend.python.lint.add_trailing_comma.subsystem import AddTrailingComma from pants.backend.python.lint.autoflake.subsystem import Autoflake from pants.backend.python.lint.bandit.subsystem import Bandit from pants.backend.python.lint.black.subsystem import Black from pants.backend.python.lint.docformatter.subsystem import Docformatter from pants.backend.python.lint.flake8.subsystem import Flake8 from pants.backend.python.lint.isort.subsystem import Isort from pants.backend.python.lint.pydocstyle.subsystem import Pydocstyle from pants.backend.python.lint.pylint.subsystem import Pylint from pants.backend.python.lint.pyupgrade.subsystem import PyUpgrade from pants.backend.python.lint.ruff.subsystem import Ruff from pants.backend.python.lint.yapf.subsystem import Yapf from pants.backend.python.packaging.pyoxidizer.subsystem import PyOxidizer from pants.backend.python.subsystems.debugpy import DebugPy from pants.backend.python.subsystems.ipython import IPython from pants.backend.python.subsystems.pytest import PyTest from pants.backend.python.subsystems.python_tool_base import PythonToolRequirementsBase from pants.backend.python.subsystems.setuptools import Setuptools from pants.backend.python.subsystems.setuptools_scm import SetuptoolsSCM from pants.backend.python.subsystems.twine import TwineSubsystem from pants.backend.python.typecheck.mypy.subsystem import MyPy from pants.backend.python.typecheck.pytype.subsystem import Pytype from pants.backend.scala.lint.scalafmt.subsystem import ScalafmtSubsystem from pants.backend.scala.subsystems.scalatest import Scalatest from pants.backend.terraform.dependency_inference import TerraformHcl2Parser from pants.backend.tools.semgrep.subsystem import SemgrepSubsystem from pants.backend.tools.yamllint.subsystem import Yamllint from pants.base.build_environment import get_buildroot from pants.jvm.resolve.jvm_tool import JvmToolBase from pants.jvm.shading.jarjar import JarJar from pants.util.contextutil import temporary_dir from pants.util.dirutil import touch logger = logging.getLogger(__name__) default_python_interpreter_constraints = "CPython>=3.7,<4" ToolBaseT = TypeVar("ToolBaseT") @dataclass class 
Tool(Generic[ToolBaseT]): cls: Type[ToolBaseT] backend: str @property def name(self) -> str: return cast(str, self.cls.options_scope) # type: ignore[attr-defined] @property def resolve(self) -> str: return self.name @property def lockfile_name(self) -> str: return f"{self.name}.lock" @dataclass class PythonTool(Tool[PythonToolRequirementsBase]): interpreter_constraints: str = default_python_interpreter_constraints @dataclass class JvmTool(Tool[JvmToolBase]): ... all_python_tools = tuple( sorted( [ PythonTool( AddTrailingComma, "pants.backend.experimental.python.lint.add_trailing_comma" ), PythonTool(Autoflake, "pants.backend.python.lint.autoflake"), PythonTool(Bandit, "pants.backend.python.lint.bandit"), PythonTool(Black, "pants.backend.python.lint.black"), PythonTool(ClangFormat, "pants.backend.experimental.cc.lint.clangformat"), PythonTool(CoverageSubsystem, "pants.backend.python"), PythonTool(DebugPy, "pants.backend.python"), PythonTool(Docformatter, "pants.backend.python.lint.docformatter"), PythonTool(DockerfileParser, "pants.backend.docker"), PythonTool(Flake8, "pants.backend.python.lint.flake8"), PythonTool(HelmKubeParserSubsystem, "pants.backend.experimental.helm"), PythonTool(HelmPostRendererSubsystem, "pants.backend.experimental.helm"), PythonTool(IPython, "pants.backend.python"), PythonTool(Isort, "pants.backend.python.lint.isort"), PythonTool(MyPy, "pants.backend.python.typecheck.mypy"), PythonTool(Pydocstyle, "pants.backend.python.lint.pydocstyle"), PythonTool(PyTest, "pants.backend.python"), PythonTool(PyUpgrade, "pants.backend.python.lint.pyupgrade"), PythonTool(Pylint, "pants.backend.python.lint.pylint"), PythonTool(PythonProtobufMypyPlugin, "pants.backend.codegen.protobuf.python"), PythonTool(Pytype, "pants.backend.python.typecheck.pytype", "CPython>=3.7,<3.11"), PythonTool(PyOxidizer, "pants.backend.experimental.python.packaging.pyoxidizer"), PythonTool(Ruff, "pants.backend.experimental.python.lint.ruff"), PythonTool(SemgrepSubsystem, "pants.backend.experimental.tools.semgrep"), PythonTool(Setuptools, "pants.backend.python"), PythonTool(SetuptoolsSCM, "pants.backend.python"), PythonTool(TerraformHcl2Parser, "pants.backend.experimental.terraform"), PythonTool(TwineSubsystem, "pants.backend.python"), PythonTool(Yamllint, "pants.backend.experimental.tools.yamllint"), PythonTool(Yapf, "pants.backend.python.lint.yapf"), ], key=lambda tool: tool.name, ) ) all_jvm_tools = tuple( sorted( [ JvmTool(AvroSubsystem, "pants.backend.experimental.codegen.avro.java"), JvmTool( GoogleJavaFormatSubsystem, "pants.backend.experimental.java.lint.google_java_format" ), JvmTool(JUnit, "pants.backend.experimental.java"), JvmTool(JarJar, "pants.backend.experimental.java"), JvmTool(JavaProtobufGrpcSubsystem, "pants.backend.experimental.codegen.protobuf.java"), JvmTool(KtlintSubsystem, "pants.backend.experimental.kotlin.lint.ktlint"), JvmTool(ScalaPBSubsystem, "pants.backend.experimental.codegen.protobuf.scala"), JvmTool(ScalafmtSubsystem, "pants.backend.experimental.scala.lint.scalafmt"), JvmTool(Scalatest, "pants.backend.experimental.scala"), JvmTool(ScroogeSubsystem, "pants.backend.experimental.codegen.thrift.scrooge.scala"), ], key=lambda tool: tool.name, ) ) name_to_tool = {tool.name: tool for tool in (all_python_tools + all_jvm_tools)} def create_parser() -> argparse.ArgumentParser: parser = argparse.ArgumentParser( description="Generate the tool lockfiles that we distribute with Pants.", ) parser.add_argument( "tool", nargs="*", metavar="tool", # A quirk of argparse is that an empty list 
must be provided as one of the choices # to allow an empty list when nargs="*". choices=sorted(name_to_tool.keys()) + [[]], help="Regenerate this builtin tool lockfile", ) parser.add_argument( "--all-python", action="store_true", help="Regenerate all builtin Python tool lockfiles." ) parser.add_argument( "--all-jvm", action="store_true", help="Regenerate all builtin JVM tool lockfiles." ) parser.add_argument( "--dry-run", action="store_true", help="Show Pants commands that would be run." ) parser.add_argument( "-d", "--debug", action="store_const", dest="loglevel", const=logging.DEBUG, default=logging.INFO, ) return parser def generate_python_tool_lockfiles(tools: Sequence[PythonTool], dry_run: bool) -> None: def req_file(_tool: PythonTool) -> str: return f"{_tool.name}-requirements.txt" # Generate the builtin lockfiles via temporary named resolves in a tmp repo. # This is to completely disassociate the generation of builtin lockfiles from # the consumption of lockfiles in the Pants repo. with temporary_dir() as tmp_buildroot: for tool in tools: with open(os.path.join(tmp_buildroot, req_file(tool)), "w") as reqs_file: for req_str in tool.cls.default_requirements: reqs_file.write(req_str) reqs_file.write("\n") with open(os.path.join(tmp_buildroot, "BUILD"), "w") as build_file: for tool in tools: build_file.write( dedent( f"""\ python_requirements( name="{tool.name}_reqs", source="{req_file(tool)}", resolve="{tool.resolve}", ) """ ) ) resolves = {tool.resolve: tool.lockfile_name for tool in tools} resolves_to_ics = {tool.resolve: [tool.interpreter_constraints] for tool in tools} for file in resolves.values(): touch(os.path.join(tmp_buildroot, file)) # Prevent "Unmatched glob" warning. python_args = [ # Regardless of the backend the tool is defined in, we need the Python backend # for the Python resolves mechanism to work. "--backend-packages=pants.backend.python", "--python-pip-version=latest", f"--python-interpreter-constraints=['{default_python_interpreter_constraints}']", "--python-enable-resolves", # Unset any existing resolve names in the Pants repo, and set to just our temporary ones. f"--python-resolves={resolves}", f"--python-resolves-to-interpreter-constraints={resolves_to_ics}", # Blank these out in case the Pants repo sets them using resolve names that we've unset. "--python-resolves-to-constraints-file={}", "--python-resolves-to-no-binary={}", "--python-resolves-to-only-binary={}", ] generate(tmp_buildroot, tools, python_args, dry_run) def generate_jvm_tool_lockfiles(tools: Sequence[JvmTool], dry_run: bool) -> None: # Generate the builtin lockfiles via temporary named resolves in a tmp repo. # This is to completely disassociate the generation of builtin lockfiles from # the consumption of lockfiles in the Pants repo. 
with temporary_dir() as tmp_buildroot: jvm_args = [] for tool in tools: jvm_args.extend( [ f"--{tool.name}-version={tool.cls.default_version}", f"--{tool.name}-artifacts={tool.cls.default_artifacts}", f"--{tool.name}-lockfile={tool.lockfile_name}", ] ) generate(tmp_buildroot, tools, jvm_args, dry_run) def generate(buildroot: str, tools: Sequence[Tool], args: Sequence[str], dry_run: bool) -> None: pants_repo_root = get_buildroot() touch(os.path.join(buildroot, "pants.toml")) backends = sorted({tool.backend for tool in tools}) custom_cmd = "./pants run build-support/bin/generate_builtin_lockfiles.py" args = [ os.path.join(pants_repo_root, "pants"), "--concurrent", "--anonymous-telemetry-enabled=false", f"--backend-packages={backends}", *args, f"--generate-lockfiles-custom-command={custom_cmd}", "generate-lockfiles", *[f"--resolve={tool.resolve}" for tool in tools], ] if dry_run: logger.info("Would run: " + " ".join(args)) return logger.debug("Running: " + " ".join(args)) subprocess.run(args, cwd=buildroot, check=True) # Copy the generated lockfiles from the tmp repo to the Pants repo. for tool in tools: lockfile_pkg, lockfile_filename = tool.cls.default_lockfile_resource lockfile_dest = os.path.join( "src", "python", lockfile_pkg.replace(".", os.path.sep), lockfile_filename, ) shutil.copy(os.path.join(buildroot, tool.lockfile_name), lockfile_dest) def main() -> None: parser = create_parser() args = parser.parse_args() logging.basicConfig( level=args.loglevel, format="%(asctime)s.%(msecs)02d [%(levelname)s] %(message)s", datefmt="%I:%M:%S", ) python_tools = [] jvm_tools = [] for name in args.tool: tool = name_to_tool[name] if isinstance(tool, PythonTool): python_tools.append(tool) elif isinstance(tool, JvmTool): jvm_tools.append(tool) else: raise ValueError(f"Tool {name} has unknown type.") if args.all_python: python_tools.extend(all_python_tools) if args.all_jvm: jvm_tools.extend(all_jvm_tools) if not python_tools and not jvm_tools: raise ValueError( "Must specify at least one tool, either via positional args, " "or via the --all-python/--all-jvm flags." ) if python_tools: generate_python_tool_lockfiles(python_tools, args.dry_run) if jvm_tools: generate_jvm_tool_lockfiles(jvm_tools, args.dry_run) if __name__ == "__main__": main()
import time from math import sqrt, tan, sin, cos, pi, ceil, floor, acos, atan, asin, degrees, radians, log, atan2, acos, asin from random import * import numpy from pymclevel import alphaMaterials, MCSchematic, MCLevel, BoundingBox from mcplatform import * import Queue import utilityFunctions from helper import * from ChunkAnalysis import * def createHighways(level, box, segments, chunkMap, material, stairsId, entryPoints): def createHighway(roadWidth, allDistrictChunks, highwayNodes): class RoadNode: def __init__(self, x, z, goals, segmentIdentifier, roadWidth, prior = None, additionalCost = 0): self.x = x self.z = z #self.y = getGroundYPos(x, z) self.width = roadWidth self.medianY, self.stdDev = self.getYStats() self.prior = prior if prior is None: self.g = 0 else: self.g = prior.g + additionalCost self.g += abs(self.medianY - prior.medianY) self.g += self.stdDev # Calculating h (heuristic) self.h = 999999 for goal in goals: estimateToGoal = max(abs(goal[0] - self.x), abs(goal[1] - self.z)) if estimateToGoal < self.h: self.h = estimateToGoal # Setting f (expected total cost to the closest goal) self.f = self.g + self.h if prior is None: self.deltaX = 0 self.deltaZ = 0 self.segmentIdentifier = segmentIdentifier else: self.deltaX = self.x - prior.x self.deltaZ = self.z - prior.z self.segmentIdentifier = prior.segmentIdentifier self.waterFraction = self.countWater() / (roadWidth ** 2.0) def clearHistory(self): self.prior = None self.g = self.stdDev self.f = self.g + self.h self.deltaX = 0 self.deltaZ = 0 def countWater(self): count = 0 for x in xrange(self.x, self.x + roadWidth): for z in xrange(self.z, self.z + roadWidth): y = getGroundYPos(x, z) id = level.blockAt(x, y, z) if id == 8 or id == 9: # If liquid water count += 1 return count def getYStats(self): yPositions = [] for x in xrange(self.x, self.x + roadWidth): for z in xrange(self.z, self.z + roadWidth): yPositions.append(getGroundYPos(x, z)) return numpy.median(yPositions), numpy.std(yPositions) @staticmethod def getSuccessorAttributes(): # Successor attributes is just to make it easier to process successors (it lists deltaX, deltaZ, and cost from parent) successorAttributes = [(-1, 0, 1), (0, -1, 1), (0, 1, 1), (1, 0, 1)] if roadWidth > 1: # Can only move diagonally if road width is greater than 1 successorAttributes += [(-1, -1, 1.5), (-1, 1, 1.5), (1, -1, 1.5), (1, 1, 1.5)] # Scaling successor attributes by the road width for i in xrange(len(successorAttributes)): successorAttributes[i] = (successorAttributes[i][0] * roadWidth, successorAttributes[i][1] * roadWidth, successorAttributes[i][2] * roadWidth) return successorAttributes def getSuccessors(self, successorAttributes, goals): successors = [] for i in successorAttributes: # If the successor is within the box's bounds if box.minx <= (self.x + i[0]) < box.maxx - roadWidth and box.minz <= (self.z + i[1]) < box.maxz - roadWidth: candidate = RoadNode(self.x + i[0], self.z + i[1], goals, self.segmentIdentifier, self.width, self, i[2]) if (candidate.medianY - self.medianY) <= roadWidth: if self.deltaX == 0 and self.deltaZ == 0: successors.append(candidate) # If self is mostly over water, only add candidates whos deltaX and deltaZ are equal to self's elif self.waterFraction > 0.5: if self.deltaX == candidate.deltaX and self.deltaZ == candidate.deltaZ: successors.append(candidate) # Can only go in a direction that is 45 degrees from the current direction elif roadWidth == 1: # Unless the road width is only 1 successors.append(candidate) else: if self.deltaX == 0 and self.deltaZ > 
0: if candidate.deltaZ > 0: successors.append(candidate) elif self.deltaX > 0 and self.deltaZ > 0: if candidate.deltaX >= 0 and candidate.deltaZ >= 0: successors.append(candidate) elif self.deltaX > 0 and self.deltaZ == 0: if candidate.deltaX > 0: successors.append(candidate) elif self.deltaX > 0 and self.deltaZ < 0: if candidate.deltaX >= 0 and candidate.deltaZ <= 0: successors.append(candidate) elif self.deltaX == 0 and self.deltaZ < 0: if candidate.deltaZ < 0: successors.append(candidate) elif self.deltaX < 0 and self.deltaZ < 0: if candidate.deltaX <= 0 and candidate.deltaZ <= 0: successors.append(candidate) elif self.deltaX < 0 and self.deltaZ == 0: if candidate.deltaX < 0: successors.append(candidate) else: if candidate.deltaX <= 0 and candidate.deltaZ >= 0: successors.append(candidate) return successors def regeneratePath(self, path = []): path.append(self) if self.prior is None: return path else: return self.prior.regeneratePath(path) def __lt__(self, other): return self.f < other.f def __hash__(self): return self.x + (512 * self.z) def __eq__(self, other): return self.x == other.x and self.z == other.z def getPath(startingChunk, endingChunks, startingSegment, otherSegments): successorAttributes = RoadNode.getSuccessorAttributes() goals = [] for endingChunk in endingChunks: goals.append((endingChunk.box.minx + 8 - (roadWidth / 2), endingChunk.box.minz + 8 - (roadWidth / 2), endingChunk)) resets = [] for i in highwayNodes: if i.segmentIdentifier in startingSegment: resets.append((i.x, i.z, i.width)) else: goals.append((i.x + (i.width / 2), i.z + (i.width / 2), i.segmentIdentifier)) originX = startingChunk.box.minx + 8 - (roadWidth / 2) originZ = startingChunk.box.minz + 8 - (roadWidth / 2) originY = getGroundYPos(originX, originZ) openList = Queue.PriorityQueue() openList.put(RoadNode(originX, originZ, goals, startingChunk, roadWidth)) closedSet = set() goalFound = None while openList.qsize() > 0: current = openList.get() # If we already checked this node, fetch the next best node in the open list # This check is necessary because when adding successors, we can't tell if it was already in the open list # Therefore, we will check if a better candidate at its position was already processed if current in closedSet: continue # Checking if a goal is within the road for goal in goals: if current.x <= goal[0] < current.x + roadWidth and current.z <= goal[1] < current.z + roadWidth: goalFound = goal[2] break if goalFound is not None: break # Checking if current overlaps with a chunk in another segment currentChunks = getChunksOverlappedWithBox(current.x, current.z, roadWidth) for i in currentChunks: if i in otherSegments: goalFound = i break if goalFound is not None: break # Checking if current and prior overlap with a chunk in startingSegment; if so, delete prior history from current if current.prior is not None: inStartingSegment = False for i in currentChunks: if i in startingSegment: inStartingSegment = True break if inStartingSegment: inStartingSegment = False priorChunks = getChunksOverlappedWithBox(current.prior.x, current.prior.z, roadWidth) for i in priorChunks: inStartingSegment = True break if inStartingSegment: # Remove history from current current.clearHistory() # Checking if we should restart the search from one of our previous roads we are near for i in resets: # If aligned horizontally if i[0] <= current.x <= i[0]+i[2] or i[0] <= current.x+roadWidth <= i[0]+i[2]: # If aligned vertically if i[1] <= current.z <= i[1]+i[2] or i[1] <= current.z+roadWidth <= i[1]+i[2]: # The 
overlap; clear history current.clearHistory() break # Adding current to the closed set closedSet.add(current) # Adding successors to the open list successors = current.getSuccessors(successorAttributes, goals) for i in successors: if i not in closedSet: openList.put(i) if goalFound is not None: return current.regeneratePath(), goalFound else: return [], None # Gets a list of all blocks along the path that will make up the road def getCompletePathCoordinates(path): pathCoordinates = [] for i in xrange(len(path)): for xOffset in xrange(roadWidth): for zOffset in xrange(roadWidth): x = path[i].x + xOffset z = path[i].z + zOffset # Smoothing out the road's height # if path[i].deltaX > 0: # if xOffset < roadWidth / 2: # y = lerpInt(path[i - 1].medianY, path[i].medianY, (ceil(roadWidth / 2) + xOffset) / float(roadWidth)) # elif i < len(path) - 1: # y = lerpInt(path[i].medianY, path[i + 1].medianY, (xOffset - (roadWidth / 2)) / float(roadWidth)) # else: # y = path[i].medianY # elif path[i].deltaX < 0: # if xOffset >= roadWidth / 2: # y = lerpInt(path[i].medianY, path[i - 1].medianY, (xOffset - (roadWidth / 2)) / float(roadWidth)) # elif i < len(path) - 1: # y = lerpInt(path[i + 1].medianY, path[i].medianY, (ceil(roadWidth / 2) + xOffset) / float(roadWidth)) # else: # y = path[i].medianY # elif path[i].deltaZ > 0: # if zOffset < roadWidth / 2: # y = lerpInt(path[i - 1].medianY, path[i].medianY, (ceil(roadWidth / 2) + zOffset) / float(roadWidth)) # elif i < len(path) - 1: # y = lerpInt(path[i].medianY, path[i + 1].medianY, (zOffset - (roadWidth / 2)) / float(roadWidth)) # else: # y = path[i].medianY # elif path[i].deltaZ < 0: # if zOffset >= roadWidth / 2: # y = lerpInt(path[i].medianY, path[i - 1].medianY, (zOffset - (roadWidth / 2)) / float(roadWidth)) # elif i < len(path) - 1: # y = lerpInt(path[i + 1].medianY, path[i].medianY, (ceil(roadWidth / 2) + zOffset) / float(roadWidth)) # else: # y = path[i].medianY # else: # if i < len(path) - 1: # if path[i + 1].deltaX > 0: # if xOffset >= roadWidth / 2: # y = lerpInt(path[i].medianY, path[i + 1].medianY, (xOffset - (roadWidth / 2)) / float(roadWidth)) # else: # y = path[i].medianY # elif path[i + 1].deltaX < 0: # if xOffset < roadWidth / 2: # y = lerpInt(path[i + 1].medianY, path[i].medianY, (ceil(roadWidth / 2) + xOffset) / float(roadWidth)) # else: # y = path[i].medianY # elif path[i + 1].deltaZ > 0: # if zOffset >= roadWidth / 2: # y = lerpInt(path[i].medianY, path[i + 1].medianY, (zOffset - (roadWidth / 2)) / float(roadWidth)) # else: # y = path[i].medianY # else: # if zOffset < roadWidth / 2: # y = lerpInt(path[i + 1].medianY, path[i].medianY, (ceil(roadWidth / 2) + zOffset) / float(roadWidth)) # else: # y = path[i].medianY # else: # y = path[i].medianY # y = int(round(y)) y = getGroundYPos(x, z) pathCoordinates.append((x, y, z)) # Determining road blocks between diagonal path coordinates for i in xrange(len(path) - 1): # If path[i] and path[i + 1] are diagonal from each other if path[i].x != path[i + 1].x and path[i].z != path[i + 1].z: # Getting the bounds of the 2x2 square containing the diagonal path coordinates minx = min(path[i].x, path[i + 1].x) maxx = max(path[i].x, path[i + 1].x) minz = min(path[i].z, path[i + 1].z) maxz = max(path[i].z, path[i + 1].z) maxx += roadWidth maxz += roadWidth # Diagonally along y = x line if (path[i + 1].x - path[i].x) == (path[i + 1].z - path[i].z): # Filling in the bottom right half of the top left box of the 2x2 square for x in xrange(minx + 1, minx + roadWidth): for z in xrange(minz + roadWidth, maxz - 
((minx + roadWidth) - x)): y = getGroundYPos(x, z) pathCoordinates.append((x, y, z)) # Filling in the top left half of the bottom right box of the 2x2 square for x in xrange(minx + roadWidth, maxx - 1): for z in xrange(minz + 1 + (x - (minx + roadWidth)), minz + roadWidth): y = getGroundYPos(x, z) pathCoordinates.append((x, y, z)) # Diagonally along y = -x line else: # Filling in the top right half of the bottom left box of the 2x2 square for x in xrange(minx + 1, minx + roadWidth): for z in xrange(minz + ((minx + roadWidth) - x), minz + roadWidth): y = getGroundYPos(x, z) pathCoordinates.append((x, y, z)) # Filling in the bottom left half of the top right box of the 2x2 square for x in xrange(minx + roadWidth, maxx - 1): for z in xrange(minz + roadWidth, maxz - 1 - (x - (minx + roadWidth))): y = getGroundYPos(x, z) pathCoordinates.append((x, y, z)) return pathCoordinates def getChunkAtPos(x, z): return chunkMap[(x/16)*16][(z/16)*16][1] def getChunksOverlappedWithBox(x, z, roadWidth): result = [getChunkAtPos(x, z)] if x/16 == (x+roadWidth)/16: if z/16 == (z+roadWidth)/16: pass else: result.append(getChunkAtPos(x, z + roadWidth)) else: result.append(getChunkAtPos(x + roadWidth, z)) if z/16 == (z+roadWidth)/16: pass else: result.append(getChunkAtPos(x, z + roadWidth)) result.append(getChunkAtPos(x + roadWidth, z + roadWidth)) return result def addEntryPoints(roadNode, segment): chunks = getChunksOverlappedWithBox(roadNode.x, roadNode.z, roadWidth) for i in chunks: if i in segment: chunkType = chunkMap[i.box.minx][i.box.minz][0] roadX = roadNode.x + (roadWidth / 2) roadZ = roadNode.z + (roadWidth / 2) minDist = 99999999 coordX = 0 coordZ = 0 for x in xrange(i.box.minx, i.box.maxx): for z in (i.box.minz, i.box.maxz - 1): dist = abs(x - roadX) + abs(z - roadZ) if dist < minDist: minDist = dist coordX = x coordZ = z for z in xrange(i.box.minz + 1, i.box.maxz - 1): for x in (i.box.minx, i.box.maxx - 1): dist = abs(x - roadX) + abs(z - roadZ) if dist < minDist: minDist = dist coordX = x coordZ = z entryPoints[chunkType].add((coordX, coordZ)) break # createHighway function startingSegmentIndex = randint(0, len(segments) - 1) endingChunks = [] otherSegments = set() for i in xrange(len(segments)): if i == startingSegmentIndex: startingSegment = segments[i] startingChunk = choice(list(startingSegment)) else: otherSegments.union(segments[i]) endingChunks.append(choice(list(segments[i]))) path, stoppingChunkIdentifier = getPath(startingChunk, endingChunks, startingSegment, otherSegments) if stoppingChunkIdentifier is not None: # Determining which segment stoppingChunkIdentifier is in for i in xrange(len(segments)): if stoppingChunkIdentifier in segments[i]: stoppingSegmentIndex = i break # Adding the entry points where the road begins and ends if len(path) > 1: for i in xrange(len(path)): overlappedChunks = getChunksOverlappedWithBox(path[i].x, path[i].z, roadWidth) found = False for j in overlappedChunks: if j not in segments[stoppingSegmentIndex]: found = True break if found: break addEntryPoints(path[i], segments[stoppingSegmentIndex]) addEntryPoints(path[-1], startingSegment) # Joining the two segments segments[startingSegmentIndex] = segments[startingSegmentIndex].union(segments[stoppingSegmentIndex]) del segments[stoppingSegmentIndex] pathCoordinates = getCompletePathCoordinates(path) highwayNodes += path return pathCoordinates def getChunkAtPos(x, z): return chunkMap[(x/16)*16][(z/16)*16][1] # Builds a road on each path coordinate def constructRoadOnPath(pathCoordinates, allDistrictChunks): # 
Removing all invalid blocks for i in xrange(len(pathCoordinates) - 1, -1, -1): x = pathCoordinates[i][0] y = pathCoordinates[i][1] z = pathCoordinates[i][2] if getChunkAtPos(x, z) in allDistrictChunks: del pathCoordinates[i] else: deleteTree(level, x, z) # Removes any tree on the road for i in xrange(1, 5): # carving out space above the road setBlock(level, (0, 0), x, y + i, z) yLookup = {} for i in pathCoordinates: yLookup[(i[0], i[2])] = i[1] for x, y, z in pathCoordinates: if (x - 1, z) in yLookup and yLookup[(x - 1, z)] == y-1: # If downhill at x-1, add stairs facing -x (West) setBlock(level, (stairsId, 0), x, y, z) elif (x + 1, z) in yLookup and yLookup[(x + 1, z)] == y-1: # If downhill at x+1, add stairs facing +x (East) setBlock(level, (stairsId, 1), x, y, z) elif (x, z - 1) in yLookup and yLookup[(x, z - 1)] == y-1: # If downhill at z-1, add stairs facing -z (North) setBlock(level, (stairsId, 2), x, y, z) elif (x, z + 1) in yLookup and yLookup[(x, z + 1)] == y-1: # If downhill at z+1, add stairs facing +z (South) setBlock(level, (stairsId, 3), x, y, z) else: # Otherwise, set it as a regular block setBlock(level, material, x, y, z) # createHighways function allDistrictChunks = set() for i in segments: allDistrictChunks = allDistrictChunks.union(i) highwayNodes = [] roadWidth = 3 counter = 0 pathCoordinates = [] while len(segments) > 1: pathCoordinates += createHighway(roadWidth, allDistrictChunks, highwayNodes) counter += 1 if counter >= 20: break constructRoadOnPath(pathCoordinates, allDistrictChunks) # Finalizing Entry Points for i in entryPoints: entryPoints[i] = list(entryPoints[i]) return entryPoints
#!/usr/bin/env python3


class PinholeCamera(object):
    def __init__(self, width, height, fx, fy, cx, cy,
                 k1=0.0, k2=0.0, p1=0.0, p2=0.0, k3=0.0):
        self.width = width
        self.height = height
        self.fx = fx
        self.fy = fy
        self.cx = cx
        self.cy = cy
        self.distortion = (abs(k1) > 0.0000001)
        self.d = [k1, k2, p1, p2, k3]
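A minimal usage sketch for the camera model above; the resolution and intrinsic values below are illustrative placeholders, not values taken from the original project.

cam = PinholeCamera(640, 480, fx=525.0, fy=525.0, cx=319.5, cy=239.5)
print(cam.width, cam.height, cam.distortion)  # 640 480 False (all distortion coefficients left at 0.0)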
from django.conf.urls import url

from . import views

urlpatterns = [
    url(r'^$', views.index, name='index'),
    url(r'^snap/', views.snap, name='snap'),
    url(r'^jupyter/', views.jupyter, name='jupyter'),
    url(r'^monitor/', views.monitor, name='monitor'),
    url(r'^rest/$', views.rest, name='rest'),
    url(r'^rest/raw/$', views.rest_raw, name='rest'),
    url(r'^rest/state/$', views.rest_state, name='rest'),
    url(r'^settings/$', views.configuration, name='configuration'),
    url(r'^settings/wifi_add/$', views.wifi_add, name='wifi_add'),
    url(r'^settings/wifi_suppr/$', views.wifi_suppr, name='wifi_suppr'),
    url(r'^settings/wifi_restart/$', views.wifi_restart, name='wifi_restart'),
    url(r'^reboot/', views.reboot, name='reboot'),
    url(r'^logs/$', views.logs, name='logs'),
    url(r'^logs/raw/$', views.rawlogs, name='rawlogs'),
    url(r'^shutdown/', views.shutdown, name='shutdown'),
]
#Sidharth Peri
#HW 5 Question 1
#Honor Code: I pledge in my honor that I have abided by the Stevens Honor System
#A program that uses a function to accept a list and returns a modified list with the elements squared

#function that takes in a list as a parameter and returns the list with each element squared
def square_list(squared_list):
    for i in range(len(squared_list)):
        squared_list[i] = squared_list[i]**2
    #no need for return because lists are mutable

#main function in which function call occurs
def main():
    num_list = [1,2,3,4,5,6]
    square_list(num_list)  #call function, no need to reassign num_list because lists are mutable
    print("The squared list is:", num_list)

main()
#!/usr/bin/python3 -u
### Install:
# apt-get install python3-pip
# pip3 install netifaces

import sys
import os
import subprocess
import time
import netifaces

INTERFACES = ['eth1', 'eth2', 'eth3', 'eth4', 'eth5', 'eth6']
ACTIONS = ['up', 'down']

### CHANGE HERE TO YOUR INTERFACE ADDRESS
MAIN_NETWORK = '10.55.55'
MAIN_NETWORK_POSTFIX = '1'


def interface_ip(name, timeout=15):
    ip = ''
    try:
        netifaces.ifaddresses(name)
        for i in range(0, timeout):
            try:
                ip = netifaces.ifaddresses(name)[netifaces.AF_INET][0]['addr']
            except KeyError:
                if i == (timeout - 1):
                    print('ERROR: interface "{}" still has no IPv4 address'.format(name))
                    break
                time.sleep(1)
            except Exception as exception:
                print('ERROR: {}'.format(str(exception)))
                break
    except ValueError:
        print('ERROR: no such interface "{}"'.format(name))
    except Exception as exception:
        print('ERROR: {}'.format(str(exception)))
    return ip


def interface_up(name):
    ip = interface_ip(name)
    if ip == '':
        return -1
    network = ip[:8]
    subprocess.getoutput('ip route add {network}.0/24 dev {name} table rt{name}'.format(network=network, name=name))
    subprocess.getoutput('ip route add default via {network}.1 dev {name} table rt{name}'.format(network=network, name=name))
    subprocess.getoutput('ip route add {main_network}.0/24 dev eth0 src {main_network}.{main_network_postfix} table rt{name}'.format(name=name, main_network=MAIN_NETWORK, main_network_postfix=MAIN_NETWORK_POSTFIX))
    subprocess.getoutput('ip rule add from {network}.0/24 table rt{name}'.format(network=network, name=name))
    subprocess.getoutput('ip rule add to {network}.0/24 table rt{name}'.format(network=network, name=name))
    subprocess.getoutput('iptables -t nat -A POSTROUTING -o {name} -j MASQUERADE'.format(name=name))
    subprocess.getoutput('iptables -t mangle -A POSTROUTING -m ttl --ttl-gt 50 -o {name} -j TTL --ttl-set 65'.format(name=name))
    return 0


def interface_down(name):
    subprocess.getoutput('ip route flush table rt{name}'.format(name=name))
    subprocess.getoutput('iptables -t nat -D POSTROUTING -o {name} -j MASQUERADE'.format(name=name))
    subprocess.getoutput('iptables -t mangle -D POSTROUTING -m ttl --ttl-gt 50 -o {name} -j TTL --ttl-set 65'.format(name=name))
    rule_network = ''
    # find the network used by this interface's routing rules
    for i in subprocess.getoutput('ip rule').splitlines():
        if i.find(name) >= 0:
            for j in i.split(' '):
                if len(j.split('.')) == 4:
                    rule_network = j
    if rule_network != '':
        subprocess.getoutput('ip rule del from {net} table rt{name}'.format(net=rule_network, name=name))
        subprocess.getoutput('ip rule del to {net} table rt{name}'.format(net=rule_network, name=name))
    return 0


def help():
    print("""Usage:\n {} [ interface_name ] [ up | down ]""".format(sys.argv[0]))


# Check arguments
name = ''
action = ''
try:
    if not sys.argv[1] in INTERFACES:
        print('ERROR: interface "{}" not allowed by this rule'.format(sys.argv[1]))
        exit(-2)
    else:
        name = sys.argv[1]
except:
    print('ERROR: not enough arguments')
    help()
    exit(-3)

try:
    if not sys.argv[2] in ACTIONS:
        print('ERROR: unknown action "{}"'.format(sys.argv[2]))
        exit(-2)
    else:
        action = sys.argv[2]
except:
    print('ERROR: not enough arguments')
    help()
    exit(-3)

if action == 'up':
    exit(interface_up(name))
elif action == 'down':
    exit(interface_down(name))

print('ERROR: unknown error. This point should be unreachable.')
help()
exit(-5)
import os
import requests

s = "https://api.covid19india.org/csv/latest/state_wise.csv"


def getdata():
    return requests.get(s).content
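A hedged sketch of how the bytes returned by getdata() might be consumed, assuming only that the CSV's first line is a header row (the actual column layout is not shown in the file above).

rows = getdata().decode("utf-8").splitlines()
header, data = rows[0], rows[1:]
print(header)
print(len(data), "data rows")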
import pytest

from mitmproxy.test import taddons
from mitmproxy.test import tflow

from mitmproxy import io
from mitmproxy import exceptions
from mitmproxy import options
from mitmproxy.addons import streamfile


def test_configure(tmpdir):
    sa = streamfile.StreamFile()
    with taddons.context(options=options.Options()) as tctx:
        with pytest.raises(exceptions.OptionsError):
            tctx.configure(sa, streamfile=str(tmpdir))
        with pytest.raises(Exception, match="Invalid filter"):
            tctx.configure(
                sa, streamfile=str(tmpdir.join("foo")), streamfile_filter="~~"
            )
        tctx.configure(sa, streamfile_filter="foo")
        assert sa.filt
        tctx.configure(sa, streamfile_filter=None)
        assert not sa.filt


def rd(p):
    x = io.FlowReader(open(p, "rb"))
    return list(x.stream())


def test_tcp(tmpdir):
    sa = streamfile.StreamFile()
    with taddons.context() as tctx:
        p = str(tmpdir.join("foo"))
        tctx.configure(sa, streamfile=p)

        tt = tflow.ttcpflow()
        sa.tcp_start(tt)
        sa.tcp_end(tt)
        tctx.configure(sa, streamfile=None)
        assert rd(p)


def test_simple(tmpdir):
    sa = streamfile.StreamFile()
    with taddons.context() as tctx:
        p = str(tmpdir.join("foo"))

        tctx.configure(sa, streamfile=p)

        f = tflow.tflow(resp=True)
        sa.request(f)
        sa.response(f)
        tctx.configure(sa, streamfile=None)
        assert rd(p)[0].response

        tctx.configure(sa, streamfile="+" + p)
        f = tflow.tflow()
        sa.request(f)
        tctx.configure(sa, streamfile=None)
        assert not rd(p)[1].response
import cv2
import tensorflow as tf

import constants as c


def prepare(filepath, width, height):
    img_array = cv2.imread(filepath, cv2.IMREAD_GRAYSCALE)
    img_array = img_array / 255.0
    new_array = cv2.resize(img_array, (width, height))
    return new_array.reshape(-1, width, height, 1)


def predict(filepath, model, model_name):
    prediction = None
    if model_name == "screen":  # compare strings by value, not identity
        prediction = model.predict_classes([prepare(filepath, c.IMG_SIZE_SCREEN_WIDTH, c.IMG_SIZE_SCREEN_HEIGHT)])
    elif model_name == "character":
        prediction = model.predict_classes([prepare(filepath, c.IMG_SIZE_CHAR_WIDTH, c.IMG_SIZE_CHAR_HEIGHT)])
    else:
        print("Invalid model name provided", flush=True)
        return None

    # print(prediction)
    index = prediction[0]
    return index

# prediction = model.predict_classes([prepare('dog.jpg')])
import json

with open('group3_data.json', 'r', encoding='utf8') as fp:
    json_data = json.load(fp)

userlist = []
for user in json_data:
    userlist.append(user)

print(userlist)
print(len(userlist))
import math


class GeoLocation:
    '''
    Class representing a coordinate on a sphere, most likely Earth.

    This class is based from the code sample in this paper:
        http://janmatuschek.de/LatitudeLongitudeBoundingCoordinates

    The owner of that website, Jan Philip Matuschek, is the full owner of
    his intellectual property. This class is simply a Python port of his very
    useful Java code. All code written by Jan Philip Matuschek and ported by me
    (which is all of this class) is owned by Jan Philip Matuschek.
    '''

    MIN_LAT = math.radians(-90)
    MAX_LAT = math.radians(90)
    MIN_LON = math.radians(-180)
    MAX_LON = math.radians(180)

    EARTH_RADIUS = 6378.1  # kilometers

    @classmethod
    def from_degrees(cls, deg_lat, deg_lon):
        rad_lat = math.radians(deg_lat)
        rad_lon = math.radians(deg_lon)
        return GeoLocation(rad_lat, rad_lon, deg_lat, deg_lon)

    @classmethod
    def from_radians(cls, rad_lat, rad_lon):
        deg_lat = math.degrees(rad_lat)
        deg_lon = math.degrees(rad_lon)
        return GeoLocation(rad_lat, rad_lon, deg_lat, deg_lon)

    def __init__(self, rad_lat, rad_lon, deg_lat, deg_lon):
        self.rad_lat = float(rad_lat)
        self.rad_lon = float(rad_lon)
        self.deg_lat = float(deg_lat)
        self.deg_lon = float(deg_lon)
        self._check_bounds()

    def __str__(self):
        degree_sign = u'\N{DEGREE SIGN}'
        return ("({0:.4f}deg, {1:.4f}deg) = ({2:.6f}rad, {3:.6f}rad)").format(
            self.deg_lat, self.deg_lon, self.rad_lat, self.rad_lon)

    def _check_bounds(self):
        if (self.rad_lat < GeoLocation.MIN_LAT
                or self.rad_lat > GeoLocation.MAX_LAT
                or self.rad_lon < GeoLocation.MIN_LON
                or self.rad_lon > GeoLocation.MAX_LON):
            raise Exception("Illegal arguments")

    def distance_to(self, other, radius=EARTH_RADIUS):
        '''
        Computes the great circle distance between this GeoLocation instance
        and the other.
        '''
        return radius * math.acos(
            math.sin(self.rad_lat) * math.sin(other.rad_lat) +
            math.cos(self.rad_lat) *
            math.cos(other.rad_lat) *
            math.cos(self.rad_lon - other.rad_lon)
        )

    def bounding_locations(self, distance, radius=EARTH_RADIUS):
        '''
        Computes the bounding coordinates of all points on the surface
        of a sphere that has a great circle distance to the point represented
        by this GeoLocation instance that is less or equal to the distance argument.

        Param:
            distance - the distance from the point represented by this GeoLocation
                       instance. Must be measured in the same unit as the radius
                       argument (which is kilometers by default)

            radius   - the radius of the sphere. defaults to Earth's radius.

        Returns a list of two GeoLocations - the SW corner and the NE corner - that
        represents the bounding box.
        '''
        if radius < 0 or distance < 0:
            raise Exception("Illegal arguments")

        # angular distance in radians on a great circle
        rad_dist = distance / radius

        min_lat = self.rad_lat - rad_dist
        max_lat = self.rad_lat + rad_dist

        if min_lat > GeoLocation.MIN_LAT and max_lat < GeoLocation.MAX_LAT:
            delta_lon = math.asin(math.sin(rad_dist) / math.cos(self.rad_lat))

            min_lon = self.rad_lon - delta_lon
            if min_lon < GeoLocation.MIN_LON:
                min_lon += 2 * math.pi

            max_lon = self.rad_lon + delta_lon
            if max_lon > GeoLocation.MAX_LON:
                max_lon -= 2 * math.pi
        # a pole is within the distance
        else:
            min_lat = max(min_lat, GeoLocation.MIN_LAT)
            max_lat = min(max_lat, GeoLocation.MAX_LAT)
            min_lon = GeoLocation.MIN_LON
            max_lon = GeoLocation.MAX_LON

        return [GeoLocation.from_radians(min_lat, min_lon),
                GeoLocation.from_radians(max_lat, max_lon)]


if __name__ == '__main__':
    # receive data (coordinate and range) from a ruby method within geocalc.rb
    # and return an array of bounding box coordinates
    import sys  # This module provides functions and variables that can be used to manipulate different parts of the Python runtime environment.

    a = []  # "a" will be an array with a[0], a[1] being a starting coordinate (lat, lng); a[2] is range in miles
    temp = sys.stdin.readlines()  # Call readline() repeatedly and return a list of the lines so read.
    for item in temp:
        a.append(float(item))

    loc = GeoLocation.from_degrees(a[0], a[1])
    distance = a[2] / .62137  # conversion from miles to kilometers
    SW_loc, NE_loc = loc.bounding_locations(distance)

    # print loc.distance_to(SW_loc)
    # print loc.distance_to(NE_loc)
    print SW_loc
    print NE_loc
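A small illustrative use of distance_to and bounding_locations, with approximate coordinates for London and Paris chosen purely as an example (not part of the original script).

london = GeoLocation.from_degrees(51.5074, -0.1278)
paris = GeoLocation.from_degrees(48.8566, 2.3522)
print(london.distance_to(paris))          # roughly 343 km with the default Earth radius
sw, ne = london.bounding_locations(10.0)  # 10 km box around London
print(sw)
print(ne)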
n = 0
qt = 0
s = 0
while n != 999:
    n = int(input('Enter a number [999 ends]: '))
    if n != 999:
        qt += 1
        s += n
print('{} numbers were entered, and their sum is {}.'.format(qt, s))
import filecmp import logging import os import tempfile import unittest import xarray as xr from labop.data import serialize_sample_format from labop.strings import Strings from labop.utils.helpers import file_diff, initialize_protocol from labop_convert import MarkdownSpecialization from labop_convert.behavior_specialization import DefaultBehaviorSpecialization xr.set_options(display_expand_data=False) import sbol3 import tyto import labop import uml from labop.execution_engine import ExecutionEngine from labop_convert.plate_coordinates import ( coordinate_rect_to_row_col_pairs, coordinate_to_row_col, get_sample_list, ) OUT_DIR = os.path.join(os.path.dirname(__file__), "out") if not os.path.exists(OUT_DIR): os.mkdir(OUT_DIR) filename = "".join(__file__.split(".py")[0].split("/")[-1:]) logger: logging.Logger = logging.Logger(__file__) logger.setLevel(logging.INFO) # Need logical to physical mapping # Need volumes, and source contents # Mix needs order for transfers (could be sub-protocol, media, then low volume) # Need to surface assumptions about operations. (Big then small, other heuristics?, type of reagent?) # Opentrons will perform in column order unless specified. class TestProtocolEndToEnd(unittest.TestCase): def test_create_protocol(self): protocol, doc = initialize_protocol() # The aliquots will be the coordinates of the SampleArray and SampleMap objects aliquot_ids = get_sample_list("A1:A4") # Make Components for the contents of the SampleArray reagent1 = sbol3.Component( "ddH2Oa", "https://identifiers.org/pubchem.substance:24901740" ) reagent2 = sbol3.Component( "ddH2Ob", "https://identifiers.org/pubchem.substance:24901740" ) reagents = [reagent1, reagent2] # TODO ContainerSpec without parameters will refer to a logical container of unspecified size and geometry source_spec = labop.ContainerSpec( "abstractPlateRequirement1", name="abstractPlateRequirement1" ) target_spec = labop.ContainerSpec( "abstractPlateRequirement2", name="abstractPlateRequirement2" ) # Arbitrary volume to use in specifying the reagents in the container. 
default_volume = sbol3.Measure(600, tyto.OM.microliter) source_array = labop.SampleArray( name="source", container_type=source_spec, initial_contents=serialize_sample_format( xr.DataArray( [ [default_volume.value for reagent in reagents] for id in aliquot_ids ], name="source", dims=(Strings.SAMPLE, "contents"), attrs={"units": "uL"}, coords={ Strings.SAMPLE: aliquot_ids, "contents": [r.identity for r in reagents], }, ) ), ) create_source = protocol.primitive_step( "EmptyContainer", specification=source_spec, sample_array=source_array, ) target_array = labop.SampleArray( name="target", container_type=target_spec, initial_contents=serialize_sample_format( xr.DataArray( [[0.0 for reagent in reagents] for id in aliquot_ids], name="target", dims=(Strings.SAMPLE, "contents"), attrs={"units": "uL"}, coords={ Strings.SAMPLE: aliquot_ids, "contents": [r.identity for r in reagents], }, ) ), ) create_target = protocol.primitive_step( "EmptyContainer", specification=target_spec, sample_array=target_array, ) plan_mapping = serialize_sample_format( xr.DataArray( [ [ [ [ # f"{source_array}:{source_aliquot}->{target_array}:{target_aliquot}" # rand(0.0, 10.0) 10.0 for target_aliquot in aliquot_ids ] for target_array in [target_array.name] ] for source_aliquot in aliquot_ids ] for source_array in [source_array.name] ], dims=( "source_array", "source_aliquot", "target_array", "target_aliquot", ), attrs={"units": "uL"}, coords={ "source_array": [source_array.name], "source_aliquot": aliquot_ids, "target_array": [target_array.name], "target_aliquot": aliquot_ids, }, ) ) # The SampleMap specifies the sources and targets, along with the mappings. plan = labop.SampleMap( sources=[source_array], targets=[target_array], values=plan_mapping ) # The outputs of the create_source and create_target calls will be identical # to the source_array and target_array. They will not be on the output pin # until execution, but the SampleMap needs to reference them. 
transfer_by_map = protocol.primitive_step( "TransferByMap", source=create_source.output_pin("samples"), destination=create_target.output_pin("samples"), plan=plan, amount=sbol3.Measure(0, tyto.OM.milliliter), temperature=sbol3.Measure(30, tyto.OM.degree_Celsius), ) measure = protocol.primitive_step( "MeasureAbsorbance", samples=create_target.output_pin("samples"), wavelength=sbol3.Measure(600, tyto.OM.nanometer), ) result = protocol.designate_output( "absorbance", sbol3.OM_MEASURE, measure.output_pin("measurements") ) protocol.order(result, protocol.final()) ######################################## # Validate and write the document agent = sbol3.Agent("test_agent") # Execute the protocol # In order to get repeatable timings, we use ordinal time in the test # where each timepoint is one second after the previous time point ee = ExecutionEngine( use_ordinal_time=True, out_dir=OUT_DIR, specializations=[ DefaultBehaviorSpecialization(), # MarkdownSpecialization("samplemap.md") ], ) execution = ee.execute( protocol, agent, id="test_execution", parameter_values=[] ) # result = xr.DataArray.from_dict(json.loads(execution.parameter_values[0].value.value.lookup().contents)) execution.to_dot().render(os.path.join(OUT_DIR, filename)) print("Validating and writing protocol") v = doc.validate() assert len(v) == 0, "".join(f"\n {e}" for e in v) temp_name = os.path.join(tempfile.gettempdir(), f"{filename}.nt") doc.write(temp_name, sbol3.SORTED_NTRIPLES) print(f"Wrote file as {temp_name}") comparison_file = os.path.join( os.path.dirname(os.path.realpath(__file__)), "testfiles", filename + ".nt", ) # doc.write(comparison_file, sbol3.SORTED_NTRIPLES) print(f"Comparing against {comparison_file}") diff = "".join(file_diff(comparison_file, temp_name)) print(f"Difference:\n{diff}") assert filecmp.cmp(temp_name, comparison_file), "Files are not identical" print("File identical with test file") def test_mask(self): self.assertNotEqual( get_sample_list("A1:H12"), [ "A1", "B1", "C1", "D1", "E1", "F1", "G1", "H1", "I1", "J1", "K1", "L1", "A2", "B2", "C2", "D2", "E2", "F2", "G2", "H2", "I2", "J2", "K2", "L2", "A3", "B3", "C3", "D3", "E3", "F3", "G3", "H3", "I3", "J3", "K3", "L3", "A4", "B4", "C4", "D4", "E4", "F4", "G4", "H4", "I4", "J4", "K4", "L4", "A5", "B5", "C5", "D5", "E5", "F5", "G5", "H5", "I5", "J5", "K5", "L5", "A6", "B6", "C6", "D6", "E6", "F6", "G6", "H6", "I6", "J6", "K6", "L6", "A7", "B7", "C7", "D7", "E7", "F7", "G7", "H7", "I7", "J7", "K7", "L7", "A8", "B8", "C8", "D8", "E8", "F8", "G8", "H8", "I8", "J8", "K8", "L8", ], ) self.assertEqual( get_sample_list("A1:H12"), [ "A1", "B1", "C1", "D1", "E1", "F1", "G1", "H1", "A2", "B2", "C2", "D2", "E2", "F2", "G2", "H2", "A3", "B3", "C3", "D3", "E3", "F3", "G3", "H3", "A4", "B4", "C4", "D4", "E4", "F4", "G4", "H4", "A5", "B5", "C5", "D5", "E5", "F5", "G5", "H5", "A6", "B6", "C6", "D6", "E6", "F6", "G6", "H6", "A7", "B7", "C7", "D7", "E7", "F7", "G7", "H7", "A8", "B8", "C8", "D8", "E8", "F8", "G8", "H8", "A9", "B9", "C9", "D9", "E9", "F9", "G9", "H9", "A10", "B10", "C10", "D10", "E10", "F10", "G10", "H10", "A11", "B11", "C11", "D11", "E11", "F11", "G11", "H11", "A12", "B12", "C12", "D12", "E12", "F12", "G12", "H12", ], ) self.assertEqual(coordinate_rect_to_row_col_pairs("H11:H12")[1], (7, 11)) self.assertEqual(coordinate_to_row_col("H12"), (7, 11)) if __name__ == "__main__": unittest.main()
#!/usr/bin/env python3
"""fix_fits.py

A script to undo the compression on HDUs as implemented in the .fz file format.
It will not overwrite an existing file.

Requires:
    astropy
    docopt

Usage:
    fix_fits.py INPUT [ -o OUTPUT ]

Options:
    -h --help      Show this message.
    -o OUTPUT      if not specified then name is INPUT-uncompressed.fits
    -v --version   Display version information
"""

__author__ = "Dr Evan Crawford (e.crawford@westernsydeny.edu.au)"
__version__ = "0.1"

from docopt import docopt
import os
from astropy.io import fits
import astropy.wcs as W

if __name__ == "__main__":
    ## Grab docopt arguments
    arguments = docopt(__doc__, version="%s -- %s" % (__file__, __version__))
    # print(arguments)

    if arguments['-o']:
        output = arguments['-o']
    else:
        output = '.'.join(os.path.basename(arguments['INPUT']).split('.')[:-2]) + '-uncompressed.fits'

    f = fits.open(arguments['INPUT'])
    newf = fits.HDUList()
    newf.append(f[0])
    for h in f[1:]:
        wcs = W.WCS(h.header)
        n = fits.ImageHDU(h.data)
        n.header.update(wcs.to_header())
        newf.append(n)
    newf.writeto(output)
import numpy as np def start_config(N,M): '''Start config.''' return np.random.randint(2,size=N*M).reshape((N,M)) def grab_neighbors(i,j,config): '''Grab neighbors with PBCs''' confsh=config.shape N,M=confsh[0],confsh[1] top = [i-1 if i-1>=0 else N-1, j ] bottom = [(i+1)%N,j] left = [i,j-1 if j-1>=0 else M-1] right = [i,(j+1)%M] return np.array([config[top[0],top[1]],config[bottom[0],bottom[1]],config[right[0],right[1]],config[left[0],left[1]]])
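
# Quick usage sketch (an illustrative addition, not part of the original module): on a
# 3x4 lattice the neighbors of site (0, 0) wrap around both edges, so the result is
# drawn from row N-1 and column M-1 as well as from sites (1, 0) and (0, 1).
if __name__ == "__main__":
    np.random.seed(0)                     # make the example reproducible
    config = start_config(3, 4)
    print(config)
    # order of the returned array: top, bottom, right, left neighbors of (0, 0),
    # i.e. config[2, 0], config[1, 0], config[0, 1], config[0, 3]
    print(grab_neighbors(0, 0, config))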
# -*- coding: utf-8 -*-

import unittest

from minecraft_dynmap_timemachine import dynmap
from minecraft_dynmap_timemachine import projection


class TestDynMapClassMethods(unittest.TestCase):
    def test_dynmap_parse_config_urls(self):
        config_urls = dynmap.DynMap.parse_config_urls_string("var config = { url : { key: \"value\" } };")
        self.assertIsInstance(config_urls, dict)
        self.assertIn('key', config_urls)
        self.assertEqual(config_urls['key'], 'value')


class TestDynMap(unittest.TestCase):
    @classmethod
    def setup_class(cls):
        # cls._db_dynmap_name = 'test_dynmap.db'
        # try:
        #     os.remove(cls._db_dynmap_name)
        # except:
        #     pass
        # cls._db = SqliteDatabase(cls._db_dynmap_name)
        # dynmap.database_proxy.initialize(cls._db)
        # dynmap.DynMap.create_table()
        cls.dm_majncraft = dynmap.DynMap('http://map.majncraft.cz/')
        # cls.dm_majncraft.save()

    # @classmethod
    # def teardown_class(cls):
    #     cls._db.close()
    #     os.remove(cls._db_dynmap_name)

    def test_dynmap_config_majncraft_cz(self):
        config_urls = self.dm_majncraft.urls
        # self.dm_majncraft.save()
        # self.assertGreater(len(self.dm_majncraft.config_urls_json), 0)
        self.assertIn('configuration', config_urls)
        self.assertIn('update', config_urls)
        self.assertIn('tiles', config_urls)
        self.assertIn('markers', config_urls)

        # global dm_majnuj_cz
        config = self.dm_majncraft.config
        # self.dm_majncraft.save()
        # self.assertGreater(len(self.dm_majncraft.config_json), 0)
        self.assertIn('worlds', config)
        self.assertIn('dynmapversion', config)
        self.assertIn('coreversion', config)
        self.assertIn('title', config)

    def test_worlds_majncraft_cz(self):
        worlds = self.dm_majncraft.worlds
        self.assertGreaterEqual(len(worlds), 3)  # assume there's a few of them. Modify if needed.
        self.assertIn('world', worlds)
        self.assertEqual(worlds['world'].name, 'world')
        self.assertEqual(worlds['world'].title, u'Eternia | Overworld')
        print()
        for name in worlds.keys():
            print(name, ' - ', worlds[name].title)

    def test_maps_majncraft_cz(self):
        maps = self.dm_majncraft.worlds['world'].maps
        self.assertGreater(len(maps), 0)
        # self.assertTrue(dynmap.Map.is_known_perspective(maps['surface'].perspective))
        # self.assertTrue(dynmap.Map.is_known_shader(maps['surface'].shader))
        self.assertEqual(maps['surface'].name, 'surface')
        self.assertEqual(maps['surface'].title, u'Prostorová - Den')
        self.assertGreater(len(maps['surface'].worldtomap), 0)
        # test unknown perspective and shader
        # self.assertRaises(dynmap.MapException, dynmap.Map, {'perspective': 'fake_perspective', 'shader': 'stdtexture'})
        # self.assertRaises(dynmap.MapException, dynmap.Map, {'perspective': 'iso_SE_60_hires', 'shader': 'fake_shader'})
        print()
        for name in maps.keys():
            print(name, ' - ', maps[name].title)

    def test_worldtomap(self):
        dm_map = self.dm_majncraft.worlds['world'].maps['surface']
        self.assertIsInstance(dm_map.worldtomap, list)
        self.assertEqual(len(dm_map.worldtomap), 9)

    def test_map_image_url(self):
        dm_map = self.dm_majncraft.worlds['world'].maps['surface']
        m_loc = projection.MinecraftLocation(3020, 65, 700, dm_map.worldtomap)
        t_loc = m_loc.to_tile_location(0)
        print()
        print(dm_map.image_url(t_loc))
        t_loc = m_loc.to_tile_location(1)
        print(dm_map.image_url(t_loc))
        t_loc = m_loc.to_tile_location(2)
        print(dm_map.image_url(t_loc))

        dm_map = self.dm_majncraft.worlds['world'].maps['flat']
        m_loc = projection.MinecraftLocation(3020, 65, 700, dm_map.worldtomap)
        t_loc = m_loc.to_tile_location(0)
        print(dm_map.image_url(t_loc))
        t_loc = m_loc.to_tile_location(1)
        print(dm_map.image_url(t_loc))
        t_loc = m_loc.to_tile_location(2)
        print(dm_map.image_url(t_loc))
# Create a program that pulls data from OpenWeatherMap.org that prints out information about the current weather, such as the high, the low, and the amount of rain for wherever you live. Depending on how skilled you are, you can actually do some neat stuff with this project.
#
# Subgoals
#
# Print out data for the next 5-7 days so you have a 5 day/week long forecast.
#
# Print the data to another file that you can open up and view, instead of viewing the information in the command line.
#
# If you know html, write a file that you can print information to so that your project is more interesting. Here is an example of the results from what I threw together.
#
# Tips
#
# APIs that are in Json are essentially lists and dictionaries. Remember that to reference something in a list, you must refer to it by what number element it is in the list, and to reference a key in a dictionary, you must refer to it by its name.
#
# Don't like Celsius? Add &units=imperial to the end of the URL of the API to receive your data in Fahrenheit.

import requests
import math

API_key = "YOUR_API_KEY"  # replace with your own OpenWeatherMap API key

city_name = input("Enter a city name for next five day forecast: ")

base_url = "http://api.openweathermap.org/data/2.5/forecast?"
final_url = base_url + "appid=" + API_key + "&q=" + city_name.lower() + "&units=metric"

weather_data = requests.get(final_url).json()

print("")
try:
    for day in weather_data['list'][::8]:
        print("{} celsius ({})".format(math.floor(day['main']['temp']), day['weather'][0]['main']))
except KeyError:
    print("No records for that city")
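
# Subgoal sketch (an illustrative addition, not part of the original script): write the
# same 5-day summary to a text file so it can be viewed outside the command line.
# "forecast.txt" is just an example filename; the fields used are the same ones
# printed above.
try:
    with open("forecast.txt", "w") as out:
        for day in weather_data['list'][::8]:
            out.write("{} celsius ({})\n".format(math.floor(day['main']['temp']),
                                                 day['weather'][0]['main']))
except KeyError:
    pass  # no records for that city, nothing to write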
import bs4 from bs4 import BeautifulSoup as soup from urllib.request import urlopen as uReq import os myurl = ( "https://www.newegg.com/Video-Cards-Video-Devices/Category/ID-38?Tpk=graphics+cards" ) # opening a connection and grabbing the page uClient = uReq(myurl) page_html = uClient.read() # storing the html page in a variable uClient.close() # parses the html we get from uReq page_soup = soup(page_html, "html.parser") # grabs all the products on the page containers = page_soup.findAll("div", {"class": "item-container"}) # get the first product of the list container = containers[0] # grab the title by navigating through the html title = container.div.div.a.img["title"] print(title) # cycle through all the containers in our list and print the titles for container in containers: brand = container.div.div.a.img["title"] title_container = container.findAll("a", {"class": "item-title"}) title = title_container[0].text price_container = container.findAll("li", {"class": "price-current"}) price = price_container[0].strong.text price = "$" + price + ".99" shipping_container = container.findAll("li", {"class": "price-ship"}) shipping = shipping_container[0].text print(brand) print(title) print(price) print(shipping) print(" ") print(" ")
# Check whether a positive integer n is prime
# n is prime only when it has exactly two divisors (1 and n itself)
n = int(input('Enter a positive integer to check whether it is prime: '))
total = 0
for c in range(1, n + 1):
    if n % c == 0:
        total += 1
if total == 2:
    print(f'The number {n} was divisible {total} times. It is prime.')
else:
    print(f'The number {n} was divisible {total} times. It is not prime.')
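
# A more economical variant, added here only as an illustrative sketch (not part of
# the exercise above): trial division only needs to test divisors up to sqrt(n) and
# can stop at the first one found.
def is_prime(n):
    if n < 2:
        return False
    d = 2
    while d * d <= n:
        if n % d == 0:
            return False
        d += 1
    return True

# is_prime(7) -> True, is_prime(9) -> False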
# Write a Python program to get unique values from a list def uniqueValues(listprovided): unique = [] for i in range(len(listprovided)): if listprovided[i] not in unique: unique.append(listprovided[i]) return unique listprovided = [10, 20, 30, 40, 20, 50, 60, 40] output = uniqueValues(listprovided) print(output)
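
# For comparison (an illustrative alternative, not part of the original exercise):
# dict.fromkeys keeps the first occurrence of each value in order on Python 3.7+,
# so the same result can be produced in one line.
print(list(dict.fromkeys(listprovided)))  # [10, 20, 30, 40, 50, 60]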
# HackerRank-style set mutations: read a set, apply each command (pop / remove x /
# discard x) to it, then print the sum of what remains.
n = input()
s = set(map(int, input().split()))
for _ in range(int(input())):
    command, *args = input().split()
    # look the method up by name and call it; the original eval-based version never
    # actually called zero-argument commands such as "pop"
    getattr(s, command)(*map(int, args))
print(sum(s))
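
# Example session (assumed HackerRank-style input, shown purely for illustration):
#   3           <- number of elements in the set
#   1 2 3       <- the set
#   2           <- number of commands
#   remove 1
#   discard 2
# prints 3, since only the element 3 is left in the set.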
from panda3d.core import Vec3, Point3 from bsp.bspbase import BSPUtils from .Line import Line from . import PlaneClassification from .Plane import Plane class Winding: def __init__(self, vertices, plane): self.vertices = vertices self.plane = plane @staticmethod def fromVertices(vertices): poly = Winding(vertices, Plane.fromVertices(vertices[0], vertices[1], vertices[2])) poly.simplify() return poly # Creates a huge quadrilateral winding given a plane. @staticmethod def fromPlaneAndRadius(plane, radius = 32768): normal = plane.getNormal() dist = -plane.getW() # Find the major axis x = plane.getClosestAxisToNormal() up = Vec3.unitX() if x == Vec3.unitZ() else Vec3.unitZ() v = up.dot(normal) up = BSPUtils.extrude(up, -v, normal) up.normalize() org = normal * dist right = up.cross(normal) up = up * radius right = right * radius # Project a really big axis aligned box onto the plane verts = [ org - right + up, org + right + up, org + right - up, org - right - up ] poly = Winding(verts, plane) return poly def isValid(self): for vert in self.vertices: if self.plane.onPlane(vert) != 0: # Vert doesn't lie within the plane. return False return True def simplify(self): # Remove colinear vertices i = 0 while 1: numVerts = len(self.vertices) - 2 if i >= numVerts: break v1 = self.vertices[i] v2 = self.vertices[i + 2] p = self.vertices[i + 1] line = Line(v1, v2) # If the midpoint is on the line, remove it if line.closestPoint(p).almostEqual(p): del self.vertices[i + 1] i += 1 def xform(self, mat): for i in range(len(self.vertices)): self.vertices[i] = mat.xformPoint(self.vertices[i]) self.plane = Plane.fromVertices(self.vertices[0], self.vertices[1], self.vertices[2]) def isConvex(self, epsilon = 0.001): for i in range(len(self.vertices)): v1 = self.vertices[i] v2 = self.vertices[(i + 1) % len(self.vertices)] v3 = self.vertices[(i + 2) % len(self.vertices)] l1 = (v1 - v2).normalized() l2 = (v3 - v2).normalized() cross = l1.cross(l2) if abs(self.plane.distToPlane(v2 + cross)) > epsilon: return False return True def getOrigin(self): return self.plane.getNormal() * -self.plane.getW() def classifyAgainstPlane(self, plane): front = 0 back = 0 onplane = 0 count = len(self.vertices) for i in range(count): test = plane.onPlane(self.vertices[i]) if test <= 0: back += 1 if test >= 0: front += 1 if test == 0: onplane += 1 if onplane == count: return PlaneClassification.OnPlane if front == count: return PlaneClassification.Front if back == count: return PlaneClassification.Back return PlaneClassification.Spanning def splitInPlace(self, clipPlane, epsilon = 0.01): front = self.split(clipPlane, epsilon) if front: self.vertices = list(front.vertices) self.plane = Plane(front.plane) return True return False def split(self, plane, epsilon = 0.01): dists = [] sides = [] counts = [0, 0, 0] norm = plane.getNormal() dist = -plane.getW() # Determine sides for each point for i in range(len(self.vertices)): dot = self.vertices[i].dot(norm) dot -= dist dists.append(dot) if dot > epsilon: sides.append(PlaneClassification.Front) elif dot < -epsilon: sides.append(PlaneClassification.Back) else: sides.append(PlaneClassification.OnPlane) counts[sides[i]] += 1 sides.append(sides[0]) dists.append(dists[0]) if not counts[0] and not counts[1]: return self if not counts[0]: return None if not counts[1]: return self verts = [] for i in range(len(self.vertices)): p1 = self.vertices[i] if sides[i] in [PlaneClassification.Front, PlaneClassification.OnPlane]: verts.append(p1) if sides[i] == PlaneClassification.OnPlane: continue if sides[i+1] 
in [PlaneClassification.OnPlane, sides[i]]: continue # Generate a split point if i == len(self.vertices) - 1: p2 = self.vertices[0] else: p2 = self.vertices[i+1] mid = Point3(0) dot = dists[i] / (dists[i]-dists[i+1]) for j in range(3): # Avoid round off error when possible if norm[j] == 1: mid[j] = dist elif norm[j] == -1: mid[j] = -dist else: mid[j] = p1[j] + dot*(p2[j]-p1[j]) verts.append(mid) return Winding.fromVertices(verts) def roundPoints(self, epsilon = 0.01): # # Round all points in the winding that are within `epsilon` of # integer values # for i in range(len(self.vertices)): for j in range(3): v = self.vertices[i][j] v1 = round(v) if (v != v1) and abs(v - v1) < epsilon: self.vertices[i][j] = v1 def flip(self): self.vertices.reverse() self.plane.flip()
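
# Usage sketch (illustrative only; it assumes the Plane helpers behave as they are
# used above, i.e. Plane.fromVertices builds the plane through three points and
# split() returns the piece lying in front of the clip plane):
#
#   base = Winding.fromPlaneAndRadius(Plane.fromVertices(Point3(0, 0, 0),
#                                                        Point3(1, 0, 0),
#                                                        Point3(0, 1, 0)))
#   front_piece = base.split(Plane.fromVertices(Point3(0, 0, 0),
#                                               Point3(0, 1, 0),
#                                               Point3(0, 0, 1)))
#   # front_piece is None, the original winding, or a new smaller Winding depending
#   # on how the clip plane cuts it.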
#!/usr/bin/python3 def multiply_by_2(a_dictionary): key_list = sorted(a_dictionary.keys()) new_dict = {key: a_dictionary[key] * 2 for key in key_list} return new_dict
""" 19. Palindrome Number Question: Determine whether an integer is a palindrome. Do this without extra space. Example Questions Candidate Might Ask: Q: Does negative integer such as –1 qualify as a palindrome? A: For the purpose of discussion here, we define negative integers as non-palindrome. """ class Solution: def isPalindrome(self, x): """ :type x: int :rtype: bool """ if x < 0: return False y = str(x) i = 0 j = len(y) - 1 while i < j: if y[i] == y[j]: i += 1 j -= 1 else: return False return True
"""Tests for Doof""" import asyncio from contextlib import asynccontextmanager from datetime import datetime, timedelta import pytest import pytz from bot import ( CommandArgs, Bot, FINISH_RELEASE_ID, NEW_RELEASE_ID, ) from conftest import ( ANNOUNCEMENTS_CHANNEL, LIBRARY_TEST_REPO_INFO, WEB_TEST_REPO_INFO, ) from constants import ( LIBRARY_TYPE, TRAVIS_FAILURE, TRAVIS_SUCCESS, WEB_APPLICATION_TYPE, ) from exception import ( ReleaseException, ResetException, ) from github import get_org_and_repo from lib import ( format_user_id, next_versions, now_in_utc, ReleasePR, ) from test_util import ( async_context_manager_yielder, async_gen_wrapper, make_pr, make_issue, make_parsed_issue, ) pytestmark = pytest.mark.asyncio GITHUB_ACCESS = 'github' SLACK_ACCESS = 'slack' # pylint: disable=redefined-outer-name, too-many-lines class DoofSpoof(Bot): """Testing bot""" def __init__(self, *, loop): """Since the testing bot isn't contacting slack or github we don't need these tokens here""" super().__init__( doof_id="Doofenshmirtz", slack_access_token=SLACK_ACCESS, github_access_token=GITHUB_ACCESS, timezone=pytz.timezone("America/New_York"), repos_info=[WEB_TEST_REPO_INFO, LIBRARY_TEST_REPO_INFO, ANNOUNCEMENTS_CHANNEL], loop=loop, ) self.slack_users = [] self.messages = {} async def lookup_users(self): """Users in the channel""" return self.slack_users def _append(self, channel_id, message_dict): """Add a message to the list so we can assert it was sent""" if channel_id not in self.messages: self.messages[channel_id] = [] self.messages[channel_id].append(message_dict) async def _say(self, *, channel_id, text=None, attachments=None, message_type=None): """Quick and dirty message recording""" self._append(channel_id, {"text": text, "attachments": attachments, "message_type": message_type}) async def update_message(self, *, channel_id, timestamp, text=None, attachments=None): """ Record message updates """ self._append(channel_id, {"text": text, "attachments": attachments, "timestamp": timestamp}) async def delete_message(self, *, channel_id, timestamp): """ Record message delete """ self._append(channel_id, {"timestamp": timestamp}) def said(self, text, *, attachments=None, channel_id=None): """Did doof say this thing?""" for message_channel_id, messages in self.messages.items(): if channel_id is None or message_channel_id == channel_id: for message in messages: if text not in str(message): continue if attachments is None: return True else: return attachments == message["attachments"] return False @pytest.fixture def doof(event_loop): """Create a Doof""" yield DoofSpoof(loop=event_loop) async def test_release_notes(doof, test_repo, test_repo_directory, mocker): """Doof should show release notes""" old_version = "0.1.2" update_version_mock = mocker.patch('bot.update_version', autospec=True, return_value=old_version) mocker.patch( 'bot.init_working_dir', side_effect=async_context_manager_yielder(test_repo_directory) ) notes = "some notes" create_release_notes_mock = mocker.async_patch('bot.create_release_notes', return_value=notes) any_new_commits_mock = mocker.async_patch('bot.any_new_commits', return_value=True) org, repo = get_org_and_repo(test_repo.repo_url) release_pr = ReleasePR('version', f'https://github.com/{org}/{repo}/pulls/123456', 'body') get_release_pr_mock = mocker.async_patch('bot.get_release_pr', return_value=release_pr) await doof.run_command( manager='mitodl_user', channel_id=test_repo.channel_id, words=['release', 'notes'], ) update_version_mock.assert_called_once_with("9.9.9", 
working_dir=test_repo_directory) create_release_notes_mock.assert_called_once_with( old_version, with_checkboxes=False, base_branch="master", root=test_repo_directory ) any_new_commits_mock.assert_called_once_with(old_version, base_branch="master", root=test_repo_directory) get_release_pr_mock.assert_called_once_with(github_access_token=GITHUB_ACCESS, org=org, repo=repo) assert doof.said("Release notes since {}".format(old_version)) assert doof.said(notes) assert doof.said(f"And also! There is a release already in progress: {release_pr.url}") async def test_release_notes_no_new_notes(doof, test_repo, test_repo_directory, mocker): """Doof should show that there are no new commits""" mocker.patch( 'bot.init_working_dir', side_effect=async_context_manager_yielder(test_repo_directory) ) old_version = "0.1.2" update_version_mock = mocker.patch('bot.update_version', autospec=True, return_value=old_version) notes = "no new commits" create_release_notes_mock = mocker.async_patch('bot.create_release_notes', return_value=notes) org, repo = get_org_and_repo(test_repo.repo_url) get_release_pr_mock = mocker.async_patch('bot.get_release_pr', return_value=None) any_new_commits_mock = mocker.async_patch('bot.any_new_commits', return_value=False) await doof.run_command( manager='mitodl_user', channel_id=test_repo.channel_id, words=['release', 'notes'], ) any_new_commits_mock.assert_called_once_with(old_version, base_branch="master", root=test_repo_directory) update_version_mock.assert_called_once_with("9.9.9", working_dir=test_repo_directory) create_release_notes_mock.assert_called_once_with( old_version, with_checkboxes=False, base_branch="master", root=test_repo_directory ) get_release_pr_mock.assert_called_once_with(github_access_token=GITHUB_ACCESS, org=org, repo=repo) assert doof.said("Release notes since {}".format(old_version)) assert not doof.said("Start a new release?") async def test_release_notes_buttons(doof, test_repo, test_repo_directory, mocker): """Doof should show release notes and then offer buttons to start a release""" mocker.patch( 'bot.init_working_dir', side_effect=async_context_manager_yielder(test_repo_directory) ) old_version = "0.1.2" update_version_mock = mocker.patch('bot.update_version', autospec=True, return_value=old_version) notes = "some notes" create_release_notes_mock = mocker.async_patch('bot.create_release_notes', return_value=notes) org, repo = get_org_and_repo(test_repo.repo_url) get_release_pr_mock = mocker.async_patch('bot.get_release_pr', return_value=None) any_new_commits_mock = mocker.async_patch('bot.any_new_commits', return_value=True) await doof.run_command( manager='mitodl_user', channel_id=test_repo.channel_id, words=['release', 'notes'], ) any_new_commits_mock.assert_called_once_with(old_version, base_branch="master", root=test_repo_directory) update_version_mock.assert_called_once_with("9.9.9", working_dir=test_repo_directory) create_release_notes_mock.assert_called_once_with( old_version, with_checkboxes=False, base_branch="master", root=test_repo_directory ) get_release_pr_mock.assert_called_once_with(github_access_token=GITHUB_ACCESS, org=org, repo=repo) assert doof.said("Release notes since {}".format(old_version)) assert doof.said(notes) minor_version, patch_version = next_versions(old_version) assert doof.said("Start a new release?", attachments=[ { 'fallback': 'New release', 'callback_id': 'new_release', 'actions': [ {'name': 'minor_release', 'text': minor_version, 'value': minor_version, 'type': 'button'}, {'name': 'patch_release', 'text': 
patch_version, 'value': patch_version, 'type': 'button'}, {'name': 'cancel', 'text': "Dismiss", 'value': "cancel", 'type': 'button', "style": "danger"} ] } ]) assert not doof.said("And also! There is a release already in progress") async def test_version(doof, test_repo, mocker): """ Doof should tell you what version the latest release was """ a_hash = 'hash' version = '1.2.3' fetch_release_hash_mock = mocker.async_patch('bot.fetch_release_hash', return_value=a_hash) get_version_tag_mock = mocker.async_patch('bot.get_version_tag', return_value="v{}".format(version)) await doof.run_command( manager='mitodl_user', channel_id=test_repo.channel_id, words=['version'], ) assert doof.said( "Wait a minute! My evil scheme is at version {}!".format(version) ) fetch_release_hash_mock.assert_called_once_with(test_repo.prod_hash_url) get_version_tag_mock.assert_called_once_with( github_access_token=GITHUB_ACCESS, repo_url=test_repo.repo_url, commit_hash=a_hash, ) # pylint: disable=too-many-locals @pytest.mark.parametrize("command", ['release', 'start release']) async def test_release(doof, test_repo, mocker, command): """ Doof should do a release when asked """ version = '1.2.3' pr = ReleasePR( version=version, url='http://new.url', body='Release PR body', ) get_release_pr_mock = mocker.async_patch('bot.get_release_pr', side_effect=[None, pr, pr]) release_mock = mocker.async_patch('bot.release') wait_for_deploy_sync_mock = mocker.async_patch('bot.wait_for_deploy') authors = {'author1', 'author2'} mocker.async_patch('bot.get_unchecked_authors', return_value=authors) wait_for_checkboxes_sync_mock = mocker.async_patch('bot.Bot.wait_for_checkboxes') command_words = command.split() + [version] me = 'mitodl_user' await doof.run_command( manager=me, channel_id=test_repo.channel_id, words=command_words, ) org, repo = get_org_and_repo(test_repo.repo_url) get_release_pr_mock.assert_any_call( github_access_token=GITHUB_ACCESS, org=org, repo=repo, ) release_mock.assert_called_once_with( github_access_token=GITHUB_ACCESS, repo_url=test_repo.repo_url, new_version=pr.version, ) wait_for_deploy_sync_mock.assert_called_once_with( github_access_token=GITHUB_ACCESS, repo_url=test_repo.repo_url, hash_url=test_repo.rc_hash_url, watch_branch='release-candidate', ) assert doof.said("Now deploying to RC...") for channel_id in [test_repo.channel_id, ANNOUNCEMENTS_CHANNEL.channel_id]: assert doof.said( "These people have commits in this release", channel_id=channel_id, ) for author in authors: assert doof.said(author, channel_id=channel_id) assert wait_for_checkboxes_sync_mock.called is True # pylint: disable=too-many-locals async def test_hotfix_release(doof, test_repo, test_repo_directory, mocker): """ Doof should do a hotfix when asked """ mocker.patch( 'bot.init_working_dir', side_effect=async_context_manager_yielder(test_repo_directory) ) commit_hash = 'uthhg983u4thg9h5' version = '0.1.2' pr = ReleasePR( version=version, url='http://new.url', body='Release PR body', ) get_release_pr_mock = mocker.async_patch('bot.get_release_pr', side_effect=[None, pr, pr]) release_mock = mocker.async_patch('bot.release') wait_for_deploy_sync_mock = mocker.async_patch('bot.wait_for_deploy') authors = {'author1', 'author2'} mocker.async_patch('bot.get_unchecked_authors', return_value=authors) wait_for_checkboxes_sync_mock = mocker.async_patch('bot.Bot.wait_for_checkboxes') old_version = "0.1.1" update_version_mock = mocker.patch('bot.update_version', autospec=True, return_value=old_version) command_words = ['hotfix', commit_hash] me = 
'mitodl_user' await doof.run_command( manager=me, channel_id=test_repo.channel_id, words=command_words, ) org, repo = get_org_and_repo(test_repo.repo_url) get_release_pr_mock.assert_any_call( github_access_token=GITHUB_ACCESS, org=org, repo=repo, ) update_version_mock.assert_called_once_with("9.9.9", working_dir=test_repo_directory) release_mock.assert_called_once_with( github_access_token=GITHUB_ACCESS, repo_url=test_repo.repo_url, new_version=pr.version, branch='release', commit_hash=commit_hash, ) wait_for_deploy_sync_mock.assert_called_once_with( github_access_token=GITHUB_ACCESS, repo_url=test_repo.repo_url, hash_url=test_repo.rc_hash_url, watch_branch='release-candidate', ) assert doof.said("Now deploying to RC...") for channel_id in [test_repo.channel_id, ANNOUNCEMENTS_CHANNEL.channel_id]: assert doof.said( "These people have commits in this release", channel_id=channel_id, ) for author in authors: assert doof.said(author, channel_id=channel_id) assert wait_for_checkboxes_sync_mock.called is True @pytest.mark.parametrize("command", ['release', 'start release']) async def test_release_in_progress(doof, test_repo, mocker, command): """ If a release is already in progress doof should fail """ version = '1.2.3' url = 'http://fake.release.pr' mocker.async_patch('bot.get_release_pr', return_value=ReleasePR( version=version, url=url, body='Release PR body', )) command_words = command.split() + [version] with pytest.raises(ReleaseException) as ex: await doof.run_command( manager='mitodl_user', channel_id=test_repo.channel_id, words=command_words, ) assert ex.value.args[0] == "A release is already in progress: {}".format(url) @pytest.mark.parametrize("command", ['release', 'start release']) async def test_release_bad_version(doof, test_repo, command): """ If the version doesn't parse correctly doof should fail """ command_words = command.split() + ['a.b.c'] await doof.run_command( manager='mitodl_user', channel_id=test_repo.channel_id, words=command_words, ) assert doof.said( 'having trouble figuring out what that means', ) @pytest.mark.parametrize("command", ['release', 'start release']) async def test_release_no_args(doof, test_repo, command): """ If no version is given doof should complain """ command_words = command.split() await doof.run_command( manager='mitodl_user', channel_id=test_repo.channel_id, words=command_words, ) assert doof.said( "Careful, careful. 
I expected 1 words but you said 0.", ) async def test_release_library(doof, library_test_repo, mocker): """Do a library release""" version = '1.2.3' pr = ReleasePR( version=version, url='http://new.url', body='Release PR body', ) get_release_pr_mock = mocker.async_patch('bot.get_release_pr', side_effect=[None, pr, pr]) release_mock = mocker.async_patch('bot.release') finish_release_mock = mocker.async_patch('bot.finish_release') wait_for_travis_sync_mock = mocker.async_patch('bot.wait_for_travis', return_value=TRAVIS_SUCCESS) command_words = ['release', version] me = 'mitodl_user' await doof.run_command( manager=me, channel_id=library_test_repo.channel_id, words=command_words, ) org, repo = get_org_and_repo(library_test_repo.repo_url) release_mock.assert_called_once_with( github_access_token=GITHUB_ACCESS, repo_url=library_test_repo.repo_url, new_version=pr.version, ) wait_for_travis_sync_mock.assert_called_once_with( github_access_token=GITHUB_ACCESS, org=org, repo=repo, branch='release-candidate', ) get_release_pr_mock.assert_called_once_with( github_access_token=GITHUB_ACCESS, org=org, repo=repo, ) finish_release_mock.assert_called_once_with( github_access_token=GITHUB_ACCESS, repo_url=library_test_repo.repo_url, version=version, timezone=doof.timezone ) assert doof.said( f"Merging evil scheme {pr.version} for {library_test_repo.name}..." ) assert doof.said( f"My evil scheme {pr.version} for {library_test_repo.name} has been released! Waiting for Travis..." ) assert doof.said( "My evil scheme {version} for {project} has been merged!".format( version=pr.version, project=library_test_repo.name, ) ) async def test_release_library_failure(doof, library_test_repo, mocker): """If a library release fails we shouldn't merge it""" version = '1.2.3' pr = ReleasePR( version=version, url='http://new.url', body='Release PR body', ) mocker.async_patch('bot.get_release_pr', side_effect=[None, pr, pr]) release_mock = mocker.async_patch('bot.release') finish_release_mock = mocker.async_patch('bot.finish_release') wait_for_travis_sync_mock = mocker.async_patch('bot.wait_for_travis', return_value=TRAVIS_FAILURE) command_words = ['release', version] me = 'mitodl_user' await doof.run_command( manager=me, channel_id=library_test_repo.channel_id, words=command_words, ) org, repo = get_org_and_repo(library_test_repo.repo_url) release_mock.assert_called_once_with( github_access_token=GITHUB_ACCESS, repo_url=library_test_repo.repo_url, new_version=pr.version, ) wait_for_travis_sync_mock.assert_called_once_with( github_access_token=GITHUB_ACCESS, org=org, repo=repo, branch='release-candidate', ) assert finish_release_mock.call_count == 0 assert doof.said( f"Merging evil scheme {pr.version} for {library_test_repo.name}..." ) assert doof.said( "Uh-oh, it looks like, uh, coffee break's over. During the release Travis had a hiccup." 
) @pytest.mark.parametrize("project_type", [WEB_APPLICATION_TYPE, LIBRARY_TYPE]) async def test_finish_release(doof, mocker, project_type): """ Doof should finish a release when asked """ version = '1.2.3' pr = ReleasePR( version=version, url='http://new.url', body='Release PR body', ) get_release_pr_mock = mocker.async_patch('bot.get_release_pr', return_value=pr) finish_release_mock = mocker.async_patch('bot.finish_release') wait_for_deploy_prod_mock = mocker.async_patch('bot.Bot._wait_for_deploy_prod') test_repo = LIBRARY_TEST_REPO_INFO if project_type == LIBRARY_TYPE else WEB_TEST_REPO_INFO await doof.run_command( manager='mitodl_user', channel_id=test_repo.channel_id, words=['finish', 'release'], ) org, repo = get_org_and_repo(test_repo.repo_url) get_release_pr_mock.assert_called_once_with( github_access_token=GITHUB_ACCESS, org=org, repo=repo, ) finish_release_mock.assert_called_once_with( github_access_token=GITHUB_ACCESS, repo_url=test_repo.repo_url, version=version, timezone=doof.timezone ) assert doof.said(f"Merged evil scheme {version} for {test_repo.name}!") if project_type == WEB_APPLICATION_TYPE: assert doof.said('deploying to production...') wait_for_deploy_prod_mock.assert_called_once_with( doof, repo_info=test_repo ) async def test_finish_release_no_release(doof, test_repo, mocker): """ If there's no release to finish doof should complain """ get_release_pr_mock = mocker.async_patch('bot.get_release_pr', return_value=None) with pytest.raises(ReleaseException) as ex: await doof.run_command( manager='mitodl_user', channel_id=test_repo.channel_id, words=['finish', 'release'], ) assert 'No release currently in progress' in ex.value.args[0] org, repo = get_org_and_repo(test_repo.repo_url) get_release_pr_mock.assert_called_once_with( github_access_token=GITHUB_ACCESS, org=org, repo=repo, ) async def test_delay_message(doof, test_repo, mocker): """ Doof should finish a release when asked """ now = datetime.now(tz=doof.timezone) seconds_diff = 30 future = now + timedelta(seconds=seconds_diff) next_workday_mock = mocker.patch('bot.next_workday_at_10', autospec=True, return_value=future) sleep_sync_mock = mocker.async_patch('asyncio.sleep') mocker.async_patch('bot.get_unchecked_authors', return_value={'author1'}) await doof.wait_for_checkboxes_reminder(repo_info=test_repo) assert doof.said( 'The following authors have not yet checked off their boxes for doof_repo: author1', ) assert next_workday_mock.call_count == 1 assert abs(next_workday_mock.call_args[0][0] - now).total_seconds() < 1 assert next_workday_mock.call_args[0][0].tzinfo.zone == doof.timezone.zone assert sleep_sync_mock.call_count == 1 assert abs(seconds_diff - sleep_sync_mock.call_args[0][0]) < 1 # pylint: disable=unsubscriptable-object async def test_webhook_different_callback_id(doof, mocker): """ If the callback id doesn't match nothing should be done """ finish_release_mock = mocker.patch( 'bot.finish_release', autospec=True ) await doof.handle_webhook( webhook_dict={ "token": "token", "callback_id": "xyz", "channel": { "id": "doof" }, "user": { "id": "doofenshmirtz" }, "message_ts": "123.45", "original_message": { "text": "Doof's original text", } }, ) assert finish_release_mock.called is False async def test_webhook_finish_release(doof, mocker): """ Finish the release """ pr_body = ReleasePR( version='version', url='url', body='body', ) get_release_pr_mock = mocker.async_patch('bot.get_release_pr', return_value=pr_body) finish_release_mock = mocker.async_patch('bot.finish_release') wait_for_deploy_prod_mock = 
mocker.async_patch('bot.Bot._wait_for_deploy_prod') await doof.handle_webhook( webhook_dict={ "token": "token", "callback_id": FINISH_RELEASE_ID, "channel": { "id": "doof" }, "user": { "id": "doofenshmirtz" }, "message_ts": "123.45", "original_message": { "text": "Doof's original text", } }, ) repo_url = WEB_TEST_REPO_INFO.repo_url org, repo = get_org_and_repo(repo_url) wait_for_deploy_prod_mock.assert_any_call( doof, repo_info=WEB_TEST_REPO_INFO, ) get_release_pr_mock.assert_any_call( github_access_token=doof.github_access_token, org=org, repo=repo, ) finish_release_mock.assert_any_call( github_access_token=doof.github_access_token, repo_url=repo_url, version=pr_body.version, timezone=doof.timezone ) assert doof.said("Merging...") assert not doof.said("Error") async def test_webhook_finish_release_fail(doof, mocker): """ If finishing the release fails we should update the button to show the error """ get_release_pr_mock = mocker.async_patch('bot.get_release_pr') finish_release_mock = mocker.async_patch('bot.finish_release', side_effect=KeyError) with pytest.raises(KeyError): await doof.handle_webhook( webhook_dict={ "token": "token", "callback_id": FINISH_RELEASE_ID, "channel": { "id": "doof" }, "user": { "id": "doofenshmirtz" }, "message_ts": "123.45", "original_message": { "text": "Doof's original text", } }, ) assert get_release_pr_mock.called is True assert finish_release_mock.called is True assert doof.said("Merging...") assert doof.said("Error") async def test_webhook_start_release(doof, test_repo, mocker): """ Start a new release """ org, repo = get_org_and_repo(test_repo.repo_url) get_release_pr_mock = mocker.async_patch('bot.get_release_pr', return_value=None) release_mock = mocker.async_patch('bot.Bot._web_application_release') version = "3.4.5" await doof.handle_webhook( webhook_dict={ "token": "token", "callback_id": NEW_RELEASE_ID, "channel": { "id": "doof" }, "user": { "id": "doofenshmirtz" }, "message_ts": "123.45", "original_message": { "text": "Doof's original text", }, "actions": [ { "value": version, "name": "minor_release", } ] }, ) assert doof.said(f"Starting release {version}...") assert release_mock.call_count == 1 assert release_mock.call_args[0][1].args == [version] assert not doof.said("Error") get_release_pr_mock.assert_called_once_with(github_access_token=GITHUB_ACCESS, org=org, repo=repo) async def test_webhook_start_release_fail(doof, mocker): """ If starting the release fails we should update the button to show the error """ release_mock = mocker.patch('bot.Bot.release_command', autospec=True, side_effect=ZeroDivisionError) version = "3.4.5" with pytest.raises(ZeroDivisionError): await doof.handle_webhook( webhook_dict={ "token": "token", "callback_id": NEW_RELEASE_ID, "channel": { "id": "doof" }, "user": { "id": "doofenshmirtz" }, "message_ts": "123.45", "original_message": { "text": "Doof's original text", }, "actions": [ { "value": version, "name": "minor_release", } ] }, ) assert doof.said(f"Starting release {version}...") assert release_mock.call_count == 1 assert release_mock.call_args[0][1].args == [version] assert doof.said("Error") async def test_webhook_dismiss_release(doof): """ Delete the buttons in the message for a new release """ timestamp = "123.45" version = "3.4.5" await doof.handle_webhook( webhook_dict={ "token": "token", "callback_id": NEW_RELEASE_ID, "channel": { "id": "doof" }, "user": { "id": "doofenshmirtz" }, "message_ts": timestamp, "original_message": { "text": "Doof's original text", }, "actions": [ { "value": version, "name": 
"cancel", } ] }, ) assert doof.said(timestamp) assert not doof.said("Starting release") async def test_uptime(doof, mocker, test_repo): """Uptime should show how much time the bot has been awake""" later = doof.doof_boot + timedelta(seconds=140) mocker.patch('bot.now_in_utc', autospec=True, return_value=later) await doof.run_command( manager='mitodl_user', channel_id=test_repo.channel_id, words=['uptime'], ) assert doof.said("Awake for 2 minutes.") async def test_reset(doof, test_repo): """Reset should cause a reset""" with pytest.raises(ResetException): await doof.run_command( manager='mitodl_user', channel_id=test_repo.channel_id, words=['reset'], ) @pytest.mark.parametrize("testing", [True, False]) async def test_upload_to_pypi(doof, library_test_repo, testing, mocker): """the upload_to_pypi command should start the upload process""" upload_to_pypi_patched = mocker.async_patch('bot.upload_to_pypi') @asynccontextmanager async def fake_init(*args, **kwargs): # pylint: disable=unused-argument """Fake empty contextmanager""" yield mocker.patch('bot.init_working_dir', side_effect=fake_init) pypi_server = "pypitest" if testing else "pypi" version = "3.4.5" await doof.run_command( manager='me', channel_id=library_test_repo.channel_id, words=['upload', 'to', pypi_server, version], ) upload_to_pypi_patched.assert_called_once_with( repo_info=library_test_repo, testing=testing, github_access_token=GITHUB_ACCESS, version=version, ) assert doof.said(f"Successfully uploaded {version} to {pypi_server}.") @pytest.mark.parametrize("command,project_type", [ ['version', LIBRARY_TYPE], ['wait for checkboxes', LIBRARY_TYPE], ['upload to pypi 1.2.3', WEB_APPLICATION_TYPE], ['upload to pypitest 1.2.3', WEB_APPLICATION_TYPE], ]) # pylint: disable=too-many-arguments async def test_invalid_project_type(doof, test_repo, library_test_repo, command, project_type): """ Compare incompatible commands with project types """ repo = test_repo if project_type == WEB_APPLICATION_TYPE else library_test_repo other_type = LIBRARY_TYPE if project_type == WEB_APPLICATION_TYPE else WEB_APPLICATION_TYPE await doof.run_command( manager='mitodl_user', channel_id=repo.channel_id, words=command.split(), ) assert doof.said(f'That command is only for {other_type} projects but this is a {project_type} project.') @pytest.mark.parametrize('command', [ 'release 1.2.3', 'start release 1.2.3', 'finish release', 'wait for checkboxes', 'upload to pypi 1.2.3', 'upload to pypitest 1.2.3', 'release notes', ]) async def test_command_without_repo(doof, command): """ Test that commands won't work on channels without a repo """ await doof.run_command( manager='mitodl_user', channel_id='not_a_repo_channel', words=command.split(), ) assert doof.said( 'That command requires a repo but this channel is not attached to any project.' 
) @pytest.mark.parametrize("is_announcement", [True, False]) async def test_announcement(is_announcement, doof): """ Test that an announcement will get sent to multiple channels """ text = "some text here" await doof.say( channel_id=LIBRARY_TEST_REPO_INFO.channel_id, text=text, attachments=[{"some": "attachment"}], message_type="a message", is_announcement=is_announcement ) assert doof.said(text, channel_id=LIBRARY_TEST_REPO_INFO.channel_id) is True assert doof.said(text, channel_id=ANNOUNCEMENTS_CHANNEL.channel_id) is is_announcement async def test_help(doof): """ Test that doof will show help text """ await doof.run_command( manager='mitodl_user', channel_id='not_a_repo_channel', words=["help"], ) assert doof.said("*help*: Show available commands") @pytest.mark.parametrize("speak_initial, has_checkboxes", [ [True, False], [True, True], [False, False], [False, True], ]) async def test_wait_for_checkboxes( mocker, doof, test_repo, speak_initial, has_checkboxes ): """wait_for_checkboxes should poll github, parse checkboxes and see if all are checked""" org, repo = get_org_and_repo(test_repo.repo_url) pr = ReleasePR('version', f'https://github.com/{org}/{repo}/pulls/123456', 'body') get_release_pr_mock = mocker.async_patch('bot.get_release_pr', return_value=pr) get_unchecked_patch = mocker.async_patch('bot.get_unchecked_authors', side_effect=[ {'author1', 'author2', 'author3'}, {'author2'}, set(), ] if has_checkboxes else [set()]) doof.slack_users = [ {"profile": {"real_name": name}, "id": username} for (name, username) in [ ("Author 1", "author1"), ("Author 2", "author2"), ("Author 3", "author3"), ] ] sleep_sync_mock = mocker.async_patch('asyncio.sleep') me = 'mitodl_user' await doof.wait_for_checkboxes( manager=me, repo_info=test_repo, speak_initial=speak_initial, ) if speak_initial: assert doof.said("isn't evil enough until all the checkboxes are checked") get_unchecked_patch.assert_any_call( github_access_token=GITHUB_ACCESS, org=org, repo=repo, ) assert get_unchecked_patch.call_count == (3 if has_checkboxes else 1) assert sleep_sync_mock.call_count == (2 if has_checkboxes else 0) get_release_pr_mock.assert_called_once_with(github_access_token=GITHUB_ACCESS, org=org, repo=repo) if speak_initial or has_checkboxes: assert doof.said( "All checkboxes checked off. Release {version} is ready for the Merginator {name}".format( version=pr.version, name=format_user_id(me), ), attachments=[ { 'actions': [ { 'name': 'finish_release', 'text': 'Finish the release', 'type': 'button', "confirm": { "title": "Are you sure?", "ok_text": "Finish the release", "dismiss_text": "Cancel", } }, ], 'callback_id': 'finish_release', 'fallback': 'Finish the release' } ] ) if speak_initial: assert doof.said(f"PR is up at {pr.url}. These people have commits in this release") if has_checkboxes: assert not doof.said( "Thanks for checking off your boxes <@author1>, <@author2>, <@author3>!" ) assert doof.said( "Thanks for checking off your boxes <@author1>, <@author3>!" ) assert doof.said( "Thanks for checking off your boxes <@author2>!" 
) # pylint: disable=too-many-arguments @pytest.mark.parametrize("repo_info, has_release_pr, has_expected", [ [WEB_TEST_REPO_INFO, False, False], [WEB_TEST_REPO_INFO, True, True], [LIBRARY_TEST_REPO_INFO, False, False], [LIBRARY_TEST_REPO_INFO, True, False], [ANNOUNCEMENTS_CHANNEL, False, False], [ANNOUNCEMENTS_CHANNEL, True, False], ]) async def test_startup(doof, mocker, repo_info, has_release_pr, has_expected): """ Test that doof will show help text """ doof.repos_info = [repo_info] release_pr = ReleasePR( version="version", url=repo_info.repo_url, body='Release PR body', ) mocker.async_patch('bot.get_release_pr', return_value=( release_pr if has_release_pr else None )) wait_for_checkboxes_mock = mocker.async_patch('bot.Bot.wait_for_checkboxes') wait_for_deploy_mock = mocker.async_patch('bot.Bot.wait_for_deploy') await doof.startup() # iterate once through event loop await asyncio.sleep(0) assert not doof.said("isn't evil enough until all the checkboxes are checked") if has_expected: wait_for_checkboxes_mock.assert_called_once_with(doof, manager=None, repo_info=repo_info, speak_initial=False) wait_for_deploy_mock.assert_called_once_with(doof, repo_info=repo_info) else: assert wait_for_checkboxes_mock.call_count == 0 assert wait_for_deploy_mock.call_count == 0 @pytest.mark.parametrize("needs_deploy_rc", [True, False]) @pytest.mark.parametrize("needs_deploy_prod", [True, False]) async def test_wait_for_deploy(doof, test_repo, needs_deploy_rc, needs_deploy_prod, mocker): """bot.wait_for_deploy should check if deploys are needed for RC or PROD""" def _is_release_deployed(branch, **kwargs): # pylint: disable=unused-argument """Helper function to provide right value for is_release_deployed""" if branch == "release": return not needs_deploy_prod elif branch == "release-candidate": return not needs_deploy_rc raise Exception("Unexpected branch") is_release_deployed_mock = mocker.async_patch( 'bot.is_release_deployed', side_effect=_is_release_deployed ) wait_for_deploy_rc_mock = mocker.async_patch('bot.Bot._wait_for_deploy_rc') wait_for_deploy_prod_mock = mocker.async_patch('bot.Bot._wait_for_deploy_prod') await doof.wait_for_deploy(repo_info=test_repo) is_release_deployed_mock.assert_any_call( github_access_token=GITHUB_ACCESS, repo_url=test_repo.repo_url, hash_url=test_repo.prod_hash_url, branch="release", ) is_release_deployed_mock.assert_any_call( github_access_token=GITHUB_ACCESS, repo_url=test_repo.repo_url, hash_url=test_repo.rc_hash_url, branch="release-candidate", ) if needs_deploy_rc: wait_for_deploy_rc_mock.assert_called_once_with(doof, repo_info=test_repo) else: assert wait_for_deploy_rc_mock.called is False if needs_deploy_prod: wait_for_deploy_prod_mock.assert_called_once_with(doof, repo_info=test_repo) else: assert wait_for_deploy_prod_mock.called is False async def test_wait_for_deploy_rc(doof, test_repo, mocker): """Bot._wait_for_deploy_prod should wait until repo has been deployed to RC""" wait_for_deploy_mock = mocker.async_patch('bot.wait_for_deploy') get_unchecked_patch = mocker.async_patch( 'bot.get_unchecked_authors', return_value={'author1', 'author2', 'author3'} ) org, repo = get_org_and_repo(test_repo.repo_url) release_pr = ReleasePR('version', f'https://github.com/{org}/{repo}/pulls/123456', 'body') get_release_pr_mock = mocker.async_patch('bot.get_release_pr', return_value=release_pr) await doof._wait_for_deploy_rc(repo_info=test_repo) # pylint: disable=protected-access assert doof.said('These people have commits in this release') 
wait_for_deploy_mock.assert_called_once_with( github_access_token=GITHUB_ACCESS, repo_url=test_repo.repo_url, hash_url=test_repo.rc_hash_url, watch_branch='release-candidate' ) get_unchecked_patch.assert_called_once_with( github_access_token=GITHUB_ACCESS, org=org, repo=repo, ) get_release_pr_mock.assert_called_once_with( github_access_token=GITHUB_ACCESS, org=org, repo=repo, ) async def test_wait_for_deploy_prod(doof, test_repo, mocker): """Bot._wait_for_deploy_prod should wait until repo has been deployed to production""" wait_for_deploy_mock = mocker.async_patch('bot.wait_for_deploy') version = "1.2.345" get_version_tag_mock = mocker.async_patch('bot.get_version_tag', return_value="v{}".format(version)) await doof._wait_for_deploy_prod(repo_info=test_repo) # pylint: disable=protected-access get_version_tag_mock.assert_called_once_with( github_access_token=GITHUB_ACCESS, repo_url=test_repo.repo_url, commit_hash="origin/release", ) assert doof.said('has been released to production') wait_for_deploy_mock.assert_called_once_with( github_access_token=GITHUB_ACCESS, repo_url=test_repo.repo_url, hash_url=test_repo.prod_hash_url, watch_branch='release' ) async def test_issue_release_notes(doof, test_repo, mocker): """issue release notes should list closed issues over the last seven days""" org, repo = get_org_and_repo(test_repo.repo_url) channel_id = test_repo.channel_id pr = make_pr(123, "A PR") fetch_prs = mocker.patch('bot.fetch_pull_requests_since_date', return_value=[pr]) tups = [ (pr, [(make_issue(333), make_parsed_issue(333, False))]) ] fetch_issues = mocker.patch('bot.fetch_issues_for_pull_requests', return_value=async_gen_wrapper(tups)) notes = "some release notes" make_release_notes = mocker.patch('bot.make_issue_release_notes', return_value=notes) await doof.issue_release_notes(CommandArgs( repo_info=test_repo, channel_id=test_repo.channel_id, args=[], manager="me" )) assert doof.said("Release notes for issues closed by PRs", channel_id=channel_id) assert doof.said(notes, channel_id=channel_id) fetch_prs.assert_called_once_with( github_access_token=GITHUB_ACCESS, org=org, repo=repo, since=(now_in_utc() - timedelta(days=7)).date() ) fetch_issues.assert_called_once_with( github_access_token=GITHUB_ACCESS, pull_requests=[pr], ) make_release_notes.assert_called_once_with( tups, ) async def test_handle_event_message(doof): """ Doof should handle messages appropriately """ channel = "a channel" await doof.handle_event( webhook_dict={ "token": "token", "type": "event_callback", "event": { "type": "message", "channel": channel, "text": f"<@{doof.doof_id}> hi", "user": "manager", } } ) assert doof.said("hello!") async def test_handle_event_no_callback(doof, mocker): """ If it's not a callback event, ignore it """ log_info = mocker.patch('bot.log.info') handle_message = mocker.patch('bot.Bot.handle_message') await doof.handle_event( webhook_dict={ "token": "token", "type": "different_kind", } ) assert "Received event other than event callback or challenge" in log_info.call_args[0][0] assert handle_message.called is False async def test_handle_event_not_a_message(doof, mocker): """ If the event is not a message type, ignore it """ log_info = mocker.patch('bot.log.info') handle_message = mocker.patch('bot.Bot.handle_message') await doof.handle_event( webhook_dict={ "token": "token", "type": "event_callback", "event": { "type": "other_kind", } } ) assert "Received event other than message" in log_info.call_args[0][0] assert handle_message.called is False async def 
test_handle_event_no_message(doof, mocker):
    """
    If it's an empty message, ignore it
    """
    handle_message = mocker.patch('bot.Bot.handle_message')
    await doof.handle_event(
        webhook_dict={
            "token": "token",
            "type": "event_callback",
            "event": {
                "type": "message",
                "text": "",
                "user": "manager",
            }
        }
    )
    assert handle_message.called is False


async def test_handle_event_message_changed(doof, mocker):
    """
    Edits to messages are currently ignored
    """
    handle_message = mocker.patch('bot.Bot.handle_message')
    await doof.handle_event(
        webhook_dict={
            "token": "token",
            "type": "event_callback",
            "event": {
                "type": "message",
                "subtype": "message_changed",
                "text": f"<@{doof.doof_id}> hi",
                "channel": "Channel",
                "user": "manager",
            }
        }
    )
    assert handle_message.called is False
from customers.Aurora.surgery.surgery_mappings import PROC_NM
from lib.master_fake_data_generator import FakeDataGenerator


class AURORASurgeryFakeDataGenerator(FakeDataGenerator):

    def generate_pipeline_row(self, row: int, file_size: int) -> dict:
        # `row` is the zero-based row index, so it is annotated as int (it is used
        # arithmetically below)
        f = self._faker
        r = self._random
        start, end = self.create_start_end_date()

        surgery = {
            "PAT_ID": f"{r.randint(1,file_size)}",
            "SURGERY_ID": f"{row+1}",
            "PAT_ENC_CSN_ID": self.random_or_empty(f"{r.randint(1,file_size)}"),
            "PROCEDURE_ID": f.random_number(),
            "PANEL": f.random_element(['1', '3', '2', '5', '4']),
            "PANEL_PRIMARY_PHYSICIAN_ID": self.random_or_empty(f"{r.randint(1,file_size)}"),
            "PERFORMED_YN": f.random_element(['Y', 'N']),
            "PROCEDURE_NM": f.random_element(PROC_NM),
            "PATIENT_IN_ROOM_DTTM": self.random_or_empty(
                f"{start}T{self.get_time_string()}:000-05:00"),
            "PATIENT_OUT_ROOM_DTTM": self.random_or_empty(
                f"{end}T{self.get_time_string()}:000-05:00"),
            "PROCESS_EXTRACT_DTTM": f"{self.get_current_date()}:{r.randint(100,999)}-05:00",
        }
        return surgery
import psycopg2
from setup import *
from connection import Connection
from pprint import pprint


class RegisteredCustomer(Connection):

    def __init__(self, first_name, last_name, city, email, password):
        self.first_name = first_name
        self.last_name = last_name
        self.city = city
        self.login = email
        self.password = password

    def get_self_info(self, selector=''):
        role = 'customer'
        if self._connectDb(self.login, self.password, role):
            table = ('customer',)
            fields = ('*',)
            selector = f"""where first_name = '{self.first_name}'
                        and last_name = '{self.last_name}' """
            result = self._getData(table, fields, selector)
            return result
        else:
            return 'Incorrect login or password'

    def create_order(self, data):
        role = 'customer'
        if self._connectDb(self.login, self.password, role):
            table = 'orders'
            result = self._postData(table, data)
            return result
        else:
            return 'Incorrect login or password'

    def delete_order(self, selector):
        role = 'customer'
        if self._connectDb(self.login, self.password, role):
            table = 'orders'
            selector = f"date_of_order = '{selector}'"
            result = self._deleteData(table, selector)
            return result
        else:
            return 'Incorrect login or password'

    def get_product_info(self, category, selector=''):
        role = 'customer'
        if self._connectDb(self.login, self.password, role):
            table = ('product p inner join product_category pc ',)
            fields = ('product_name', 'unit_price',)
            categories = ['product_name', 'unit_price']
            if category and category in categories and selector:
                where = f"""where {category} = '{selector}'"""
            else:
                where = ''
            selector = f"""on p.category_name = pc.id {where}"""
            result = self._getData(table, fields, selector)
            fieldNames = ["product_name", "unit_price", ]
            changeRes = []
            for item in result:
                cort = {}
                for index, element in enumerate(item):
                    cort[fieldNames[index]] = element
                changeRes.append(cort)
            return changeRes
        else:
            return 'Incorrect login or password'


if __name__ == '__main__':
    cust = RegisteredCustomer(
        'Mavk', 'Kvam', 2, 'mavklog', 'mavkpass')
    # -------------------------------
    # data = [{
    #     'employee_id': 1,
    #     'city_id': 2,
    #     'date_of_order': '2021-04-10',
    #     'customer_id': 2,
    #     'product_id': 2,
    #     'price': 252
    # }]
    # put = cust.create_order(data)
    # print(put)
    # -------------------------------
    # orders = cust.get_product_info()
    # print(orders)
    # -------------------------------
    # idf = cust._getNextId('orders')
    # print(idf)
    # -------------------------------
    # dele = cust.delete_order('2021-04-10')
    # print(dele)

    pprint(cust.get_product_info('unit_price', '40'))
from . import views
from django.urls import path

urlpatterns = [
    path('', views.home, name="home"),
    path('addDirector', views.addDirector, name="addDirector"),
]
#-*- coding:utf-8 -*-
from sys import argv

script, user_name = argv
prompt = ' uhm.. '

print "Hi %s, I'm the %s script." % (user_name, script)
print "I'd like to ask you a few questions."
print "Do you like me %s?" % user_name
likes = raw_input(prompt)

print "Where do you live %s?" % user_name
lives = raw_input(prompt)

print "What kind of computer do you have?"
computer = raw_input(prompt)

print """
Alright, so you said %r about liking me.
You live in %r. Not sure where that is.
And you have a %r computer. Nice.
""" % (likes, lives, computer)
# Copyright 2023 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). import argparse import json import os from pants_release.common import VERSION_PATH, sorted_contributors from pants_release.git import git from pants.util.dirutil import safe_mkdir from pants.util.strutil import softwrap VERSION = VERSION_PATH.read_text().strip() def announcement_text() -> str: cur_version_sha, prev_version_sha = git( "log", "-2", "--pretty=format:%h", str(VERSION_PATH) ).splitlines(keepends=False) git_range = f"{prev_version_sha}..{cur_version_sha}" all_contributors = sorted_contributors(git_range) new_contributors = sorted( [ line[3:] for line in git("diff", git_range, "CONTRIBUTORS.md").splitlines(keepends=False) if line.startswith("++ ") ] ) announcement = softwrap( f"""\ Pants {VERSION} is now available! To upgrade, set `pants_version="{VERSION}"` in the `[GLOBAL]` section of your pants.toml. """ ) if "dev" in VERSION or "a" in VERSION: announcement += "\n\nThanks to all the contributors to this release:\n\n" for contributor in all_contributors: announcement += contributor announcement += "\n" if new_contributors: announcement += "\nAnd a special shoutout to these first-time contributors:\n\n" for contributor in new_contributors: announcement += contributor announcement += "\n" announcement += ( "\nWelcome to the Pants community! We appreciate your contributions, " "and look forward to more of them in the future." ) else: announcement += "\n\nThanks to all the contributors to this release!" return announcement def main() -> None: parser = argparse.ArgumentParser() parser.add_argument( "--output-dir", required=True, help="Generate announcement messages into this directory.", ) options = parser.parse_args() safe_mkdir(options.output_dir) def dump(basename: str, text: str) -> None: with open(os.path.join(options.output_dir, basename), "w") as fp: fp.write(text) announcement = announcement_text() dump( "slack_announcement.json", json.dumps( { "blocks": [ { "type": "section", "text": { "type": "mrkdwn", "text": announcement, }, }, ] } ), ) dump("email_announcement_subject.txt", f"Pants {VERSION} is released") dump("email_announcement_body.md", announcement) if __name__ == "__main__": main()
#!/usr/bin/env /proj/sot/ska/bin/python ################################################################################################################# # # # compute_bias_data.py: extract bias related data # # # # author: t. isobe (tisobe@cfa.harvard.edu) # # # # Last Update: Dec 23, 2014 # # # ################################################################################################################# import os import sys import re import string import random import operator import math import numpy import astropy.io.fits as pyfits import unittest # #--- reading directory list # path = '/data/mta/Script/ACIS/Bad_pixels/house_keeping/bias_dir_list_py' f = open(path, 'r') data = [line.strip() for line in f.readlines()] f.close() for ent in data: atemp = re.split(':', ent) var = atemp[1].strip() line = atemp[0].strip() exec "%s = %s" %(var, line) # #--- append a path to a private folder to python directory # sys.path.append(bin_dir) sys.path.append(mta_dir) # #--- converTimeFormat contains MTA time conversion routines # import convertTimeFormat as tcnv import mta_common_functions as mcf import bad_pix_common_function as bcf # #--- temp writing file name # rtail = int(10000 * random.random()) zspace = '/tmp/zspace' + str(rtail) #--------------------------------------------------------------------------------------------------- #--- find_today_data: find which data to use for the data anaysis --- #--------------------------------------------------------------------------------------------------- def find_today_data(comp_test=''): """ find which data to use for the data anaysis Input: comp_test if it is 'test', read testdata data are also read from <hosue_keeping>/past_input_data /dsops/ap/sdp/cache/*/acis/*bias0.fits Output: today_data ---- a list of fits files to be used """ if comp_test == "test": # #--- test case # cmd = 'ls /data/mta/Script/ACIS/Bad_pixels/house_keeping/Test_data_save/Test_data/* >' + zspace os.system(cmd) today_data = mcf.readFile(zspace) mcf.rm_file(zspace) else: # #--- normal case # file = house_keeping + 'past_input_data' data1 = mcf.readFile(file) try: atemp = re.split('\/', data1[len(data1)-1]) btemp = re.split('_', atemp[5]) cut_date = btemp[0] + btemp[1] + btemp[2] cut_date = int(cut_date) except: cut_date = 0 file2 = house_keeping + 'past_input_data~' cmd = 'mv ' + file + ' ' + file2 os.system(cmd) # #--- read the current data list # cmd = 'ls /dsops/ap/sdp/cache/*/acis/*bias0.fits >' + zspace os.system(cmd) f = open(zspace, 'r') data2 = [line.strip() for line in f.readlines()] f.close() mcf.rm_file(zspace) today_data = [] fo = open(file, 'w') for ent in data2: fo.write(ent) fo.write('\n') chk = 0 for comp in data1: if ent == comp: chk = 1 break if chk == 0: atemp = re.split('\/', ent) btemp = re.split('_', atemp[5]) date = btemp[0] + btemp[1] + btemp[2] if int(date) > cut_date: today_data.append(ent) fo.close() return today_data #--------------------------------------------------------------------------------------------------- #--- extract_bias_data: extract bias data using a given data list ---- #--------------------------------------------------------------------------------------------------- def extract_bias_data(today_data, comp_test=''): """ extract bias data using a given data list Input: today_data --- a list of data fits files comp_test --- if 'test', test will be run also need: <house_keeping>/Defect/bad_col_list --- a list of known bad columns Output: <data_dir>/Bias_save/CCD<ccd>/quad<quad> see more in write_bias_data() 
<data_dir>/Info_dir/CCD<ccd>/quad<quad> see more in printBiasInfo() """ stime_list = [] for dfile in today_data: # #--- check whether file exists # chk = mcf. chkFile(dfile) if chk == 0: continue # #--- extract time stamp # stime = bcf.extractTimePart(dfile) if stime < 0: continue # #--- extract CCD information # [ccd_id, readmode, date_obs, overclock_a, overclock_b, overclock_c, overclock_d] = bcf.extractCCDInfo(dfile) if readmode != 'TIMED': continue bad_col0 = [] bad_col1 = [] bad_col2 = [] bad_col3 = [] line = house_keeping + 'Defect/bad_col_list' data = mcf.readFile(line) for ent in data: # #--- skip none data line # m = re.search('#', ent) if m is not None: continue atemp = re.split(':', ent) dccd = int(atemp[0]) if dccd == ccd_id: val = int(atemp[1]) if val <= 256: bad_col0.append(val) elif val <= 512: val -= 256 bad_col1.append(val) elif val <= 768: val -= 512 bad_col2.append(val) elif val <= 1024: val -= 768 bad_col3.append(val) # #--- trim the data at the threshold = 4000 # f = pyfits.open(dfile) sdata = f[0].data sdata[sdata < 0] = 0 sdata[sdata > 4000] = 0 f.close() # #--- compte and write out bias data # result_list = bcf.extractBiasInfo(dfile) if comp_test == 'test': return result_list else: [fep, dmode, srow, rowcnt, orcmode, dgain, biasalg, barg0, barg1, barg2, barg3, \ overclock_a, overclock_b, overclock_c, overclock_d] = result_list write_bias_data(sdata, ccd_id, 0, overclock_a, stime, bad_col0) write_bias_data(sdata, ccd_id, 1, overclock_b, stime, bad_col1) write_bias_data(sdata, ccd_id, 2, overclock_c, stime, bad_col2) write_bias_data(sdata, ccd_id, 3, overclock_d, stime, bad_col3) # #---- more bias info # printBiasInfo(ccd_id, 0, stime, fep, dmode, srow, rowcnt, orcmode, dgain, biasalg, barg0, barg1, barg2, barg3, overclock_a) printBiasInfo(ccd_id, 1, stime, fep, dmode, srow, rowcnt, orcmode, dgain, biasalg, barg0, barg1, barg2, barg3, overclock_b) printBiasInfo(ccd_id, 2, stime, fep, dmode, srow, rowcnt, orcmode, dgain, biasalg, barg0, barg1, barg2, barg3, overclock_c) printBiasInfo(ccd_id, 3, stime, fep, dmode, srow, rowcnt, orcmode, dgain, biasalg, barg0, barg1, barg2, barg3, overclock_d) stime_list.append(stime) # #--- now count how many CCDs are used for a particular observations and write out to list_of_ccd_no # countObservation(stime_list) #--------------------------------------------------------------------------------------------------- #-- write_bias_data: extract and write out bias data --- #--------------------------------------------------------------------------------------------------- def write_bias_data(sdata, ccd, quad, overclock, stime, bad_col): """ extract and write out bias data Input: sdata ---- numpy array of 2D bias iamge ccd ---- ccd # quad ---- quad # overclock --- a list of overclock values stime --- a list of time stamps bad_col --- a list of known bad columns also need: ./comb.fits --- created in extract_bias_data() Output: <data_dir>/Bias_save/CCD<ccd>/quad<quad> this contains: time ---- time in sec from 1998.1.1 avg ---- average vaule of the entire surface except bad column regions std ---- standard deviation fo the eitire surface overclock --- overclock value """ if quad == 0: start = 1 end = 256 elif quad == 1: start = 257 end = 512 elif quad == 2: start = 513 end = 768 elif quad == 3: start = 769 end = 1024 # #--- extract the part (quad) we need # tdata = sdata[1:1204, int(start):int(end)] # #--- compute average and std; if the column is listed in bad col list, skip it # sum = 0.0 scnt = 0.0 lsave = [-999 for x in range(0,255)] for i 
in range(0, 255): csum = 0.0 chk = 0 for col in bad_col: if int(col) == i: chk = 1 break if chk == 1: continue for j in range(0, 1023): csum += tdata[j, i] sum += tdata[j, i] scnt += 1.0 lsave[i] = csum / 1023.0 if scnt > 0.0: # #--- find average and then coumpute std # avg = float(sum) / float(scnt) sum2 = 0.0 scnt = 0.0 for j in range(0, 255): if lsave[j] >= 0: diff = lsave[j] - avg sum2 += diff * diff scnt += 1.0 std = math.sqrt(float(sum2)/ float(scnt)) bias_out = data_dir + 'Bias_save/CCD'+ str(ccd) + '/quad' + str(quad) fo = open(bias_out, 'a') line = "%10.1f\t%4.2f\t%4.2f\t%s.0\n" % (stime, avg, std, overclock) fo.write(line) fo.close() #--------------------------------------------------------------------------------------------------- #-- printBiasInfo: create files containing bias file information --- #--------------------------------------------------------------------------------------------------- def printBiasInfo(ccd, quad, stime, fep, dmode, srow, rowcnt, orcmode, dgain, biasalg, barg0, barg1, barg2, barg3, overclock): """ create files containing bias file information Input: ccd --- ccd # quad --- quad # steim --- time stamp in DOM fep --- fep value dmode --- mode FAINT, VFAINT etc srow --- starting row rowcnt --- # of rows orcmode --- ORC mode biasalg --- bias algorithm barg0, barg1, barg2,barg3 --- biasarg0-3 overclock-- overclock value Output: <data_dir>/Info_dir/CCD<ccd>/quad<quad> --- this contains all above information in that order """ ofile = data_dir + '/Info_dir/CCD' + str(ccd) + '/quad' + str(quad) fo = open(ofile, 'a') line = "%10.1f\t%4.2f\t" % (stime, overclock) fo.write(line) line = str(dmode) + '\t' + str(fep) + '\t' + str(srow) + '\t' + str(rowcnt) + '\t' line = line + str(orcmode) + '\t' + str(dgain) + '\t' + str(biasalg) + '\t' line = line + str(barg0) + '\t' + str(barg1) + '\t' + str(barg2) + '\t' + str(barg3) + '\n' fo.write(line) fo.close() #--------------------------------------------------------------------------------------------------- #-- countObservation: count how many CCDs are used for a particular observations and write it out -- #--------------------------------------------------------------------------------------------------- def countObservation(stime_list): """ count how many CCDs are used for a particular observations and write out to list_of_ccd_no Input: stime_list: a list of time stamps used today Output: <data_dir>/Info_dir/list_of_ccd_no """ if len(stime_list) > 0: sorted_list = sorted(stime_list) line = data_dir + '/Info_dir/list_of_ccd_no' fo = open(line, 'a') comp = sorted_list[0] cnt = 1 for i in range(1, len(sorted_list)): if sorted_list[i] == comp: cnt += 1 else: line = str(comp) + '\t' + str(cnt) + '\n' fo.write(line) comp = sorted_list[i] cnt = 1 line = str(comp) + '\t' + str(cnt) + '\n' fo.write(line) fo.close() #----------------------------------------------------------------------------------------- #-- TEST TEST TEST TEST TEST TEST TEST TEST TEST TEST TEST TEST TEST TEST TEST TEST --- #----------------------------------------------------------------------------------------- class TestFunctions(unittest.TestCase): """ testing functions """ #------------------------------------------------------------ def test_compute_bias_data(self): today_data = find_today_data('test') # #--- extract bias reated data # out = extract_bias_data(today_data, 'test') test_data = [1, 'VFAINT', 0, 1023, 0, 0, 1, 10, 26, 20, 26, 806, 577, 724, 670] self.assertEquals(out, test_data) 
#-------------------------------------------------------------------- if __name__ == '__main__': unittest.main()
# utils.py: utility functions

import math

import numpy as np


# seconds to blocks
def seconds_to_blocks(stream, secs):
    return int(math.ceil(float(secs) * stream.sample_rate / stream.block_size))


# hertz to index in fft
def hz_to_fft(stream, hz):
    return np.clip(
        int(float(hz) * stream.block_size / stream.sample_rate),
        0,
        stream.block_size
    )


# index in fft to hertz
def fft_to_hz(stream, ind):
    return float(ind) * stream.sample_rate / stream.block_size


# sketchy ASCII art depiction of fft
def sketch(stream, spec, scale=1.0 / 4000, minfreq=400, maxfreq=4000, width=80):
    ret = ''
    asc = " `.~:<=tIYAXWH#M"
    for f in np.logspace(np.log10(minfreq), np.log10(maxfreq), num=width):
        ind = min(len(asc) - 1, int(spec[hz_to_fft(stream, f)] * scale))
        ret += asc[ind]
    return ret
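# A minimal usage sketch for the conversion helpers above (illustrative only, not part
# of the original module). `_ExampleStream` is a hypothetical stand-in: the helpers only
# assume an object exposing `sample_rate` and `block_size` attributes.
class _ExampleStream:
    sample_rate = 44100  # samples per second
    block_size = 1024    # samples per FFT block


if __name__ == "__main__":
    _stream = _ExampleStream()
    # Number of blocks needed to cover 2.5 seconds of audio.
    print(seconds_to_blocks(_stream, 2.5))
    # FFT bin index closest to 440 Hz, and that bin's centre frequency back in Hz.
    _bin = hz_to_fft(_stream, 440)
    print(_bin, fft_to_hz(_stream, _bin))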
from typing import Generator
import logging
import os

import boto3

SSM_PATH_FORMAT = '/bastion/{environment}/instance_id'


def list_to_dict(obj, key="Key", value="Value"):
    return {e[key]: e[value] for e in obj}


def update_ssm_params(asg_names):
    updater = UpdateSSMParamStore()
    for asg_name in asg_names:
        updater.update(asg_name)


class UpdateSSMParamStore:
    def __init__(self):
        self._autoscaling_client = boto3.client("autoscaling")
        self._ssm_client = boto3.client("ssm")

    def update(self, asg_name):
        logging.info(asg_name)
        self._put_instance_id_into_ssm_param(asg_name)

    def _put_instance_id_into_ssm_param(self, asg_name: str):
        logging.info("Processing AutoScaling Group: %s", asg_name)
        environment = self._asg_env_from_asg_name(asg_name).upper()
        ssm_env_key = f"{environment}_ASG_INSTANCE_SSM_PATH"
        ssm_path = os.environ[ssm_env_key]
        for instance_id in self._instance_ids_from_asg_name(asg_name):
            logging.info('Storing %s in %s', instance_id, ssm_path)
            # Returns after storing the first instance id found for the ASG.
            return self._ssm_client.put_parameter(
                Name=ssm_path,
                Overwrite=True,
                Value=instance_id,
            )

    def _asg_env_from_asg_name(self, asg_name: str):
        for asg in self._describe_asg(asg_name):
            return asg["ASGTags"]["Environment"]

    def __format_asg(self, asg):
        asg["ASGTags"] = list_to_dict(asg["Tags"])
        return asg

    def _describe_asg(self, asg_name: str) -> Generator:
        for asg in self._autoscaling_client.describe_auto_scaling_groups(
            AutoScalingGroupNames=[asg_name]
        )["AutoScalingGroups"]:
            yield self.__format_asg(asg)

    def _instances_from_asg_name(self, asg_name: str) -> Generator:
        """Gets Instances from ASG"""
        for asg in self._describe_asg(asg_name):
            for instance in asg["Instances"]:
                logging.debug(instance["InstanceId"])
                yield instance

    def _instance_ids_from_asg_name(self, asg_name: str) -> list:
        """Gets Instance IDs from ASG"""
        return [i["InstanceId"] for i in self._instances_from_asg_name(asg_name)]
from compilador.objects.quadruple import Quadruple from router_solver import * import compilador.objects.function_table import compilador.objects.symbol from compilador.objects.symbol import Symbol from compilador.objects.function_table import * from compilador.objects.symbol import * import sys import re # ARCHIVO CON FUNCIONES DE AYUDA AL PARSER # CONVIERTEN INPUT DEL PARSER A SYMBLOS # Diccionario de operandos a simbolos operators = { "+": Symbol("ADD", "operation"), "-": Symbol("SUB", "operation"), "*": Symbol("MUL", "operation"), "/": Symbol("DIV", "operation"), "%": Symbol("MOD", "operation"), "(": Symbol("OP", "parentheses"), ")": Symbol("CP", "parentheses"), "[": Symbol("OSB", "parentheses"), "]": Symbol("CSB", "parentheses"), "!": Symbol("NOT", "not"), "=": Symbol("EQ", "assignment"), "<": Symbol("LT", "comparison"), ">": Symbol("GT", "comparison"), "<=": Symbol("LTE", "comparison"), ">=": Symbol("GTE", "comparison"), "==": Symbol("BEQ", "matching"), "!=": Symbol("BNEQ", "matching"), "||": Symbol("OR", "matching"), "&&": Symbol("AND", "matching"), "+=": Symbol("ADDEQ", "assignment_operation"), "-=": Symbol("SUBEQ", "assignment_operation"), "*=": Symbol("MULEQ", "assignment_operation"), "/=": Symbol("DIVEQ", "assignment_operation"), "%=": Symbol("MODEQ", "assignment_operation"), "read": Symbol("READ", "read"), "write": Symbol("WRITE", "write"), "jump_left": Symbol("JL", "obj_method"), "jump_right": Symbol("JR", "obj_method"), "jump_up": Symbol("JU", "obj_method"), "jump_down": Symbol("JD", "obj_method"), } # Aplana una lista con listas anidadas def flatten_list(data): flat_list = [] if type(data) != list: return flat_list for element in data: if type(element) == list: flat_list += flatten_list(element) elif element is not None: flat_list.append(element) return flat_list # Conviererte parametros a symbolos def get_parameters(line): paramlist = [] line = flatten_list(line) currlist = line if len(line) > 0: while len(currlist) > 0: paramlist.append(Symbol(currlist[1], currlist[0])) currlist.pop(1) currlist.pop(0) return paramlist # Convierte la lista del parser a un string def expresion_to_string(expression): if type(expression) != list: return str(expression) else: expression = flatten_list(expression) str_exp = "" for ele in expression: if ele is not None: str_exp += str(ele) return str_exp # Convierte declaración de variables a simbolos def get_variables(type, line): line = flatten_list(line) varList = {} while line[0] != ";": if line[0] == ",": line.pop(0) elif line[1] == "=": currSymbol = Symbol(line[0], type) line = line[2:] varList.update({currSymbol: expresion_to_string(line[:-1])}) line = line[-1] elif line[1] == "[": dim_1 = [] dim_2 = [] while line[2] != "]": dim_1.append(line[2]) line.pop(2) dim_1 = expresion_to_string(dim_1) line.pop(2) line.pop(1) if line[1] == "[": while line[2] != "]": dim_2.append(line[2]) line.pop(2) dim_2 = expresion_to_string(dim_2) line.pop(2) line.pop(1) if line[1] == "=": print("ERROR: Can't assign a value dimensioned type in declaration") sys.exit() if len(dim_2) > 0: dim_1 currSymbol = Symbol(line[0], type, dimension_sizes=[dim_1, dim_2]) else: currSymbol = Symbol(line[0], type, dimension_sizes=[dim_1]) varList.update({currSymbol: None}) line.pop(0) else: currSymbol = Symbol(line[0], type) varList.update({currSymbol: None}) line.pop(0) return varList # Busca variable con asignación en declaración de variable def dec_to_as(exp): if "=" not in exp: print("ERROR: Error trying to assing value in declaration") sys.exit() else: loc = exp.index("=") 
- 1 return exp[loc:-1] # Evalua constantes y les asigna un tipo def constant_eval(const): patterns = { "INT": r"(\d+|-\d+)", "FLT": r"(\d+\.\d+|-\d+\.\d+)", "CHAR": r'("|\')([^\"|^\'])("|\')', "BOOL": r"(?:true|false)", "NULL": r"null", "STR": r'("|\')([^\"|^\'])*("|\')', } for type, reg in patterns.items(): result = re.match(reg, str(const)) if result: if result.start() == 0 and result.end() == (len(str(const))): return type return None # Valida que la cantidad de dimensiones sean validas def validate_dimensions(symbol): dim_list_input = symbol.dimension_sizes dim_list_output = [] for d in dim_list_input: d = int(d) if d > 0: dim_list_output.append(d) if len(dim_list_input) == len(dim_list_output): return dim_list_output else: return None # Crea diccionario de dimensiones para un arreglo def format_array_dimensions(exp): data = { "name": exp.pop(0), "dim": [], } dim_1 = [] dim_2 = [] stack = [] stack.append(exp[0]) dim_1.append(exp.pop(0)) while len(exp) > 0 and len(stack) > 0: if exp[0].name == "OSB": stack.append(exp[0]) elif exp[0].name == "CSB": stack.pop() dim_1.append(exp.pop(0)) dim_1.pop(0) dim_1.pop(-1) data["dim"].append(dim_1) if len(exp) > 0 and exp[0].name == "OSB": stack.append(exp[0]) dim_2.append(exp.pop(0)) while len(exp) > 0 and len(stack) > 0: if exp[0].name == "OSB": stack.append(exp[0]) elif exp[0].name == "CSB": stack.pop() dim_2.append(exp.pop(0)) dim_2.pop(0) dim_2.pop(-1) data["dim"].append(dim_2) return data # Ayuda con la consistencia de los objetos simbolo al generar cuadruplo def modify_quad_object(exp, ft): ele = [exp.operand_1, exp.operand_2, exp.result_id] result = [] for e in ele: if e != None and ft.get_function_variable_table(e.scope).lookup_variable( e.name ): result.append( ft.get_function_variable_table(e.scope).get_var_symbol(e.name) ) elif e != None and ft.lookup_temporal(e): result.append(ft.get_temporal(e)) else: result.append(e) return Quadruple(exp.operator, result.pop(0), result.pop(0), result.pop(0)) # Convierte una expresión del parser en una lista de simbolos def expresion_to_symbols(exp, ft, s, d=None): if type(exp) != list: exp = [exp] else: exp = flatten_list(exp) sym_list = [] if d: exp = dec_to_as(exp) for e in exp: if e in operators: op = operators[e] op.set_scope(s.get_curr_state_table()) sym_list.append(op) elif ft.lookup_function(e) and ("(" in exp and ")" in exp): ret_loc = ( ft.get_function_variable_table(s.get_global_table()) .get_var_symbol(e) .get_return_location() ) if ret_loc != None: sym_list.append(ret_loc) else: sym_list.append( Symbol(e, ft.get_function_type(e), s.get_global_table()) ) stack = [] count = exp[exp.index(e) :].index("(") + exp.index(e) stack.append(exp[count]) del exp[count] while len(stack) > 0 and count < len(exp): if exp[count] == "(": stack.append(exp[count]) elif exp[count] == ")": stack.pop() exp.pop(count) elif ft.get_function_variable_table(s.get_curr_state_table()).lookup_variable( e ): sym_list.append( ft.get_function_variable_table(s.get_curr_state_table()).get_var_symbol( e ) ) elif ft.get_function_variable_table(s.get_global_table()).lookup_variable(e): sym_list.append( ft.get_function_variable_table(s.get_global_table()).get_var_symbol(e) ) else: c_type = constant_eval(e) if c_type != None: sym_list.append(Symbol(e, c_type, "Constant Segment")) else: print('ERROR: token "' + str(e) + '" not valid or not found') sys.exit() return sym_list
import unittest
import sys, os, inspect

currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)

import grok

correct_examples = [
    ("%{NUMBER}", "1"),
    ("%{NUMBER}", "-1"),
    ("%{NUMBER}", "1.0"),
    ("%{NUMBER}", "-2.0"),
    ("%{NUMBER}", "+2.0"),
    ("%{NUMBER}", "11"),
    ("%{NUMBER}", "-11"),
    ("%{NUMBER}", "11.0"),
    ("%{NUMBER}", "-22.0"),
    ("%{NUMBER}", "+22.0"),
    ("%{IPV4}", "1.1.1.1"),
    ("%{IP}", "1.1.1.1"),
    ("%{IPV6}", "::"),
    ("%{IPV6}", "2001:db8:3333:4444:5555:6666:7777:8888"),
    ("%{INT}", "+0000"),
    ("%{MONTHDAY}", "17"),
    ("%{MONTH}", "May"),
    (" %{MONTH} ", " May "),
    ("%{MONTH}", "Jan"),
    ("%{YEAR}", "2015"),
    ("%{TIME}", "10:05:03"),
    ("%{HTTPDATE}", "17/May/2015:10:05:03 +0000"),
    ("%{MONTHDAY}/%{YEAR}", "17/2015"),
    ("%{MONTH}/%{YEAR}", "May/2015"),
    ("%{MONTHDAY}/%{MONTH}/%{YEAR}:%{TIME} %{INT}", "17/May/2015:10:05:03 +0000"),
]

incorrect_examples = [
    ("%{NUMBER}", "1,0"),
    ("%{NUMBER}", "2-2"),
    ("%{NUMBER}", "asdpoasdk"),
    ("%{NUMBER}", "*2"),
]


class TestCommonPatterns(unittest.TestCase):

    def subtest_correct_match(self):
        for key, val in correct_examples:
            with self.subTest(msg=f"Checking pattern {key}", key=key, val=val):
                pat = grok.GrokPattern(f"^{key}$")
                self.assertIsNotNone(pat.match(val), f"{key} should match {val}")

    def subtest_incorrect_match(self):
        # Iterate the *incorrect* examples and assert that they do not match.
        for key, val in incorrect_examples:
            with self.subTest(msg=f"Checking pattern {key}", key=key, val=val):
                pat = grok.GrokPattern(f"^{key}$")
                self.assertIsNone(pat.match(val), f"{key} should not match {val}")


def build_assert_not_none(key, val):
    def tmp(self):
        pat = grok.GrokPattern(f"^{key}$")
        self.assertIsNotNone(pat.match(val), f"{key} should match {val}")
    return tmp


def build_assert_none(key, val):
    def tmp(self):
        pat = grok.GrokPattern(f"^{key}$")
        self.assertIsNone(pat.match(val), f"{key} should not match {val}")
    return tmp


counter = {}
for key, val in correct_examples:
    if key not in counter:
        counter[key] = 0
    counter[key] += 1
    setattr(TestCommonPatterns, f"test_correct_match_{key}_{counter[key]}",
            build_assert_not_none(key, val))

counter = {}
for key, val in incorrect_examples:
    if key not in counter:
        counter[key] = 0
    counter[key] += 1
    setattr(TestCommonPatterns, f"test_incorrect_match_{key}_{counter[key]}",
            build_assert_none(key, val))


if __name__ == '__main__':
    unittest.main()
#
# placeholder for general utility functions
#

def default_dic(defaults, actual, inplace=False, clobber=False):
    """
    return a composite of the defaults and actual dictionaries
    """
    # make a copy of actual
    if inplace:
        if clobber:
            defaults.clear()
        defaults.update(actual)
    else:
        if clobber:
            return actual.copy()
        else:
            return dict(defaults, **actual)

    # loop through defaults
    # if value in final, use that, otherwise use default
    # for k, v in defaults.iteritems():
    #     final[k] = final.get(k, v)
    # return final


# class DefDic(object):
#     """
#     default dictionary
#     """
#     def __init__(self,
#                  Clobber=False,
#                  **kwargs
#                  ):
#         """
#         define a default dictionary
#
#         *Parameters*
#
#         Dic: dict
#             default dictionary
#         Clobber: bool
#             whether to clobber defaults
#         """
#         self.Dic = {}
#         self.set_Dic(Clobber, **kwargs)
#
#     def get_Dic(self, Clobber=False, **kwargs):
#         """
#         get combined Dictionary
#         """
#         if Clobber:
#             return kwargs
#         else:
#             return default_dic(self.Dic, kwargs)
#
#     def set_Dic(self, Clobber=False, **kwargs):
#         """
#         set default dictionary
#         """
#         self.Dic = self.get_Dic(Clobber, **kwargs)
#
#     def __repr__(self):
#         return repr(self.Dic)
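# A small usage sketch for default_dic above (illustrative only, not part of the
# original module): merge user-supplied options over a set of defaults.
if __name__ == "__main__":
    defaults = {"color": "blue", "width": 2}
    actual = {"width": 5}

    # Non-clobbering merge: defaults are filled in, actual values win on conflicts.
    print(default_dic(defaults, actual))                  # {'color': 'blue', 'width': 5}

    # Clobbering merge: only the actual values survive.
    print(default_dic(defaults, actual, clobber=True))    # {'width': 5}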
# You have decided to write a converter from Python code to Java code. Since Java uses the
# CamelCase naming convention, you want to learn how to convert names from underscore style
# into that format.
# To start, write a program that converts variable names written in underscore style into
# UpperCamelCase.
# In underscore style, every word in a name is written in lower case and words are separated
# by the underscore character "_". UpperCamelCase means every word starts with a capital
# letter and there are no separators between words.
# Input format:
# A single line containing a name written in underscore format.
# Output format:
# A line containing the given name in UpperCamelCase format.
# Sample Input 1:
# my_first_class
# Sample Output 1:
# MyFirstClass
# Sample Input 2:
# a
# Sample Output 2:
# A

print("".join(s.capitalize() for s in input().strip().split("_")))
m = int(input())
for c in range(1, m + 1):
    print(c, c ** 2, c ** 3)
    print(c, c ** 2 + 1, c ** 3 + 1)
import sys, string, math

u = input()
L = list(u)
# Swap characters pairwise; stop at len(L) - 1 so an odd-length string leaves its
# last character in place instead of raising IndexError.
for i in range(0, len(L) - 1, 2):
    L[i], L[i + 1] = L[i + 1], L[i]
re = ''.join(L)
print(re)
import hashlib

f = open("rainbowtable.txt", 'w')

for i in range(10000000, 100000000):
    session = "%dsalt_for_you" % i
    h = session
    for j in range(0, 500):
        h = hashlib.sha1(h.encode('utf-8')).hexdigest()
    data = session + " - " + h + "\n"
    f.write(data)

f.close()
from ..model import Conference, Room, Speaker, Event, SimpleTZ import re import logging from datetime import date, datetime try: from lxml import etree except ImportError: import xml.etree.ElementTree as etree RE_DATE = re.compile(r'(\d{4})-(\d\d)-(\d\d)') RE_MINUTE = re.compile(r'^(\d+):(\d+)(?: ([AP]M))?$') RE_TIMEZONE = re.compile(r'\d\d:?\d\d(Z|[+-]\d\d:?\d\d)') DATE_ISO_FORMAT = '%Y-%m-%dT%H:%M:%S%z' def from_minutes(dur): if dur is None: return None if ':' in dur: parts = [int(x) for x in dur.split(':')] return parts[0] * 60 + parts[1] return round(float(dur)) def parse_time_with_day(timestr, day): m = RE_MINUTE.match(timestr) if m: hour = int(m.group(1)) if m.group(3) == 'PM' and hour < 12: hour += 12 elif m.group(3) == 'AM' and hour == 12: hour = 0 return datetime(day.year, day.month, day.day, hour, int(m.group(2))) if 'T' in timestr: return datetime.strptime(timestr, DATE_ISO_FORMAT) raise ValueError('Unknown format: {}'.format(timestr)) def find_timezone(timestr): m = RE_TIMEZONE.search(timestr) if m: return SimpleTZ(m.group(1)) return None def getttext(root, tag): n = root.find(tag) if n is None or n.text is None: return None return n.text.strip() class FrabXmlImporter: name = 'xml' def check(self, head): for k in ('conference', 'day', 'title', 'room', 'event', 'start', 'end'): if '<'+k not in head: return False return True def parse(self, fileobj): root = etree.parse(fileobj).getroot() xconf = root.find('conference') conf = Conference(getttext(xconf, 'title')) conf.timeslot = from_minutes(getttext(xconf, 'timeslot_duration')) conf.slug = getttext(xconf, 'acronym') conf.url = getttext(xconf, 'base_url') or getttext(xconf, 'baseurl') speakers = {} for xday in root.findall('day'): m = RE_DATE.match(xday.get('date')) day = date(int(m.group(1)), int(m.group(2)), int(m.group(3))) conf.days.add(day) for xroom in xday.findall('room'): room = Room(xroom.get('name')) conf.rooms.add(room) for xevent in xroom.findall('event'): title = getttext(xevent, 'title') event = Event(title, id=xevent.get('id'), guid=xevent.get('guid')) event.room = room event.start = parse_time_with_day(getttext(xevent, 'start'), day) if xevent.find('date') is not None: timezone = find_timezone(getttext(xevent, 'date')) if not conf.timezone: conf.timezone = timezone elif timezone != conf.timezone: logging.warning( 'Error: timezone %s in %s differs from last timezone %s', timezone, getttext(xevent, 'date'), conf.timezone) duration = getttext(xevent, 'duration') if not duration: continue event.duration = from_minutes(duration) event.subtitle = getttext(xevent, 'subtitle') event.slug = getttext(xevent, 'slug') event.url = getttext(xevent, 'url') event.subtitle = getttext(xevent, 'subtitle') rec = xevent.find('recording') if rec is None: event.license = None event.can_record = True else: event.license = getttext(rec, 'license') event.can_record = getttext(rec, 'optout') != 'true' event.language = getttext(xevent, 'language') event.track = getttext(xevent, 'track') event.abstract = getttext(xevent, 'abstract') event.description = getttext(xevent, 'description') for xperson in xevent.find('persons'): person_id = xperson.get('id') if person_id not in speakers: speakers[person_id] = Speaker(xperson.text, id=person_id) event.speakers.append(speakers[person_id]) conf.events.append(event) return conf
# Generated by Django 3.1 on 2020-08-10 10:04

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('record_label', '0004_auto_20200810_0947'),
    ]

    operations = [
        migrations.AlterField(
            model_name='release',
            name='description',
            field=models.CharField(blank=True, max_length=1000, null=True),
        ),
    ]
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 22 16:40:50 2018

@author: thomas
"""
import os

dir_path = os.path.dirname(os.path.realpath(__file__)) + '/'
cwd = os.getcwd()
St = os.path.basename(cwd)
Re = os.path.split(os.path.dirname(cwd))[-1]
print('Re = ', Re)

start = 0
end = 19

OpenDatabase("localhost:" + dir_path + "viz2D/dumps.visit")
DeleteActivePlots()
SetTimeSliderState(0)

AddPlot("Vector", "U")
AddPlot("Pseudocolor", "Omega")

'''AddOperator("Slice",1)
SliceAtts = SliceAttributes()
SliceAtts.originType = SliceAtts.Intercept  # Point, Intercept, Percent, Zone, Node
SliceAtts.originPoint = (0, 0, 0)
SliceAtts.originIntercept = 0
SliceAtts.originPercent = 0
SliceAtts.originZone = 0
SliceAtts.originNode = 0
SliceAtts.normal = (0, 0, 1)
SliceAtts.axisType = SliceAtts.ZAxis  # XAxis, YAxis, ZAxis, Arbitrary, ThetaPhi
SliceAtts.upAxis = (0, 1, 0)
SliceAtts.project2d = 1
SliceAtts.interactive = 1
SliceAtts.flip = 0
SliceAtts.originZoneDomain = 0
SliceAtts.originNodeDomain = 0
SliceAtts.meshName = "amr_mesh"
SliceAtts.theta = 0
SliceAtts.phi = 90
SetOperatorOptions(SliceAtts)'''

AddOperator("Resample", 1)
ResampleAtts = ResampleAttributes()
ResampleAtts.useExtents = 1
ResampleAtts.startX = -4
ResampleAtts.endX = 4
ResampleAtts.samplesX = 512
ResampleAtts.startY = -4
ResampleAtts.endY = 4
ResampleAtts.samplesY = 512
ResampleAtts.is3D = 0
ResampleAtts.startZ = 0
ResampleAtts.endZ = 1
ResampleAtts.samplesZ = 10
ResampleAtts.tieResolver = ResampleAtts.random  # random, largest, smallest
ResampleAtts.tieResolverVariable = "default"
ResampleAtts.defaultValue = 0
ResampleAtts.distributedResample = 1
ResampleAtts.cellCenteredOutput = 0
SetOperatorOptions(ResampleAtts)

DrawPlots()

# for i in range(20):
#     # Change time states
#     SetTimeSliderState(start+i)
#     DrawPlots()

# Export the database (ALL TIMES)
dbAtts = ExportDBAttributes()
dbAtts.allTimes = 1  # ALL TIMES
dbAtts.db_type = "VTK"
dbAtts.dirname = dir_path + "VTK/"
# dbAtts.filename = "Re5.Time%04d" % (start+i)
dbAtts.filename = "RAW"
# Tuple of variables to export. "default" means the plotted variable.
# Note that we're also exporting the vector expression "vec"
dbAtts.variables = ("default", "amr_mesh", "levels", "patches", "U")
ExportDatabase(dbAtts)

SaveSession(dir_path + "Export2VTK2D.session")
exit()
import json

from django.views.decorators.csrf import csrf_exempt
from django.http import HttpResponse


@csrf_exempt
def index(request):
    return HttpResponse("Hello API!")
# General transport settings which can be added to any UnknownQuantity import numpy as np from .. DREAMException import DREAMException TRANSPORT_NONE = 1 TRANSPORT_PRESCRIBED = 2 TRANSPORT_RECHESTER_ROSENBLUTH = 3 TRANSPORT_SVENSSON = 4 INTERP3D_NEAREST = 0 INTERP3D_LINEAR = 1 INTERP1D_NEAREST = 0 INTERP1D_LINEAR = 1 SVENSSON_INTERP1D_PARAM_TIME = 1 SVENSSON_INTERP1D_PARAM_IP = 2 BC_CONSERVATIVE = 1 # Assume no flux through r=rmax BC_F_0 = 2 # Assume f=0 outside the plasma BC_DF_CONST = 3 # Assume that df/dr is constant on the plasma boundary class TransportSettings: def __init__(self, kinetic=False): """ Constructor. :param bool kinetic: If ``True``, the coefficient will be assumed kinetic (4D). Otherwise fluid (2D). """ self.kinetic = kinetic self.type = TRANSPORT_NONE # Prescribed advection self.ar = None self.ar_t = None self.ar_r = None self.ar_p = None self.ar_xi = None self.ar_ppar = None self.ar_pperp = None self.ar_interp3d = None # Prescribed diffusion self.drr = None self.drr_t = None self.drr_r = None self.drr_p = None self.drr_xi = None self.drr_ppar = None self.drr_pperp = None self.drr_interp3d = None # Svensson pstar self.pstar = None ## Should interp1d_param be here or inside the advection and diffusion classes? self.interp1d_param = None # Svensson advection self.s_ar = None self.s_ar_r = None self.s_ar_t = None self.s_ar_p = None self.s_ar_xi = None self.s_ar_ppar = None self.s_ar_pperp = None self.s_ar_interp3d = None self.s_ar_interp1d = None # Svensson diffusion self.s_drr = None self.s_drr_r = None self.s_drr_t = None self.s_drr_p = None self.s_drr_xi = None self.s_drr_ppar = None self.s_drr_pperp = None self.s_drr_interp3d = None self.s_drr_interp1d = None # Rechester-Rosenbluth (diffusive) transport self.dBB = None self.dBB_t = None self.dBB_r = None self.boundarycondition = BC_CONSERVATIVE def isKinetic(self): return self.kinetic def prescribeAdvection(self, ar, t=None, r=None, p=None, xi=None, ppar=None, pperp=None): """ Set the advection coefficient to use. """ self._prescribeCoefficient('ar', coeff=ar, t=t, r=r, p=p, xi=xi, ppar=ppar, pperp=pperp) def prescribeDiffusion(self, drr, t=None, r=None, p=None, xi=None, ppar=None, pperp=None): """ Set the diffusion coefficient to use. """ self._prescribeCoefficient('drr', coeff=drr, t=t, r=r, p=p, xi=xi, ppar=ppar, pperp=pperp) def setSvenssonPstar(self,pstar): """ Set the lower momentum bound for the runaway, radial transport, region. """ self.pstar=float(pstar) def setSvenssonInterp1dParam(self, interp1d_param=SVENSSON_INTERP1D_PARAM_TIME): """ Set the lower momentum bound for the runaway, radial transport, region. """ self.interp1d_param = int(interp1d_param) def setBoundaryCondition(self, bc=None): """ Set the type of boundary condition. (Default is BC_CONSERVATIVE) """ self.boundarycondition = bc def setSvenssonAdvection(self, ar, t=None, Ip=None, r=None, p=None, xi=None, ppar=None, pperp=None, interp3d=INTERP3D_LINEAR, interp1d=INTERP1D_LINEAR): r""" Set the Svensson advection coefficient to use. :param ar: Advection coefficient, :math:`A_r(t,r,\xi_0,p)` or :math:`A_r(I_p,r,\xi_0,p)`. :param t: Time vector for which ``ar`` is defined (if ``Ip`` is not provided). :param Ip: Plasma current vector for which ``ar`` is defined (if ``t`` is not provided). :param r: Radial grid vector for which ``ar`` is defined. :param p: Momentum grid vector for which ``ar`` is defined. :param xi: Pitch grid vector for which ``ar`` is defined. 
:param interp3d: Interpolation method to use when interpolating in (r,xi,p) part of coefficient. :param interp1d: Interpolation method to use when interpolating in time/Ip variable. """ if self.interp1d_param == SVENSSON_INTERP1D_PARAM_TIME: if t is not None: self._prescribeCoefficient('s_ar', coeff=ar, t=t, r=r, p=p, xi=xi, ppar=ppar, pperp=pperp,interp3d=interp3d,override_kinetic=True) else: raise TransportException('interp1d_param has been set to "time", but no time variable was given.') elif self.interp1d_param == SVENSSON_INTERP1D_PARAM_IP: if Ip is not None: self._prescribeCoefficient('s_ar', coeff=ar, t=Ip, r=r, p=p, xi=xi, ppar=ppar, pperp=pperp,interp3d=interp3d,override_kinetic=True) else: raise TransportException('interp1d_param has been set to "Ip", but no plasma-current variable was given.') else: raise TransportException('interp1d_param has not been set or is invalid. It must be set before setting the Svensson transport coefficients.') self.type = TRANSPORT_SVENSSON self.s_ar_interp1d = interp1d def setSvenssonDiffusion(self, drr, t=None, Ip=None, r=None, p=None, xi=None, ppar=None, pperp=None,interp3d=INTERP3D_LINEAR, interp1d=INTERP1D_LINEAR): r""" Set the Svensson diffusion coefficient to use. :param drr: Diffusion coefficient, :math:`D_{rr}(t,r,\xi_0,p)` or :math:`D_{rr}(I_p,r,\xi_0,p)`. :param t: Time vector for which ``drr`` is defined (if ``Ip`` is not provided). :param Ip: Plasma current vector for which ``drr`` is defined (if ``t`` is not provided). :param r: Radial grid vector for which ``drr`` is defined. :param p: Momentum grid vector for which ``drr`` is defined. :param xi: Pitch grid vector for which ``drr`` is defined. :param interp3d: Interpolation method to use when interpolating in (r,xi,p) part of coefficient. :param interp1d: Interpolation method to use when interpolating in time/Ip variable. """ if self.interp1d_param == SVENSSON_INTERP1D_PARAM_TIME: if t is not None: self._prescribeCoefficient('s_drr', coeff=drr, t=t, r=r, p=p, xi=xi, ppar=ppar, pperp=pperp,interp3d=interp3d,override_kinetic=True) else: raise TransportException('interp1d_param has been set to "time", but no time variable was given.') elif self.interp1d_param == SVENSSON_INTERP1D_PARAM_IP: if Ip is not None: self._prescribeCoefficient('s_drr', coeff=drr, t=Ip, r=r, p=p, xi=xi, ppar=ppar, pperp=pperp,interp3d=interp3d,override_kinetic=True) else: raise TransportException('interp1d_param has been set to "Ip", but no plasma-current variable was given.') else: raise TransportException('interp1d_param has not been set or is invalid. It must be set before setting the Svensson transport coefficients.') self.type = TRANSPORT_SVENSSON self.s_drr_interp1d = interp1d def _prescribeCoefficient(self, name, coeff, t=None, r=None, p=None, xi=None, ppar=None, pperp=None,interp3d=INTERP3D_LINEAR, override_kinetic=False): """ General method for prescribing an advection or diffusion coefficient. 
""" self.type = TRANSPORT_PRESCRIBED setattr(self, name+'_interp3d', interp3d) if np.isscalar(coeff): r = np.array([0]) t = np.array([0]) p = np.array([0]) xi = np.array([0]) if self.kinetic or override_kinetic: coeff = coeff * np.ones((1,)*4) else: coeff = coeff * np.ones((1,)*2) r = np.asarray(r) t = np.asarray(t) if r.ndim != 1: r = np.reshape(r, (r.size,)) if t.ndim != 1: t = np.reshape(t, (t.size,)) if (self.kinetic == False and not override_kinetic) and len(coeff.shape) == 2: setattr(self, name, coeff) setattr(self, name+'_r', r) setattr(self, name+'_t', t) elif (self.kinetic == True or override_kinetic) and len(coeff.shape) == 4: # Verify that the momentum grid is given if p is not None and xi is not None: ppar, pperp = None, None elif ppar is not None and pperp is not None: p, xi = None, None else: raise TransportException("No momentum grid provided for the 4D transport coefficient.") setattr(self, name, coeff) setattr(self, name+'_r', r) setattr(self, name+'_t', t) if p is not None: setattr(self, name+'_p', p) setattr(self, name+'_xi', xi) else: setattr(self, name+'_ppar', ppar) setattr(self, name+'_pperp', pperp) else: raise TransportException("Invalid dimensions of prescribed coefficient: {}. Expected {} dimensions.".format(coeff.shape, 4 if (self.kinetic or override_kinetic) else 2)) def setMagneticPerturbation(self, dBB, t=None, r=None): """ Prescribes the evolution of the magnetic perturbation level (dB/B). :param dBB: Magnetic perturbation level. :param t: Time grid on which the perturbation is defined. :param r: Radial grid on which the perturbation is defined. """ self.type = TRANSPORT_RECHESTER_ROSENBLUTH if np.isscalar(dBB): dBB = dBB * np.ones((1,1)) r = np.array([0]) t = np.array([0]) r = np.asarray(r) t = np.asarray(t) if r.ndim != 1: r = np.reshape(r, (r.size,)) if t.ndim != 1: t = np.reshape(t, (t.size,)) self.dBB_r = r self.dBB_t = t self.dBB = dBB def setBoundaryCondition(self, bc): """ Set the boundary condition to use for the transport. """ self.boundarycondition = bc def fromdict(self, data): """ Set all options from a dictionary. 
""" # Prescribed advection self.ar = None self.ar_r = None self.ar_t = None self.ar_p = None self.ar_xi = None self.ar_ppar = None self.ar_pperp = None self.ar_interp3d =None # Prescribed diffusion self.drr = None self.drr_r = None self.drr_t = None self.drr_p = None self.drr_xi = None self.drr_ppar = None self.drr_pperp = None self.drr_interp3d =None # Svensson pstar self.pstar = None self.interp1d_param = None # Svensson advection self.s_ar = None self.s_ar_r = None self.s_ar_t = None self.s_ar_p = None self.s_ar_xi = None self.s_ar_ppar = None self.s_ar_pperp = None self.s_ar_interp3d = None self.s_ar_interp1d = None # Svensson diffusion self.s_drr = None self.s_drr_r = None self.s_drr_t = None self.s_drr_p = None self.s_drr_xi = None self.s_drr_ppar = None self.s_drr_pperp = None self.s_drr_interp3d = None self.s_drr_interp1d = None # Rechester--Rosenbluth self.dBB = None self.dBB_r = None self.dBB_t = None if 'type' in data: self.type = data['type'] if 'boundarycondition' in data: self.boundarycondition = data['boundarycondition'] if 'ar' in data: self.ar = data['ar']['x'] self.ar_r = data['ar']['r'] self.ar_t = data['ar']['t'] self.ar_interp3d = data['ar']['interp3d'] if self.kinetic: if 'p' in data['ar']: self.ar_p = data['ar']['p'] if 'xi' in data['ar']: self.ar_xi = data['ar']['xi'] if 'ppar' in data['ar']: self.ar_ppar = data['ar']['ppar'] if 'pperp' in data['ar']: self.ar_pperp = data['ar']['pperp'] if 'drr' in data: self.drr = data['drr']['x'] self.drr_r = data['drr']['r'] self.drr_t = data['drr']['t'] self.drr_interp3d = data['drr']['interp3d'] if self.kinetic: if 'p' in data['drr']: self.drr_p = data['drr']['p'] if 'xi' in data['drr']: self.drr_xi = data['drr']['xi'] if 'ppar' in data['drr']: self.drr_ppar = data['drr']['ppar'] if 'pperp' in data['drr']: self.drr_pperp = data['drr']['pperp'] if 'pstar' in data: self.pstar = data['pstar'] if 'interp1d_param' in data: self.interp1d_param = data['interp1d_param'] if 's_ar' in data: self.s_ar = data['s_ar']['x'] self.s_ar_r = data['s_ar']['r'] self.s_ar_t = data['s_ar']['t'] self.s_ar_interp3d = data['s_ar']['interp3d'] self.s_ar_interp1d = data['s_ar']['interp1d'] if 'p' in data['s_ar']: self.s_ar_p = data['s_ar']['p'] if 'xi' in data['s_ar']: self.s_ar_xi = data['s_ar']['xi'] if 'ppar' in data['s_ar']: self.s_ar_ppar = data['s_ar']['ppar'] if 'pperp' in data['s_ar']: self.s_ar_pperp = data['s_ar']['pperp'] if 's_drr' in data: self.s_drr = data['s_drr']['x'] self.s_drr_r = data['s_drr']['r'] self.s_drr_t = data['s_drr']['t'] self.s_drr_interp3d = data['s_drr']['interp3d'] self.s_drr_interp1d = data['s_drr']['interp1d'] if 'p' in data['s_drr']: self.s_drr_p = data['s_drr']['p'] if 'xi' in data['s_drr']: self.s_drr_xi = data['s_drr']['xi'] if 'ppar' in data['s_drr']: self.s_drr_ppar = data['s_drr']['ppar'] if 'pperp' in data['s_drr']: self.s_drr_pperp = data['s_drr']['pperp'] if 'dBB' in data: self.dBB = data['dBB']['x'] self.dBB_r = data['dBB']['r'] self.dBB_t = data['dBB']['t'] def todict(self): """ Returns these settings as a dictionary. """ data = { 'type': self.type, 'boundarycondition': self.boundarycondition } # Advection? if self.type == TRANSPORT_PRESCRIBED and self.ar is not None: data['ar'] = { 'x': self.ar, 'r': self.ar_r, 't': self.ar_t, 'interp3d': self.ar_interp3d } if self.kinetic: if self.ar_p is not None: data['ar']['p'] = self.ar_p data['ar']['xi'] = self.ar_xi else: data['ar']['ppar'] = self.ar_ppar data['ar']['pperp'] = self.ar_pperp # Diffusion? 
if self.type == TRANSPORT_PRESCRIBED and self.drr is not None: data['drr'] = { 'x': self.drr, 'r': self.drr_r, 't': self.drr_t, 'interp3d': self.drr_interp3d } if self.kinetic: if self.drr_p is not None: data['drr']['p'] = self.drr_p data['drr']['xi'] = self.drr_xi else: data['drr']['ppar'] = self.drr_ppar data['drr']['pperp'] = self.drr_pperp # Svensson pstar if self.type == TRANSPORT_SVENSSON and self.pstar is not None: data['pstar'] = self.pstar # Svensson 1d interpolatino method if self.type == TRANSPORT_SVENSSON and self.interp1d_param is not None: data['interp1d_param'] = self.interp1d_param # Svensson Advection? if self.type == TRANSPORT_SVENSSON and self.s_ar is not None: data['s_ar'] = { 'x': self.s_ar, 'r': self.s_ar_r, 't': self.s_ar_t, 'interp3d': self.s_ar_interp3d, 'interp1d': self.s_ar_interp1d, } if self.s_ar_p is not None: data['s_ar']['p'] = self.s_ar_p data['s_ar']['xi'] = self.s_ar_xi else: data['s_ar']['ppar'] = self.s_ar_ppar data['s_ar']['pperp'] = self.s_ar_pperp # Svensson Diffusion? if self.type == TRANSPORT_SVENSSON and self.s_drr is not None: data['s_drr'] = { 'x': self.s_drr, 'r': self.s_drr_r, 't': self.s_drr_t, 'interp3d': self.s_drr_interp3d, 'interp1d': self.s_drr_interp1d, } if self.s_drr_p is not None: data['s_drr']['p'] = self.s_drr_p data['s_drr']['xi'] = self.s_drr_xi else: data['s_drr']['ppar'] = self.s_drr_ppar data['s_drr']['pperp'] = self.s_drr_pperp if self.type == TRANSPORT_RECHESTER_ROSENBLUTH and self.dBB is not None: data['dBB'] = { 'x': self.dBB, 'r': self.dBB_r, 't': self.dBB_t } return data def verifySettings(self): """ Verify that the settings are consistent. """ if self.type == TRANSPORT_NONE: pass elif self.type == TRANSPORT_PRESCRIBED: self.verifySettingsCoefficient('ar') self.verifySettingsCoefficient('drr') self.verifyBoundaryCondition() elif self.type == TRANSPORT_SVENSSON: self.verifySettingsCoefficient('s_ar',override_kinetic=True) self.verifySettingsCoefficient('s_drr',override_kinetic=True) if self.pstar is None or type(self.pstar) != float: raise TransportException("pstar not defined or wrong type.") elif self.pstar<=0: raise TransportException("pstar = %0.3f <= 0 not allowed." % self.pstar) self.verifyBoundaryCondition() elif self.type == TRANSPORT_RECHESTER_ROSENBLUTH: self.verifySettingsRechesterRosenbluth() self.verifyBoundaryCondition() else: raise TransportException("Unrecognized transport type: {}".format(self.type)) def verifyBoundaryCondition(self): """ Verify that the boundary condition has been correctly configured. """ bcs = [BC_CONSERVATIVE, BC_F_0, BC_DF_CONST] if self.boundarycondition not in bcs: raise TransportException("Invalid boundary condition specified for transport: {}".format(self.boundarycondition)) def verifySettingsCoefficient(self, coeff, override_kinetic=False): """ Verify consistency of the named prescribed transport coefficient. """ g = lambda v : self.__dict__[coeff+v] c = g('') if c is None: return if self.kinetic or override_kinetic: if c.ndim != 4: raise TransportException("{}: Invalid dimensions of transport coefficient: {}".format(coeff, c.shape)) elif g('_t').ndim != 1 or g('_t').size != c.shape[0]: raise TransportException("{}: Invalid dimensions of time vector. Expected {} elements.".format(coeff, c.shape[0])) elif g('_r').ndim != 1 or g('_r').size != c.shape[1]: raise TransportException("{}: Invalid dimensions of radius vector. 
Expected {} elements.".format(coeff, c.shape[1])) if g('_interp3d') not in [INTERP3D_LINEAR, INTERP3D_NEAREST]: raise TransportException("{}: Invalid value assigned to interp3d.".format(coeff)) if coeff+'v' in self.__dict__: if g('_interp1d') not in [INTERP1D_LINEAR, INTERP1D_NEAREST]: raise TransportException("{}: Invalid value assigned to interp1d.".format(coeff)) if g('_p') is not None or g('_xi') is not None: if g('_xi').ndim != 1 or g('_xi').size != c.shape[2]: raise TransportException("{}: Invalid dimensions of xi vector. Expected {} elements.".format(coeff, c.shape[2])) elif g('_p').ndim != 1 or g('_p').size != c.shape[3]: raise TransportException("{}: Invalid dimensions of p vector. Expected {} elements.".format(coeff, c.shape[3])) elif g('_ppar') is not None or g('_pperp') is not None: if g('_pperp').ndim != 1 or g('_pperp').size != c.shape[2]: raise TransportException("{}: Invalid dimensions of pperp vector. Expected {} elements.".format(coeff, c.shape[2])) elif g('_ppar').ndim != 1 or g('_ppar').size != c.shape[3]: raise TransportException("{}: Invalid dimensions of ppar vector. Expected {} elements.".format(coeff, c.shape[3])) else: raise TransportException("No momentum grid provided for transport coefficient '{}'.".format(coeff)) else: if c.ndim != 2: raise TransportException("{}: Invalid dimensions of transport coefficient: {}".format(coeff, c.shape)) elif g('_t').ndim != 1 or g('_t').size != c.shape[0]: raise TransportException("{}: Invalid dimensions of time vector. Expected {} elements.".format(coeff, c.shape[0])) elif g('_r').ndim != 1 or g('_r').size != c.shape[1]: raise TransportException("{}: Invalid dimensions of radius vector. Expected {} elements.".format(coeff, c.shape[1])) def verifySettingsRechesterRosenbluth(self): """ Verify consistency of the Rechester-Rosenbluth transport settings. """ if self.dBB.ndim != 2: raise TransportException("Rechester-Rosenbluth: Invalid dimensions of transport coefficient: {}".format(self.dBB.shape)) elif self.dBB_t.ndim != 1 or self.dBB_t.size != self.dBB.shape[0]: raise TransportException("Rechester-Rosenbluth: Invalid dimensions of time vector. Expected {} elements.".format(self.dBB.shape[0])) elif self.dBB_r.ndim != 1 or self.dBB_r.size != self.dBB.shape[1]: raise TransportException("Rechester-Rosenbluth: Invalid dimensions of radius vector. Expected {} elements.".format(self.dBB.shape[1])) class TransportException(DREAMException): def __init__(self, msg): super().__init__(msg)
# Definitions of enums and slices used throughout the code

from enum import Enum


class Met(Enum):
    """Enum of the metrics/coordinate systems supported by HARM"""
    MINKOWSKI = 0
    MKS = 1
    #MMKS = 2  # TODO put back support?
    FMKS = 3
    # Exotic metrics from KORAL et al
    EKS = 4
    MKS3 = 5
    # For conversions, etc
    KS = 6


class Loci(Enum):
    """Location enumerated value.
    Locations are defined by:
      ^ theta
      |----------------------
      |                     |
      |                     |
      |FACE1     CENT       |
      |                     |
      |CORN      FACE2      |
      -------------------------> R
    With FACE3 as the plane in phi"""
    FACE1 = 0
    FACE2 = 1
    FACE3 = 2
    CENT = 3
    CORN = 4


class Var(Enum):
    """All possible variables HARM supports. May not all be used in a given run"""
    RHO = 0
    UU = 1
    U1 = 2
    U2 = 3
    U3 = 4
    B1 = 5
    B2 = 6
    B3 = 7
    KTOT = 8
    KEL = 9


class Slices:
    """These slices can be constructed easily and define the bulk (all physical)
    fluid zones, separately from the ghost zones used for MPI syncing and boundary conditions
    Careful not to use the slices on arrays which are themselves slices of the whole!
    (TODO fix this requirement?)
    """

    def __init__(self, G):
        # Slices to represent variables, to add to below for picking out e.g. bulk of RHO
        self.allv = (slice(None),)
        self.RHO = (Var.RHO.value,)
        self.UU = (Var.UU.value,)
        self.U1 = (Var.U1.value,)
        self.U2 = (Var.U2.value,)
        self.U3 = (Var.U3.value,)
        self.B1 = (Var.B1.value,)
        self.B2 = (Var.B2.value,)
        self.B3 = (Var.B3.value,)
        self.KTOT = (Var.KTOT.value,)
        self.KEL = (Var.KEL.value,)

        # Single slices for putting together operations in bounds.py. May be replaced by loopy kernels
        # Name single slices for character count
        ng = G.NG
        self.a = slice(None)
        self.b = slice(ng, -ng)
        self.bulk = (self.b, self.b, self.b)
        self.all = (slice(None), slice(None), slice(None))

        # "Halo" of 1 zone
        self.bh1 = slice(ng - 1, -ng + 1)
        self.bulkh1 = (self.bh1, self.bh1, self.bh1)

        # For manual finite-differencing. Probably very slow
        self.diffr1 = (slice(ng + 1, -ng + 1), self.b, self.b)
        self.diffr2 = (self.b, slice(ng + 1, -ng + 1), self.b)
        self.diffr3 = (self.b, self.b, slice(ng + 1, -ng + 1))

        # Name boundaries slices for readability
        # Left side
        self.ghostl = slice(0, ng)
        self.boundl = slice(ng, 2 * ng)
        self.boundl_r = slice(2 * ng, ng, -1)  # Reverse
        self.boundl_o = slice(ng, ng + 1)      # Outflow (1-zone slice for replication)
        # Right side
        self.ghostr = slice(-ng, None)
        self.boundr = slice(-2 * ng, -ng)
        self.boundr_r = slice(-ng, -2 * ng, -1)
        self.boundr_o = slice(-ng - 1, -ng)

    def geom_slc(self, slc):
        return slc[:2] + (None,)


class Shapes:
    def __init__(self, G):
        # Shapes for allocation
        self.geom_scalar = (G.GN[1], G.GN[2])
        self.geom_vector = (G.NDIM,) + self.geom_scalar
        self.geom_tensor = (G.NDIM,) + self.geom_vector

        self.grid_scalar = (G.GN[1], G.GN[2], G.GN[3])
        self.grid_vector = (G.NDIM,) + self.grid_scalar
        self.grid_tensor = (G.NDIM,) + self.grid_vector

        self.bulk_scalar = (G.N[1], G.N[2], G.N[3])
        self.bulk_vector = (G.NDIM,) + self.bulk_scalar
        self.bulk_tensor = (G.NDIM,) + self.bulk_vector
# coding: utf-8
import numpy as np
import jieba
from gensim import corpora, models, similarities
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer, TfidfTransformer
from sklearn import metrics
from sklearn.naive_bayes import MultinomialNB


# # Build text vectors by hand
# def get_text_vector(docts):
#     # tokenize
#     texts = [jieba.cut(text) for text in docts]
#     # convert to lists
#     texts = [list(item) for item in texts]
#     # build a dictionary with words as keys, making sure every key gets a distinct value
#     dictionary = dict()
#     count = 0
#     for word_list in texts:
#         for word in word_list:
#             if word not in dictionary:
#                 dictionary[word] = count
#                 count += 1
#     total_words = len(dictionary.keys())  # total number of words
#     # every word that appears in the corpus now has a unique integer id
#     train_array = []
#     for word_list in texts:
#         text_array = [0 for i in range(total_words)]
#         for word in word_list:
#             text_array[dictionary[word]] += 1
#         train_array.append(text_array)
#     return np.array(train_array)


# *** Text classification training based on sklearn
def NaiveBayes(train_docts, label, target_docts):
    """
    Text classification with naive Bayes
    :return: predicted labels for target_docts
    """
    # Feature extraction: term counts and TF-IDF
    count_vect = CountVectorizer()
    tfidf_transformer = TfidfTransformer()
    X_trainCounts = count_vect.fit_transform(train_docts)
    X_testCounts = count_vect.transform(target_docts)
    X_trainTfidf = tfidf_transformer.fit_transform(X_trainCounts)
    X_testTfidf = tfidf_transformer.transform(X_testCounts)
    # Train a (multinomial) naive Bayes classifier on the training set
    clf = MultinomialNB().fit(X_trainTfidf, label)
    # Evaluate the classifier with sklearn's metrics module
    predicted = clf.predict(X_testTfidf)
    # print(metrics.classification_report([1], predicted))
    # print("accuracy\t" + str(np.mean(predicted == newsTest.target)))
    return predicted


# def get_text_vector(docts):
#     # tokenize
#     texts = [list(jieba.cut(text)) for text in docts]
#     dictionary = corpora.Dictionary(texts)  # build the bag of words
#     corpus = [dictionary.doc2bow(doc) for doc in texts]
#     return corpus
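# A minimal usage sketch for NaiveBayes above (illustrative only; the documents and
# labels here are made up). Each training document gets a class label, and the
# function returns predicted labels for the target documents.
if __name__ == "__main__":
    train_docts = [
        "the match ended with a late goal",
        "the striker scored twice in the final",
        "the new phone ships with a faster chip",
        "the laptop review praises its battery",
    ]
    label = ["sports", "sports", "tech", "tech"]
    target_docts = ["a dramatic goal decided the final", "the chip benchmark results"]
    print(NaiveBayes(train_docts, label, target_docts))  # e.g. ['sports' 'tech']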
# -*- coding: utf-8 -*- # Form implementation generated from reading ui file 'ui_error.ui' # # Created by: PyQt5 UI code generator 5.15.2 # # WARNING: Any manual changes made to this file will be lost when pyuic5 is # run again. Do not edit this file unless you know what you are doing. from PyQt5 import QtCore, QtGui, QtWidgets class Ui_Error(object): def setupUi(self, Error): Error.setObjectName("Error") Error.resize(250, 150) Error.setMinimumSize(QtCore.QSize(250, 150)) Error.setMaximumSize(QtCore.QSize(250, 150)) self.horizontalLayout = QtWidgets.QHBoxLayout(Error) self.horizontalLayout.setContentsMargins(0, 0, 0, 0) self.horizontalLayout.setSpacing(0) self.horizontalLayout.setObjectName("horizontalLayout") self.frame = QtWidgets.QFrame(Error) self.frame.setStyleSheet("background:rgb(51,51,51);") self.frame.setFrameShape(QtWidgets.QFrame.NoFrame) self.frame.setFrameShadow(QtWidgets.QFrame.Plain) self.frame.setObjectName("frame") self.verticalLayout = QtWidgets.QVBoxLayout(self.frame) self.verticalLayout.setContentsMargins(2, 2, 2, 2) self.verticalLayout.setSpacing(0) self.verticalLayout.setObjectName("verticalLayout") self.frame_top = QtWidgets.QFrame(self.frame) self.frame_top.setMinimumSize(QtCore.QSize(0, 55)) self.frame_top.setMaximumSize(QtCore.QSize(16777215, 55)) self.frame_top.setStyleSheet("background:rgb(91,90,90);") self.frame_top.setFrameShape(QtWidgets.QFrame.NoFrame) self.frame_top.setFrameShadow(QtWidgets.QFrame.Plain) self.frame_top.setObjectName("frame_top") self.horizontalLayout_2 = QtWidgets.QHBoxLayout(self.frame_top) self.horizontalLayout_2.setContentsMargins(15, 5, 0, 0) self.horizontalLayout_2.setSpacing(5) self.horizontalLayout_2.setObjectName("horizontalLayout_2") self.lab_icon = QtWidgets.QLabel(self.frame_top) self.lab_icon.setMinimumSize(QtCore.QSize(35, 35)) self.lab_icon.setMaximumSize(QtCore.QSize(35, 35)) self.lab_icon.setText("") self.lab_icon.setObjectName("lab_icon") self.horizontalLayout_2.addWidget(self.lab_icon) self.lab_heading = QtWidgets.QLabel(self.frame_top) font = QtGui.QFont() font.setFamily("Segoe UI") font.setPointSize(12) self.lab_heading.setFont(font) self.lab_heading.setStyleSheet("color:rgb(255,255,255);") self.lab_heading.setText("") self.lab_heading.setWordWrap(True) self.lab_heading.setObjectName("lab_heading") self.horizontalLayout_2.addWidget(self.lab_heading) self.verticalLayout.addWidget(self.frame_top) self.frame_bottom = QtWidgets.QFrame(self.frame) self.frame_bottom.setStyleSheet("background:rgb(91,90,90);") self.frame_bottom.setFrameShape(QtWidgets.QFrame.NoFrame) self.frame_bottom.setFrameShadow(QtWidgets.QFrame.Plain) self.frame_bottom.setObjectName("frame_bottom") self.gridLayout = QtWidgets.QGridLayout(self.frame_bottom) self.gridLayout.setContentsMargins(-1, -1, -1, 0) self.gridLayout.setObjectName("gridLayout") self.bn_ok = QtWidgets.QPushButton(self.frame_bottom) self.bn_ok.setMinimumSize(QtCore.QSize(69, 25)) self.bn_ok.setMaximumSize(QtCore.QSize(69, 25)) self.bn_ok.setStyleSheet("QPushButton {\n" " border: 2px solid rgb(51,51,51);\n" " border-radius: 5px; \n" " color:rgb(255,255,255);\n" " background-color: rgb(51,51,51);\n" "}\n" "QPushButton:hover {\n" " border: 2px solid rgb(0,143,150);\n" " background-color: rgb(0,143,150);\n" "}\n" "QPushButton:pressed { \n" " border: 2px solid rgb(0,143,150);\n" " background-color: rgb(51,51,51);\n" "}\n" "") self.bn_ok.setText("") self.bn_ok.setObjectName("bn_ok") self.gridLayout.addWidget(self.bn_ok, 0, 0, 1, 1) self.verticalLayout.addWidget(self.frame_bottom) 
self.horizontalLayout.addWidget(self.frame) self.retranslateUi(Error) QtCore.QMetaObject.connectSlotsByName(Error) def retranslateUi(self, Error): _translate = QtCore.QCoreApplication.translate Error.setWindowTitle(_translate("Error", "Dialog")) if __name__ == "__main__": import sys app = QtWidgets.QApplication(sys.argv) Error = QtWidgets.QDialog() ui = Ui_Error() ui.setupUi(Error) Error.show() sys.exit(app.exec_())
# Help Commands

class help_command(object):
    def __init__(self):
        s = self
        end = '\r\n'
        s.help_msg = " FrankerZ What would you like help with? !socialCmds, !miscCmds, !infoCmds FrankerZ " + end


class help_command_social(object):
    def __init__(self):
        s = self
        end = '\r\n'
        s.help_social = "Commands are: !Instagram, !Facebook, !Twitter, !Youtube" + end


class help_command_misc(object):
    def __init__(self):
        s = self
        end = '\r\n'
        s.help_misc = "Commands are: ThunBeast !pbw, !" + end


class help_command_info(object):
    def __init__(self):
        s = self
        end = '\r\n'
        s.help_info = "Commands are: NOTHIN HERE YET" + end
import matplotlib.pyplot as plt
import numpy as np

plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False

labels = 'Frogs', 'Hogs', 'Dogs', 'Logs'
sizes = [15, 20, 45, 20]
colors = ['red', 'blue', 'yellow', 'pink']
explode = (0, 0.1, 0, 0)

plt.pie(sizes, explode=explode, colors=colors, labels=labels,
        shadow=True, startangle=90, autopct='%1.1f%%')
plt.axis('equal')
plt.show()
print('==== EXERCICIO 022 =====')
print('- Crie um programa que leia o nome completo de uma pessoa e mostre: -')
print('-> O nome com todas as letras maiúsculas')
print('-> O nome com todas minúsculas')
print('-> Quantas letras ao todo (Sem considerar espaços)')
print('-> Quantas letras tem o primeiro nome')

nome = input('Qual o seu nome? ')
print('Seu nome todo em maiúsculo:', nome.upper())
print('Seu nome todo em minúsculo', nome.lower())

nomeFormatedSplit = nome.split()
nomeFormated = nome.strip()
print('Seu nome tem {} letras'.format(len(nomeFormated) - nome.count(' ')))
print('Seu primeiro nome tem {} letras'.format(len(nomeFormatedSplit[0])))
#!/usr/bin/python
from datetime import datetime

limit = int(input("Enter the limit for the primes you want to find: "))
start = datetime.now()

arr = [True] * (limit + 61)

if limit < 5:
    exit()
if limit >= 2:
    print(2)
if limit >= 3:
    print(3)
if limit >= 5:
    print(5)

list1 = [1, 13, 17, 29, 37, 41, 49, 53]
list2 = [7, 19, 31, 43]
list3 = [11, 23, 47, 59]
listW = list1 + list2 + list3

for x in listW:
    for w in range(limit // 60 + 1):
        arr[60 * w + x] = False

x = 4
y = 0
n = 5
N = 5
while n <= limit:
    while n <= limit:
        if n % 60 in list1:
            arr[n] = not arr[n]
        y += 8
        n += y
    x += 8
    N += x
    y = 0
    n = N

x = 0
y = 4
n = 7
N = 7
while n <= limit:
    while n <= limit:
        if n % 60 in list2:
            arr[n] = not arr[n]
        y += 8
        n += y
    x += 24
    N += x
    y = 4
    n = N

x = 2
y = x - 1
n = 11
while n <= limit:
    while n <= limit and y >= 1:
        if n % 60 in list3:
            arr[n] = not arr[n]
        y -= 2
        n = 3 * x * x - y * y
    x += 1
    y = x - 1
    n = 3 * x * x - y * y

for x in listW:
    n = x
    while n * n <= limit:
        if arr[n]:
            for y in listW:
                c = n * n * y
                while c <= limit:
                    arr[c] = False
                    c += n * n * 60
        n += 60

for x in listW:
    n = x
    while n < limit:
        if arr[n]:
            print(n)
        n += 60

print("Time: " + (datetime.now() - start).__str__())
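# A quick cross-check for the sieve above (illustrative only; not part of the original
# script). For a small limit, the numbers the sieve prints should match the output of
# this straightforward trial-division reference.
def _primes_by_trial_division(limit):
    # Slow but obviously correct: keep n if no previously found prime divides it.
    primes = []
    for n in range(2, limit + 1):
        if all(n % p for p in primes):
            primes.append(n)
    return primes

# print(_primes_by_trial_division(100))  # [2, 3, 5, 7, 11, ..., 97]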
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). from __future__ import annotations import json from dataclasses import dataclass from pathlib import PurePath from pants.backend.docker.target_types import DockerImageSourceField from pants.backend.docker.util_rules.docker_build_args import DockerBuildArgs from pants.backend.python.subsystems.python_tool_base import PythonToolRequirementsBase from pants.backend.python.target_types import EntryPoint from pants.backend.python.util_rules import pex from pants.backend.python.util_rules.pex import PexRequest, VenvPex, VenvPexProcess from pants.engine.addresses import Address from pants.engine.fs import CreateDigest, Digest, FileContent from pants.engine.process import Process, ProcessResult from pants.engine.rules import Get, collect_rules, rule from pants.engine.target import ( HydratedSources, HydrateSourcesRequest, SourcesField, WrappedTarget, WrappedTargetRequest, ) from pants.util.logging import LogLevel from pants.util.resources import read_resource _DOCKERFILE_SANDBOX_TOOL = "dockerfile_wrapper_script.py" _DOCKERFILE_PACKAGE = "pants.backend.docker.subsystems" class DockerfileParser(PythonToolRequirementsBase): options_scope = "dockerfile-parser" help = "Used to parse Dockerfile build specs to infer their dependencies." default_requirements = ["dockerfile>=3.2.0,<4"] register_interpreter_constraints = True default_lockfile_resource = (_DOCKERFILE_PACKAGE, "dockerfile.lock") @dataclass(frozen=True) class ParserSetup: pex: VenvPex @rule async def setup_parser(dockerfile_parser: DockerfileParser) -> ParserSetup: parser_script_content = read_resource(_DOCKERFILE_PACKAGE, _DOCKERFILE_SANDBOX_TOOL) if not parser_script_content: raise ValueError( f"Unable to find source to {_DOCKERFILE_SANDBOX_TOOL!r} in {_DOCKERFILE_PACKAGE}." ) parser_content = FileContent( path="__pants_df_parser.py", content=parser_script_content, is_executable=True, ) parser_digest = await Get(Digest, CreateDigest([parser_content])) parser_pex = await Get( VenvPex, PexRequest, dockerfile_parser.to_pex_request( main=EntryPoint(PurePath(parser_content.path).stem), sources=parser_digest ), ) return ParserSetup(parser_pex) @dataclass(frozen=True) class DockerfileParseRequest: sources_digest: Digest args: tuple[str, ...] @rule async def setup_process_for_parse_dockerfile( request: DockerfileParseRequest, parser: ParserSetup ) -> Process: process = await Get( Process, VenvPexProcess( parser.pex, argv=request.args, description="Parse Dockerfile.", input_digest=request.sources_digest, level=LogLevel.DEBUG, ), ) return process class DockerfileInfoError(Exception): pass @dataclass(frozen=True) class DockerfileInfo: address: Address digest: Digest # Data from the parsed Dockerfile, keep in sync with # `dockerfile_wrapper_script.py:ParsedDockerfileInfo`: source: str build_args: DockerBuildArgs = DockerBuildArgs() copy_source_paths: tuple[str, ...] = () from_image_build_args: DockerBuildArgs = DockerBuildArgs() version_tags: tuple[str, ...] 
= () @dataclass(frozen=True) class DockerfileInfoRequest: address: Address @rule async def parse_dockerfile(request: DockerfileInfoRequest) -> DockerfileInfo: wrapped_target = await Get( WrappedTarget, WrappedTargetRequest(request.address, description_of_origin="<infallible>") ) target = wrapped_target.target sources = await Get( HydratedSources, HydrateSourcesRequest( target.get(SourcesField), for_sources_types=(DockerImageSourceField,), enable_codegen=True, ), ) dockerfiles = sources.snapshot.files assert len(dockerfiles) == 1, ( f"Internal error: Expected a single source file to Dockerfile parse request {request}, " f"got: {dockerfiles}." ) result = await Get( ProcessResult, DockerfileParseRequest( sources.snapshot.digest, dockerfiles, ), ) try: raw_output = result.stdout.decode("utf-8") outputs = json.loads(raw_output) assert len(outputs) == len(dockerfiles) except Exception as e: raise DockerfileInfoError( f"Unexpected failure to parse Dockerfiles: {', '.join(dockerfiles)}, " f"for the {request.address} target: {e}\nDockerfile parser output:\n{raw_output}" ) from e info = outputs[0] try: return DockerfileInfo( address=request.address, digest=sources.snapshot.digest, source=info["source"], build_args=DockerBuildArgs.from_strings( *info["build_args"], duplicates_must_match=True ), copy_source_paths=tuple(info["copy_source_paths"]), from_image_build_args=DockerBuildArgs.from_strings( *info["from_image_build_args"], duplicates_must_match=True ), version_tags=tuple(info["version_tags"]), ) except ValueError as e: raise DockerfileInfoError( f"Error while parsing {info['source']} for the {request.address} target: {e}" ) from e def rules(): return ( *collect_rules(), *pex.rules(), )
from collections import namedtuple TOKEN = '576701434:AAFxQLWEp4HqxaTvNXFLoS4NHMl6jHrZlmA' DB = 'd1l38h8lqhilvc' SERVER = 'ec2-54-221-212-15.compute-1.amazonaws.com' USER = 'gziqyxvqktbptx' field = namedtuple('reg', ('name', 'translate', 'func', 'ars')) field.__new__.__defaults__ = (None,) * 4 user = { 'url_foto': '', 'name': '', 'login': '', 'info': '' } reg = [ field('url_foto', "[1/9] Фото: Отправь фото прямо сюда. Пользователи должны знать, как ты выглядишь."), field('name', "Отлично!|[2/9] Имя: Как тебя представить другим пользователям? Напиши сюда свое имя."), field('info', "Приятно познакомиться {})|[3/9] Город: В каком городе ты живешь или часто бываешь? " "Выбери из списка, ты сможешь изменить его в любой момент в своем профиле.", None, ('name',)), field('citec', "Тестовая строка"), field('pass', 'Конец регистрации') ] users_reg_id = {} users_info = {}
import scrapy class CitySpider(scrapy.Spider): name = "city" def start_requests(self): urls = [ 'https://www.lianjia.com/' ] for url in urls: yield scrapy.Request(url=url, callback=self.parse) def parse(self, response): for ul in response.css('ul.clear'): city_names = ul.xpath('li/a/text()').extract() city_urls = ul.xpath('li/a/@href').extract() for i in range(0, len(city_names)): yield { 'city': city_names[i], 'city_url': city_urls[i] }
import contextlib from datetime import datetime from election_snooper.models import SnoopedElection from .base import BaseSnooper class ALDCScraper(BaseSnooper): snooper_name = "ALDC" base_url = "https://www.aldc.org/" def get_all(self): url = "{}category/forthcoming-by-elections/".format(self.base_url) print(url) soup = self.get_soup(url) for tile in soup.find_all("article"): title = tile.find("h2").text.strip() detail_url = url + "#" + tile["id"] date = tile.find("date").text.strip() content = tile.find("div", {"class": ""}).find_all("p") if content and content[0].text.lower().count("cause") == 1: seat_control, cause = content[0].text.lower().split("cause") cause = cause.split("\n")[0].strip(": .") else: cause = "unknown" data = { "title": title, "source": url, "cause": cause, "detail": "\n".join([x.text for x in content]), "snooper_name": self.snooper_name, } with contextlib.suppress(ValueError): data["date"] = datetime.strptime(date, "%B %d, %Y") item, created = SnoopedElection.objects.update_or_create( snooper_name=self.snooper_name, detail_url=detail_url, defaults=data, ) if created: self.post_to_slack(item)
import xmltodict import cPickle as pickle import sys,os import re class ForumPost(object): def __init__(self,xml_file_name): with open(xml_file_name,'r') as data: parsed_data = xmltodict.parse(data.read()) self.post_type = parsed_data.keys()[0] self.message_type = parsed_data[self.post_type][u'message'][u'@type'] message = parsed_data[self.post_type][u'message'] self.board_id = message[u'board_id'][u'#text'] try: self.text = message[u'body'][u'#text'] except Exception, e: self.text = "" self.author = message[u'author'][u'login'][u'#text'] self.label = '' self.label_fg = ''
import numpy as np def _raw_moment(data, i_order, j_order): nrows, ncols = data.shape y_indices, x_indicies = np.mgrid[:nrows, :ncols] return (data * x_indicies**i_order * y_indices**j_order).sum() def _moments_cov(data): data_sum = data.sum() m10 = _raw_moment(data, 1, 0) m01 = _raw_moment(data, 0, 1) x_centroid = m10 / data_sum y_centroid = m01 / data_sum u11 = (_raw_moment(data, 1, 1) - x_centroid * m01) / data_sum u20 = (_raw_moment(data, 2, 0) - x_centroid * m10) / data_sum u02 = (_raw_moment(data, 0, 2) - y_centroid * m01) / data_sum cov = np.array([[u20, u11], [u11, u02]]) return cov def orientation(mask, debug=False, periodic_domain=False): """ Compute a measure for a single (cloud) mask's degree of directional alignment, using the (cloud) mask's raw image moment covariance matrix. Code based on: https://github.com/alyssaq/blog/blob/master/posts/150114-054922_computing-the-axes-or-orientation-of-a-blob.md. Note that this function currently does not support periodic boundary conditions (use the wavelet-based orientation measure woi3 for such scenes). Parameters ---------- mask : numpy array of shape (npx,npx) - npx is number of pixels (cloud) mask field. Returns ------- orie : float Orientation measure (dimensionless value between 0-1, with 0 denoting no preferential direction of orientation and 1 denoting that all information is oriented in one direction) """ if periodic_domain: raise NotImplementedError(periodic_domain) cov = _moments_cov(mask) if np.isnan(cov).any() or np.isinf(cov).any(): return np.nan evals, evecs = np.linalg.eig(cov) orie = np.sqrt(1 - np.min(evals) / np.max(evals)) if debug: _debug_plot(mask=mask, evecs=evecs, orie=orie, evals=evals) return orie def _debug_plot(mask, evals, evecs, orie): import matplotlib.pyplot as plt sort_indices = np.argsort(evals)[::-1] x_v1, y_v1 = evecs[:, sort_indices[0]] # evec with largest eval x_v2, y_v2 = evecs[:, sort_indices[1]] evalsn = evals[sort_indices] / evals[sort_indices][0] scale = 10 ox = int(mask.shape[1] / 2) oy = int(mask.shape[0] / 2) lw = 5 _, ax = plt.subplots() ax.imshow(mask, "gray") # plt.scatter(ox+x_v1*-scale*2,oy+y_v1*-scale*2,s=100) ax.plot( [ox - x_v1 * scale * evalsn[0], ox + x_v1 * scale * evalsn[0]], [oy - y_v1 * scale * evalsn[0], oy + y_v1 * scale * evalsn[0]], linewidth=lw, ) ax.plot( [ox - x_v2 * scale * evalsn[1], ox + x_v2 * scale * evalsn[1]], [oy - y_v2 * scale * evalsn[1], oy + y_v2 * scale * evalsn[1]], linewidth=lw, ) ax.set_title("Alignment measure = " + str(round(orie, 3))) plt.show()
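# A small usage sketch for the orientation() function above, intended to be run
# in the same module. The synthetic masks below are assumptions used purely for
# illustration: an elongated horizontal bar of ones should give a value close to
# 1 (strongly oriented), while a centred square should give a value near 0.
if __name__ == '__main__':
    import numpy as np

    bar = np.zeros((64, 64))
    bar[30:34, 8:56] = 1          # elongated horizontal feature
    square = np.zeros((64, 64))
    square[24:40, 24:40] = 1      # roughly isotropic feature

    print('bar orientation   :', orientation(bar))     # expected close to 1
    print('square orientation:', orientation(square))  # expected close to 0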
# Generated by Django 2.2.4 on 2019-08-24 11:51 from django.db import migrations, models import phonenumber_field.modelfields class Migration(migrations.Migration): dependencies = [ ('visitors', '0001_initial'), ] operations = [ migrations.AlterField( model_name='track_entry', name='exit_car_plate_image', field=models.ImageField(blank=True, help_text='Exit Carpate Image.', null=True, upload_to='exit/'), ), migrations.AlterField( model_name='track_entry', name='visitor_phone_number', field=phonenumber_field.modelfields.PhoneNumberField(blank=True, max_length=128, null=True, region=None), ), ]
""" * Copyright 2020, Departamento de sistemas y Computación * Universidad de Los Andes * * * Desarrolado para el curso ISIS1225 - Estructuras de Datos y Algoritmos * * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. * Contribución de: * * Dario Correal * """ import config from DISClib.ADT.graph import gr from DISClib.ADT import map as m from DISClib.ADT import list as lt from DISClib.DataStructures import listiterator as it from DISClib.Algorithms.Graphs import scc from DISClib.Algorithms.Graphs import dijsktra as djk from DISClib.Utils import error as error assert config """ En este archivo definimos los TADs que vamos a usar y las operaciones de creacion y consulta sobre las estructuras de datos. """ # ----------------------------------------------------- # API def analyzer(): analyzer = {"index":None, "graph":None} analyzer["index"] = m.newMap(numelements=1000, prime=109345121, maptype="CHAINING", loadfactor=1.0, comparefunction=None) analyzer["graph"] = gr.newGraph(datastructure='ADJ_LIST', directed=True, size=1000, comparefunction=comparer) return analyzer # ----------------------------------------------------- # Funciones para agregar informacion al grafo def AñadirRuta(analyzer, route): """ """ origin = route['start station id'] destination = route['end station id'] duration = int(route['tripduration']) AñadirEstacion(analyzer, origin) AñadirEstacion(analyzer, destination) AñadirConeccion(analyzer, origin, destination, duration) def AñadirEstacion(analyzer, estacion): if not gr.containsVertex(analyzer["graph"], estacion): gr.insertVertex(analyzer["graph"], estacion) return analyzer def AñadirConeccion(analyzer, origin, destination, duration): """ Adiciona un arco entre dos estaciones """ edge = gr.getEdge(analyzer["graph"], origin, destination) if edge is None: gr.addEdge(analyzer["graph"], origin, destination, duration) else: edge["weight"] = (edge["weight"]+int(duration))/2 return analyzer # ============================== # Funciones de consulta # ============================== def TotaldeClusteres(analyzer): A = scc.KosarajuSCC(analyzer["graph"]) return scc.connectedComponents(A) def ClusterPresence(analyzer,id1,id2): A = scc.KosarajuSCC(analyzer["graph"]) return scc.stronglyConnected(A, id1, id2) def TotalDeVertices(analyzer): return gr.numVertices(analyzer["graph"]) def TotalDeArcos(analyzer): return gr.numEdges(analyzer["graph"]) # ============================== # Funciones Helper # ============================== # ============================== # Funciones de Comparacion # ============================== def comparer(stop, keyvaluestop): """ Compara dos estaciones """ stopcode = keyvaluestop['key'] if (stop == stopcode): return 0 elif (stop > stopcode): return 1 else: return -1
from settings import * from Helper import Utils import pickle from PIL import Image class Test(object): def __init__(self): pass def date(self): now = Utils().get_current_date() print(now) def visualize_images(self): image_information = global_path_to_other_results + '/train12017-02-02-18:18:34.pickle' with open(image_information, 'rb') as f: data = pickle.load(f) filenames = data['filenames'] path = global_path_to_original_train_data for i in range(0, 150): path_to_file = path + '/' + filenames[i] + '.jpg' if os.path.exists(path_to_file): im = Image.open(path_to_file) im.show() print(path_to_file) def main(): test = Test() test.visualize_images() if __name__ == "__main__": main()
# Generated by Django 3.0.4 on 2020-03-13 10:24 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('work', '0102_auto_20200313_1551'), ] operations = [ migrations.AddField( model_name='boq', name='nature', field=models.CharField(choices=[('SUPPLY', 'SUPPLY'), ('ERECTION', 'ERECTION')], default='', max_length=50), preserve_default=False, ), migrations.AlterField( model_name='boq', name='region', field=models.CharField(choices=[('RURAL', 'RURAL'), ('URBAN', 'URBAN')], max_length=50), ), ]
# Definition for a binary tree node
# class TreeNode:
#     def __init__(self, x):
#         self.val = x
#         self.left = None
#         self.right = None

class Solution:
    # @param A : root node of tree
    # @return the root node in the tree
    def recursive_invert(self, node):
        # Swap the children, then recurse on each subtree directly.
        if node:
            node.left, node.right = node.right, node.left
            self.recursive_invert(node.left)
            self.recursive_invert(node.right)

    def invertTree(self, A):
        self.recursive_invert(A)
        return A
#!/usr/bin/env python
import rospy
from std_msgs.msg import Int32

# Publisher is created once in receive(), after the node is initialised,
# instead of being re-created on every incoming message.
pub1 = None


def result(msg):
    pub1.publish(msg)


def callback(data):
    # std_msgs/Int32 carries an integer, so cast the scaled value before publishing
    msg = int(data.data / 0.15)
    rospy.loginfo(msg)
    result(msg)


def receive():
    global pub1
    rospy.init_node('receive', anonymous=True)
    pub1 = rospy.Publisher('result', Int32, queue_size=10)
    rospy.Subscriber("jain", Int32, callback)
    rospy.spin()


if __name__ == '__main__':
    receive()
# Edge Detection using Canny edge detector # Edges are set of points(lines), where image brightness changes sharply # Import Computer Vision package - cv2 import cv2 # Import Numerical Python package - numpy as np import numpy as np # Read the image using imread built-in function image = cv2.imread('image_8.jpg') # Display original image using imshow built-in function cv2.imshow("Original", image) # Wait until any key is pressed cv2.waitKey(0) # cv2.Canny is the built-in function used to detect edges # cv2.Canny(image, threshold_1, threshold_2) canny = cv2.Canny(image, 50, 200) # Display edge detected output image using imshow built-in function cv2.imshow('Canny Edge Detection', canny) # Wait until any key is pressed cv2.waitKey(0) # Close all windows cv2.destroyAllWindows()
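# An optional follow-up sketch (not part of the original walkthrough): the two
# numbers passed to cv2.Canny are the hysteresis thresholds. Gradients above the
# high threshold are kept as edges, gradients below the low threshold are
# discarded, and gradients in between survive only if they connect to a strong
# edge. Comparing a tight and a wide threshold pair on the same image makes the
# effect visible; the file name reuses 'image_8.jpg' from above.
import cv2

image = cv2.imread('image_8.jpg')
conservative = cv2.Canny(image, 150, 250)   # fewer, stronger edges
permissive = cv2.Canny(image, 20, 80)       # more edges, weaker ones included
cv2.imshow('Canny 150/250', conservative)
cv2.imshow('Canny 20/80', permissive)
cv2.waitKey(0)
cv2.destroyAllWindows()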
from django.urls import path, include from sample1.views import HomePageView urlpatterns = [ path('', HomePageView.as_view(), name='home') ]
# This file tests lib.py. import unittest from lib import * class TestGoogleAPI(unittest.TestCase): def setUp(self): pass def test_time_in_hours_between_locations(self): # TODO(youness) start = "Carlsbad, California" stop = "San Francisco, California" hours = time_in_hours_between_locations(start, stop) print(hours) self.assertGreater(hours, 4.0) self.assertLess(hours, 16.0) if __name__ == '__main__': unittest.main()
# -*- coding: utf-8 -*- # Generated by Django 1.9.5 on 2016-08-10 05:40 from __future__ import unicode_literals from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ ] operations = [ migrations.CreateModel( name='Vm', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=170)), ('ip_address', models.CharField(max_length=30)), ], ), migrations.CreateModel( name='VmDetails', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('powerState', models.CharField(max_length=10, null=True)), ('maxCpuUsage', models.CharField(max_length=20, null=True)), ('maxMemoryUsage', models.CharField(max_length=20, null=True)), ('template', models.CharField(max_length=10, null=True)), ('memorySizeMB', models.IntegerField(null=True)), ('numCpu', models.IntegerField(null=True)), ('numEthernetCards', models.IntegerField(null=True)), ('numVirtualDisks', models.IntegerField(null=True)), ('instanceUuid', models.CharField(max_length=100, null=True)), ('uuid', models.CharField(max_length=100, null=True)), ('committed', models.IntegerField(null=True)), ('uncommitted', models.IntegerField(null=True)), ('overallCpuUsage', models.IntegerField(null=True)), ('overallCpuDemand', models.IntegerField(null=True)), ('guestMemoryUsage', models.IntegerField(null=True)), ('hostMemoryUsage', models.IntegerField(null=True)), ('guestHeartbeatStatus', models.CharField(max_length=100, null=True)), ('privateMemory', models.IntegerField(null=True)), ('consumedOverheadMemory', models.CharField(max_length=100, null=True)), ('ftLatencyStatus', models.CharField(max_length=100, null=True)), ('host_ip', models.CharField(max_length=100, null=True)), ('vcenter_date', models.DateField()), ('createdate', models.DateField()), ('id_vm', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='vm.Vm')), ], ), ]
#!/usr/bin/env python ################################################################### # This script works for any applications. # It creates a new file (line 21) containing only the last # concentration group and everything else needed to restart Xolotl. # This is useful when restarting the simulation often and when # the other concentration groups won't be needed for data analysis. ################################################################### import h5py ## Open the file we want to copy from f = h5py.File('/home/sophie/Workspace/xolotl-dynamicGrid-build/script/xolotlStop.h5', 'r') ## Get the last time step saved in the file concGroup = f['concentrationsGroup'] timestep = concGroup.attrs['lastTimeStep'] lastLoop = concGroup.attrs['lastLoop'] ## Create the file to copy to fNew = h5py.File('/home/sophie/Workspace/xolotl-dynamicGrid-build/script/networkNew.h5', 'a') ## Create the concentration group concGroupNew = fNew.create_group('concentrationsGroup') ## Set the last time step concGroupNew.attrs['lastTimeStep'] = timestep concGroupNew.attrs['lastLoop'] = lastLoop ## Copy the last timestep group groupName ='concentration_' + str(lastLoop) + '_' + str(timestep) concGroup.copy(groupName, concGroupNew) ## Copy the other groups f.copy('networkGroup', fNew)
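# A hedged verification sketch for the restart file written above, meant to be
# run after the copy script has finished. It is not part of the original script;
# the path is shortened here to 'networkNew.h5' (adjust to the full path used
# above). It lists the groups in the new file and prints the saved restart
# attributes so the copy can be eyeballed before restarting Xolotl.
import h5py

with h5py.File('networkNew.h5', 'r') as check:
    check.visit(print)  # prints every group/dataset name in the file
    concs = check['concentrationsGroup']
    print('lastTimeStep:', concs.attrs['lastTimeStep'])
    print('lastLoop:', concs.attrs['lastLoop'])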
from common.run_method import RunMethod import allure @allure.step("极运营/营销中心/业绩归属/修改介绍人") def web_performance_change_employee_id_post(params=None, body=None, header=None, return_json=True, **kwargs): ''' :param: url地址后面的参数 :body: 请求体 :return_json: 是否返回json格式的响应(默认是) :header: 请求的header :host: 请求的环境 :return: 默认json格式的响应, return_json=False返回原始响应 ''' name = "极运营/营销中心/业绩归属/修改介绍人" url = f"/service-finance/web/performance/change/employee/id" res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs) return res @allure.step("极运营/营销中心/业绩归属/修改业绩归属人") def web_performance_change_introducerStudent_id_post(params=None, body=None, header=None, return_json=True, **kwargs): ''' :param: url地址后面的参数 :body: 请求体 :return_json: 是否返回json格式的响应(默认是) :header: 请求的header :host: 请求的环境 :return: 默认json格式的响应, return_json=False返回原始响应 ''' name = "极运营/营销中心/业绩归属/修改业绩归属人" url = f"/service-finance/web/performance/change/introducerStudent/id" res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs) return res @allure.step("极运营/营销中心/业绩归属/业绩归属明细") def web_performance_queryOrderDetailList_post(params=None, body=None, header=None, return_json=True, **kwargs): ''' :param: url地址后面的参数 :body: 请求体 :return_json: 是否返回json格式的响应(默认是) :header: 请求的header :host: 请求的环境 :return: 默认json格式的响应, return_json=False返回原始响应 ''' name = "极运营/营销中心/业绩归属/业绩归属明细" url = f"/service-finance/web/performance/queryOrderDetailList" res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs) return res @allure.step("极运营/营销中心/业绩归属/无业绩归属人明细") def web_performance_queryPerformanceDetailList_post(params=None, body=None, header=None, return_json=True, **kwargs): ''' :param: url地址后面的参数 :body: 请求体 :return_json: 是否返回json格式的响应(默认是) :header: 请求的header :host: 请求的环境 :return: 默认json格式的响应, return_json=False返回原始响应 ''' name = "极运营/营销中心/业绩归属/无业绩归属人明细" url = f"/service-finance/web/performance/queryPerformanceDetailList" res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs) return res
#!/usr/bin/env python # Copyright (c) 2019-present, HuggingFace Inc. # All rights reserved. This source code is licensed under the BSD-style license # found in the LICENSE file in the root directory of this source tree. import json import os import logging import shutil from tqdm import tqdm import torch import torch.cuda from pytorch_transformers import WEIGHTS_NAME, CONFIG_NAME from grtr.env_utils import OUTPUT_DIR from grtr.utils import (get_blank_training_state, save_model) logger = logging.getLogger(__file__) def train_epoch(in_model, in_data_loader, in_optimizer, args, training_state, amp=None, save=True): in_model.train() RESULT_DIR = os.path.join(OUTPUT_DIR, args.model_checkpoint) TRAINING_STATE_FILE = os.path.join(RESULT_DIR, 'training_state.json') CHECKPOINT_FILE = os.path.join(RESULT_DIR, 'checkpoint.pt') for iter, batch in tqdm(enumerate(in_data_loader), total=len(in_data_loader)): if iter < training_state['step']: continue batch = tuple(input_tensor.to(args.device) for input_tensor in batch) lm_loss, mc_loss = in_model(*batch)[:2] loss = (lm_loss * args.lm_coef + mc_loss * args.mc_coef) / args.gradient_accumulation_steps loss = torch.mean(loss) if amp is not None: with amp.scale_loss(loss, in_optimizer) as scaled_loss: scaled_loss = scaled_loss.float() scaled_loss.backward() torch.nn.utils.clip_grad_norm_(amp.master_params(in_optimizer), args.max_norm) else: loss.backward() torch.nn.utils.clip_grad_norm_(in_model.parameters(), args.max_norm) if iter % args.gradient_accumulation_steps == 0: in_optimizer.step() in_optimizer.zero_grad() training_state['step'] += 1 if args.local_rank in [-1, 0] and args.n_epochs > 0 and save: if args.steps_per_checkpoint != 0 and training_state['step'] % args.steps_per_checkpoint == 0: save_model(in_model, CHECKPOINT_FILE) with open(TRAINING_STATE_FILE, 'w') as training_state_out: json.dump(training_state, training_state_out) # Evaluation function and evaluator (evaluator output is the input of the metrics) def evaluate(in_model, in_tokenizer, in_data_loader, args): in_model.eval() loss_fn = torch.nn.CrossEntropyLoss(ignore_index=-1) loss = 0.0 acc = 0.0 with torch.no_grad(): for batch in tqdm(in_data_loader, total=len(in_data_loader)): batch = tuple(input_tensor.to(args.device) for input_tensor in batch) input_ids, mc_token_ids, lm_labels, mc_labels, token_type_ids = batch logger.debug(in_tokenizer.decode(input_ids[0, -1, :].tolist())) model_outputs = in_model(input_ids, mc_token_ids, token_type_ids=token_type_ids) lm_logits, mc_logits = model_outputs[0], model_outputs[1] # So we can also use GPT2 outputs lm_logits_flat_shifted = lm_logits[..., :-1, :].contiguous().view(-1, lm_logits.size(-1)) lm_labels_flat_shifted = lm_labels[..., 1:].contiguous().view(-1) results = (lm_logits_flat_shifted, mc_logits), (lm_labels_flat_shifted, mc_labels) loss += loss_fn(results[0][0], results[1][0]) / len(in_data_loader) batch_acc = ((mc_labels.eq(mc_logits.max(-1)[1])).sum()) / mc_labels.shape[0] acc += batch_acc / len(in_data_loader) return {'loss': loss.item() if isinstance(loss, torch.Tensor) else loss, 'acc': acc.item() if isinstance(loss, torch.Tensor) else acc} def train(args, model, tokenizer, optimizer, train_loader, train_sampler, valid_loader, valid_sampler, save=True, amp=None): # Training function and trainer RESULT_DIR = os.path.join(OUTPUT_DIR, args.model_checkpoint) TRAINING_STATE_FILE = os.path.join(RESULT_DIR, 'training_state.json') CHECKPOINT_FILE = os.path.join(RESULT_DIR, 'checkpoint.pt') BEST_MODEL_FILE = os.path.join(RESULT_DIR, 
WEIGHTS_NAME) if not os.path.exists(RESULT_DIR): os.makedirs(RESULT_DIR) training_state = get_blank_training_state() if os.path.exists(TRAINING_STATE_FILE): with open(TRAINING_STATE_FILE) as training_state_in: training_state = json.load(training_state_in) if os.path.exists(CHECKPOINT_FILE): model.load_state_dict(torch.load(CHECKPOINT_FILE)) if args.local_rank in [-1, 0] and save: torch.save(args, os.path.join(RESULT_DIR, 'model_training_args.bin')) getattr(model, 'module', model).config.to_json_file(os.path.join(RESULT_DIR, CONFIG_NAME)) tokenizer.save_vocabulary(RESULT_DIR) for epoch in range(training_state['epoch'], args.n_epochs): logger.info('Starting epoch {}'.format(epoch)) if args.distributed: train_sampler.set_epoch(epoch) valid_sampler.set_epoch(epoch) train_epoch(model, train_loader, optimizer, args, training_state=training_state, amp=amp, save=save) eval_dict = evaluate(model, tokenizer, valid_loader, args) logger.info(json.dumps(eval_dict, indent=2)) if args.local_rank in [-1, 0] and args.n_epochs > 0 and save: save_model(model, CHECKPOINT_FILE) if eval_dict['loss'] < training_state['best_loss']: logger.info('New best loss - saving model') training_state['best_loss'] = eval_dict['loss'] training_state['steps_without_improvement'] = 0 # On the main process: close tensorboard logger and rename the last checkpoint # (for easy re-loading with OpenAIGPTModel.from_pretrained method) if args.local_rank in [-1, 0] and args.n_epochs > 0: if save: shutil.copy(CHECKPOINT_FILE, BEST_MODEL_FILE) else: training_state['steps_without_improvement'] += 1 training_state['epoch'] += 1 training_state['step'] = 0 with open(TRAINING_STATE_FILE, 'w') as training_state_out: json.dump(training_state, training_state_out) if training_state['steps_without_improvement'] == args.early_stopping_after: logger.info('Stopping after {} epochs'.format(epoch + 1)) break if os.path.exists(CHECKPOINT_FILE): os.remove(CHECKPOINT_FILE) if os.path.exists(TRAINING_STATE_FILE): os.remove(TRAINING_STATE_FILE)
from django.contrib.auth import authenticate, login from django.contrib.auth.models import Permission from django.db import transaction from django.forms import PasswordInput from django.shortcuts import redirect from rest_framework import serializers from rest_framework.authtoken.models import Token from rest_framework.exceptions import ValidationError from post_app.models import * from reddit2_api import settings class PostSerializer(serializers.ModelSerializer): class Meta: model = Post fields = [ 'id', 'user', 'title', 'created_date', 'title', 'content', 'nr_likes', 'image', ] class PostCreateUpdateSerializer(serializers.ModelSerializer): class Meta: model = Post fields = [ 'title', 'content', 'image', ] class PosterDetailsSerializer(serializers.ModelSerializer): class Meta: model = Poster fields = ('username','email') class PosterSerializer(serializers.ModelSerializer): password = serializers.CharField(style={'input_type': 'password'}) class Meta: model = Poster fields = ('username', 'password', 'email') class CommentSerializer(serializers.ModelSerializer): class Meta: model = Comment fields = '__all__' class ProfileSerializer(serializers.ModelSerializer): user = PosterDetailsSerializer() class Meta: model = Profile fields =('user','first_name', 'last_name','gender','date_of_birth','avatar') class ProfileSerializerWithID(serializers.ModelSerializer): user = PosterDetailsSerializer() class Meta: model = Profile fields = ('id','user', 'first_name', 'last_name', 'gender', 'date_of_birth', 'avatar') class UserCreateSerializer(serializers.ModelSerializer): user = PosterSerializer() class Meta: model = Profile fields = ('user','first_name', 'last_name','gender','date_of_birth','avatar') def create(self, validated_data): user_data = validated_data.pop('user') with transaction.atomic(): user = Poster.objects.create( username=user_data['username'], email=user_data['email'], ) user.set_password(user_data['password']) user.save() return Profile.objects.create(user=user, **validated_data) class CommentCreateSerializer(serializers.ModelSerializer): class Meta: model = Comment fields = ('comment',) class LikeCreateSerializer(serializers.ModelSerializer): class Meta: model = Like fields = () class UserLoginSerializer(serializers.ModelSerializer): username = serializers.CharField() password = serializers.CharField() class Meta: model = Poster fields = ['username', 'password'] extra_kwargs = {"password": {"write_only": True}} def validate(self, data): username = data['username'] password = data['password'] user = Poster.objects.get(username = username) if not user: raise ValidationError('This user name is not valid') if not user.check_password(password): raise ValidationError('This password is not valid') return user class UserLogouterializer(serializers.ModelSerializer): class Meta: model = Poster fields = ['username', 'password'] extra_kwargs = {"password": {"write_only": True}} def validate(self, data): username = data['username'] password = data['password'] user = Poster.objects.get(username = username) if not user: raise ValidationError('This user name is not valid') if not user.check_password(password): raise ValidationError('This password is not valid') return user
from django.apps import AppConfig class WeeklyPlannerConfig(AppConfig): name = 'weekly_planner'
"""Testing Unicode basics.""" # -*- coding: UTF-8 -*- from dnstwister import dnstwist, tools def test_encode_ascii_domain(): assert tools.encode_domain('www.example.com') == '7777772e6578616d706c652e636f6d' def test_encode_unicode_domain(): unicode_domain = u'www.\u0454xampl\u0454.com' # www.xn--xampl-91ef.com in hex assert tools.encode_domain(unicode_domain) == '7777772e786e2d2d78616d706c2d393165662e636f6d' def test_encode_punycoded_domain(): punycode_domain = 'www.xampl.com-ehlf' assert tools.encode_domain(punycode_domain) == '7777772e78616d706c2e636f6d2d65686c66' def test_decode_encoded_ascii_domain(): assert tools.decode_domain('7777772e6578616d706c652e636f6d') == 'www.example.com' def test_decode_encoded_invalid_ascii_domain(): """Weird edge cases with non-domains that were causing issues.""" assert tools.encode_domain('example') == '6578616d706c65' assert tools.decode_domain('6578616d706c65') == 'example' assert tools.decode_domain(u'6578616d706c65') == 'example' def test_decode_encoded_unicode_punycoded_domain(): # www.xn--xampl-91ef.com in hex assert tools.decode_domain('7777772e786e2d2d78616d706c2d393165662e636f6d') == u'www.\u0454xampl\u0454.com' def test_dnstwist_validations(): """dnstwist validates domains internally, including unicode.""" assert dnstwist.dnstwist.is_valid_domain('www.example1.com') assert dnstwist.dnstwist.is_valid_domain(u'www.\u0454xampl\u0454.com') assert dnstwist.dnstwist.is_valid_domain(u'www.\u0454xampl\u0454.com') assert not dnstwist.dnstwist.is_valid_domain('www.\u0454xampl\u0454.com') assert not dnstwist.dnstwist.is_valid_domain(u'example1') assert not dnstwist.dnstwist.is_valid_domain('example1') def test_idna_submit(webapp): """Can submit idna encoded.""" idna_domain = 'xn--plnt-1na.com' # 'plànt.com' response = webapp.post('/search', {'domains': idna_domain}) assert response.headers['location'] == 'http://localhost/search/786e2d2d706c6e742d316e612e636f6d' def test_raw_unicode_submit(webapp): """Can submit idna encoded.""" domain = u'pl\u00E0nt.com' # 'plànt.com' response = webapp.post( '/search', {'domains': domain}, content_type='application/x-www-form-urlencoded; charset=utf-8', ) assert response.headers['location'] == 'http://localhost/search/786e2d2d706c6e742d316e612e636f6d'
from keras.models import model_from_json import librosa import librosa.feature import glob import numpy as np def load_model(): json_file = open('model.json', 'r') loaded_model_json = json_file.read() json_file.close() loaded_model = model_from_json(loaded_model_json) loaded_model.load_weights("model.h5") print("Loaded model from disk") return loaded_model def extract_features_song(src): y, _ = librosa.load(src) #get mfcc mfcc = librosa.feature.mfcc(y) #normalize between -1 and 1 mfcc /= np.amax(np.absolute(mfcc)) return np.ndarray.flatten(mfcc)[:25000] def get_genre(fname): genres = ['blues', 'classical', 'country', 'disco', 'hiphop', 'jazz', 'metal', 'pop', 'reggae', 'rock'] model = load_model() features = extract_features_song(fname) try: features = np.reshape(features, (1,25000)) prediction = model.predict(features) genre_idx = np.argmax(prediction) return genres[genre_idx] except: return "File too short. Not enough features to predict."
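# A hedged usage sketch for get_genre() above. The audio path is a placeholder
# (any clip librosa can load, e.g. a .wav or .au file of at least a few seconds),
# and 'model.json' / 'model.h5' are assumed to already exist in the working
# directory, since load_model() reads them from there.
if __name__ == '__main__':
    sample_track = 'some_track.wav'  # hypothetical file name
    print('Predicted genre:', get_genre(sample_track))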
from adminapp.models import Exhibit, Exhibit_Notification, Question, User, User_Profile, Subscription, Faq from django.contrib import admin # Register your models here. admin.site.register(Exhibit) admin.site.register(Question) admin.site.register(User_Profile) admin.site.register(User) admin.site.register(Exhibit_Notification) admin.site.register(Subscription) admin.site.register(Faq)
import imaplib import base64 import re import time from pynput.keyboard import Controller, KeyCode keyboardControl = Controller() def server(): email_user = "notifymelocalhost@gmail.com" email_pass = "elhuevo591" print("-----------------------------------------------------------------------------------") print("Waiting for income message, to quit keep pressed ESC button") M = imaplib.IMAP4_SSL('imap.gmail.com', 993) M.login(email_user, email_pass) while(True): try: M.select() typ, message_numbers = M.search(None, 'ALL') for num in message_numbers[0].split(): typ, data = M.fetch(num, '(RFC822)') message = str(data[0][1].decode('utf-8')) message = message.replace('\n', '').replace('\r', '').replace('=', '').replace('***',' ') try: print(message) email = str(re.search('XXX(.+?)XXX', message).group(1)) app = str(re.search('YYY(.+?)YYY', message).group(1)) notification = str(re.search('ZZZ(.+?)ZZZ', message).group(1)).lower().replace('---','\n') if(email=="alexandrotapiaflores@gmail.com"): print("to: " + email) print("application: " + app) print("body:\n" + notification) print("-----------------------------------------------------------------------------------") M.store(num, '+FLAGS', '\\Deleted') if(("spotify" in notification or "youtube" in notification or "music" in notification) and ("play" in notification or "stop" in notification or "pause" in notification or "para" in notification)): keyboardControl.press(KeyCode.from_vk(0xB3)) except : pass except: try: M.close() M.logout() M = imaplib.IMAP4_SSL('imap.gmail.com', 993) M.login(email_user, email_pass) except: pass print("An error has occurred, if it persists keep pressed ESC button") server() server()
# Enter your code here. Read input from STDIN. Print output to STDOUT var = list(map(int, input().split())) #print(var) n = var[0] q = var[1] nodes = [] sets = [] for i in range(n-1): nodes.append(list(map(int, input().split()))) #print(nodes) for i in range(q): a = int(input()) b = list(map(int, input().split())) sets.append(b) ''' print("q: ", q) print("n: ", n) print("nodes: ", nodes) print("sets: ", sets) ''' #DEFINITION OF NODE class Node: def __init__(self, data): self.data = data self.link = [] def appender(self, link): self.link.append(link) lst = [] #BELOW IS MAKING TREE BASED ON THE ENTERED INPUTS for nodeINPUT in nodes: if len(lst) == 0: node1 = Node(nodeINPUT[0]) node2 = Node(nodeINPUT[1]) lst.append(node1) lst.append(node2) else: for i in range(len(lst)): if lst[i].data == nodeINPUT[0]: node1 =lst[i] break else: node1 = Node(nodeINPUT[0]) if i == len(lst)-1: lst.append(node1) for i in range(len(lst)): if lst[i].data == nodeINPUT[1]: node2 =lst[i] break else: node2 = Node(nodeINPUT[1]) if i == len(lst)-1: lst.append(node2) node1.appender(node2) node2.appender(node1) ''' def levelofnode(root, key, level, avoid): if root == None: return -1 if root.data == key: return level for i in range(len(root.link)): if i != avoid or avoid == -1: print('IT Ran! at', i) avoid = root.link[i].link.index(root) l = levelofnode(root.link[i], key, level+1, avoid) if l != -1 and l != None: return l if i == (len(root.link)-1): return levelofnode(root.link[i], key, level+1, avoid) avoid = -1 #root address is not in it's link itself print('OUTPUT OF THE FUNCTION IS:', levelofnode(root, 2, 0, avoid)) ''' #THIS FINDS OUT THE DISTANCE BETWEEN TWO NODES. HERE ROOT IS THE ADDRESS OF THE VALUE V1 AND 'AVOID' MAKES SURE IT DOES NOT COME BACK. def dist(root, v2, avoid): if root == None: return -1 if root.data == v2: return 0 for i in range(len(root.link)): if i != avoid or avoid == -1: avoid = root.link[i].link.index(root) if dist(root.link[i], v2, avoid) != None: l = 1 + dist(root.link[i], v2, avoid) else: continue if l != -1 and l != None: return l ''' root.link.append(root) def lca(root, v1, v2): i = 0 for node in root.link: if i == 0: if node != root and root.data > max(v1, v2): return lca(root.link[i], v1, v2) else: if node != root and root.data < min(v1, v2): ''' from itertools import combinations for SET in sets: if len(SET) != 1: total = 0 for pair in combinations(SET, 2): for node in lst: if node.data == pair[0]: root = node total = total + pair[0]*pair[1]*dist(root, pair[1], -1) print(total%1000000007) else: print(0)
# -*- coding: utf-8 -*- # Generated by Django 1.11.29 on 2021-02-09 11:57 from __future__ import unicode_literals from django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Comments', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('comment', models.CharField(max_length=250)), ], ), migrations.CreateModel( name='Photo', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('title', models.CharField(max_length=155)), ('photo_image', models.ImageField(null=True, upload_to='landing_images/')), ('description', models.TextField(max_length=255)), ('likes', models.IntegerField(default=0)), ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( name='Profile', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('profile_picture', models.ImageField(null=True, upload_to='profile_photo/')), ('bio', models.CharField(max_length=255)), ('contact', models.TextField(max_length=255)), ('photos', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='app.Photo')), ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ), migrations.AddField( model_name='comments', name='commented_photo', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='app.Photo'), ), migrations.AddField( model_name='comments', name='posted_by', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='app.Profile'), ), migrations.AddField( model_name='comments', name='user', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), ]
import torch.utils.data as data import os import os.path import numpy as np def npy_loader(dir,file_name): path = os.path.join(dir,file_name) output = np.load(path) return output class ListDataset(data.Dataset): def __init__(self, input_root, path_list, co_transforms = None, input_transforms = None,args=None,mode=None,give_name = False): self.input_root = input_root self.path_list = path_list self.loader = npy_loader self.input_transforms = input_transforms self.co_transforms = co_transforms self.args = args self.give_name =give_name self.mode = mode def __getitem__(self,index): inputs_list = self.path_list[index] input_name = inputs_list[0] input_name = input_name[:-4] inputs = self.loader(self.input_root,inputs_list[0]) if self.mode == 'train': if self.co_transforms is not None: inputs = self.co_transforms(inputs) if self.input_transforms is not None: inputs = self.input_transforms(inputs) if(self.give_name==True): return inputs, input_name else: return inputs def __len__(self): return len(self.path_list)
import pandas as pd
import networkx as nx
import matplotlib.pyplot as plt


def read_tweets():
    """
    Read the tweets file and build the list of users.
    :return: list of user ids
    """
    user_tweet_list = []
    with open('Tweets/R_DeodorantCancer.txt') as f2:
        for line, column in enumerate(f2):
            column = column.replace('\n', '')
            user_t_id, tweet_id, content, time = column.split('\t')[:]
            user_tweet_list.append(user_t_id)
    user_tweet_list = list(map(int, user_tweet_list))
    user_tweet_list = set(user_tweet_list)  # remove duplicate user ids
    print("tweet_read_complete")
    return user_tweet_list


def read_links():
    """
    :return: data frame; user_id, following_id
    """
    reader = pd.read_csv('deodorant_link_network.csv', header=None, names=['user_id', 'following_id'], iterator=True)
    loop = True
    chunkSize = 50
    chunks = []
    while loop:
        try:
            chunk = reader.get_chunk(chunkSize)
            chunks.append(chunk)
        except StopIteration:
            loop = False
            print("Iteration is stopped.")
    df = pd.concat(chunks, ignore_index=True)
    order = ['following_id', 'user_id']
    df = df[order]
    return df


if __name__ == '__main__':
    user_list = read_tweets()
    df_links = read_links()
    link_list = df_links.values.tolist()  # convert the rows to [following_id, user_id] pairs
    G = nx.DiGraph()
    G.add_nodes_from(user_list)
    G.add_edges_from(link_list)
    # print(G.number_of_nodes())
    # print(G.number_of_edges())
    pos = nx.spring_layout(G)
    nx.draw(G, pos, with_labels=True, font_size=5, node_size=30, width=0.3, alpha=0.8)
    plt.show()
version = '0.0.0' def main(): from datetime import datetime from dateutil.relativedelta import relativedelta return datetime.now() + relativedelta(day=32, months=-1)
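# A short illustration of the relativedelta trick used in main() above:
# months=-1 steps back one month, and day=32 is then capped to that month's last
# day, so the call returns the last day of the previous month while keeping the
# current time of day. The fixed date below is only an example to make the
# behaviour concrete.
if __name__ == '__main__':
    from datetime import datetime
    from dateutil.relativedelta import relativedelta

    fixed = datetime(2021, 3, 15, 10, 30)
    shifted = fixed + relativedelta(day=32, months=-1)
    assert shifted == datetime(2021, 2, 28, 10, 30)
    print(main())  # last day of the previous month relative to "now"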
from abc import ABC # import all autoencoder versions from models.unsupervised import linear_vae as lv_py from models.unsupervised import vae_conv-train_load as cv_k class Encoder(ABC): def __init__(self, latent_size): """ Parameters: ---------- latent_size : int latent dimension size of the autoencoder. """ if (latent_size < 0): raise Exception("latent_size must be greater than 0!") self.latent_size = latent_size super().__init__() class ConvEncoder(Encoder): def __init__(self, latent_size): """ Parameters: ---------- latent_size : int latent dimension size of the autoencoder. """ super().__init__(latent_size) self.type = 'conv' class LinEncoder(Encoder): def __init__(self, latent_size, input_size, num_layers, activation): """ Parameters: ---------- latent_size : int latent dimension size of the autoencoder. input_size : int Input dimension for the data. Should equal the output dimension of the decoder. num_layers : int Number of linear dense layers to add to the encoder. activation : str Type of activation function EX) 'sigmoid', 'relu' """ super().__init__(latent_size) if (input_size < 0): raise Exception("input_size must be greater than 0!") if (num_layers < 0): raise Exception("num_layers must be greater than 0!") # TODO: Add exception handling for activation self.latent_size = latent_size self.output_size = output_size self.num_layers = num_layers self.activation = activation self.type = 'lin' class Decoder(ABC): def __init__(self, latent_size): """ Parameters: ---------- latent_size : int latent dimension size of the autoencoder. """ if (latent_size < 0): raise Exception("latent_size must be greater than 0!") self.latent_size = latent_size super().__init__() class ConvDecoder(Decoder): def __init__(self, latent_size): """ Parameters: ---------- latent_size : int latent dimension size of the autoencoder. """ super().__init__(latent_size) self.type = 'conv' class LinDecoder(Decoder): def __init__(self, latent_size, output_size, num_layers, activation): """ Parameters: ---------- latent_size : int latent dimension size of the autoencoder. output_size : int Output dimension for the data. Should equal the input dimension of the encoder. num_layers : int Number of linear dense layers to add to the decoder. activation : str Type of activation function EX) 'sigmoid', 'relu' """ super().__init__(latent_size) if (output_size < 0): raise Exception("output_size must be greater than 0!") if (num_layers < 0): raise Exception("num_layers must be greater than 0!") # TODO: Add exception handling for activation self.latent_size = latent_size self.output_size = output_size self.num_layers = num_layers self.activation = activation self.type = 'lin' class AutoEncoder(object): def __init__(self, encoder, decoder, mode, shape, backend=None): """ Parameters: ---------- encoder : Encoder encoder options for the autoencoder. decoder : Decoder decoder options of the autoencoder. mode : str Decides backend. Either 'pytorch' or 'keras'. shape : tuple Shape of X_train EX) shape=X_train.shape backend : str Required iff backend is 'keras'. Selects keras backend as 'tf' or 'th' or 'cntk'. """ if (encoder.type != decoder.type): raise Exception("Encoder and decoder must have the same type. 
Either both linear or both convolutional.") if (decoder.latent_size != encoder.latent_size): raise Exception("Encoder and decoder must have the same latent dimension.") if (mode != 'pytorch' and mode != 'keras'): raise Exception("mode must be 'pytorch' or 'keras'") if (mode != 'keras' and backend != None): raise Exception("Only specify backend if mode is 'keras'") self.mode = mode if (self.mode == 'keras'): if (backend != 'tf' and backend != 'th' and backend != 'cntk'): raise Exception("keras selected, must also specify backend 'tf', 'th', or 'cntk'") # TODO: Add checks to see that conv_e == conv_d and lin respecitevly. self.type = encoder.type self.backend = backend self.encoder = encoder self.decoder = decoder self.shape = shape self.model = self.buildAutoEncoder() def buildAutoEncoder(self): AE_model = None if (self.type == 'lin'): if (self.mode == 'pytorch'): AE_model = self.buildPytorchLinModel() elif (self.mode == 'keras'): AE_model = buildKerasLinModel() elif (self.type == 'conv'): if (self.mode == 'pytorch'): AE_model = buildPytorchConvModel() elif (self.mode == 'keras'): AE_model = buildKerasConvModel() if (AE_model == None): raise Exception("Internal Error: AE_model is uninitialized!") return AE_model def buildPytorchLinModel(self): encoder = lv.Encoder(self.encoder.input_size, self.encoder.latent_size) decoder = lv.Decoder(self.decoder.latent_size, self.decoder.output_size) return lv.VAE(encoder, decoder) def buildKerasLinModel(self): return None def buildPytorchConvModel(self): return None def buildKerasConvModel(self): # CODE: From vae_conv_train.py # define parameters for variational autoencoder - convolutional image_size = self.shape[-2:] channels = 1 conv_layers = 3 feature_maps = [128,128,128,128] filter_shapes = [(3,3),(3,3),(3,3),(3,3)] strides = [(1,1),(2,2),(1,1),(1,1)] dense_layers = 1 dense_neurons = [128] dense_dropouts = [0] latent_dim = 3 feature_maps = feature_maps[0:conv_layers] filter_shapes = filter_shapes[0:conv_layers] strides = strides[0:conv_layers] return cv_k.conv_variational_autoencoder(image_size,channels,conv_layers,feature_maps, filter_shapes,strides,dense_layers,dense_neurons, dense_dropouts,latent_dim) def train(x_train, y_train=None): if y_train == True: pass if (self.type == 'lin'): if (self.mode == 'pytorch'): self.model.buildPytorchLinModel() elif (self.mode == 'keras'): self.model.buildKerasLinModel() elif (self.type == 'conv'): if (self.mode == 'pytorch'): self.model.buildPytorchConvModel() elif (self.mode == 'keras'): self.model.buildKerasConvModel() def predict(x_test, y_test=None): pass
#-*-coding:utf-8-*- class Solution(object): def __init__(self): self.word = None self.board = None self.tag = None def clean_tag(self): self.tag = [[False]*len(self.board[0]) for row in range(len(self.board))] def deep_search(self,board, word, x, y): direction = [[0,1], [0,-1], [1,0], [-1,0]] self.tag[x][y] = True #print x,y,board[x][y] if len(word) is 1 and word[0] == board[x][y]: return True word = word[1:] nc = word[0] #print 'nc=>',nc for d in direction: nx = x + d[0] ny = y + d[1] # detection the bounds #print 'next direction=>',nx,ny if nx < 0 or ny < 0 or nx > len(board)-1 or ny > len(board[0])-1: continue #print 'next=>',board[nx][ny], nc, self.tag[nx][ny] # detection the char if board[nx][ny] == nc and self.tag[nx][ny] == False: if self.deep_search(board, word, nx, ny): return True self.tag[nx][ny] = False def exist(self, board, word): """ :type board: List[List[str]] :type word: str :rtype: bool """ self.word = word self.board = board self.tag = [[False]*len(self.board[0]) for row in range(len(self.board))] for x in xrange(len(board)): for y in xrange(len(board[x])): if board[x][y] == word[0]: if self.deep_search(board, word, x, y): return True self.clean_tag() return False if __name__=='__main__': s = Solution() board=[ "FYCENRD", "KLNFINU", "AAARAHR", "NDKLPNE", "ALANSAP", "OOGOTPN", "HPOLANO"] word="POLAND" print s.exist(board, word)
#!/usr/bin/env /data/mta/Script/Python3.8/envs/ska3-shiny/bin/python import os import sys import re import string import math import sqlite3 # #--- reading directory list # path = '/data/mta/Script/MSID_limit/Scripts/house_keeping/dir_list' with open(path, 'r') as f: data = [line.strip() for line in f.readlines()] for ent in data: atemp = re.split(':', ent) var = atemp[1].strip() line = atemp[0].strip() exec("%s = %s" %(var, line)) glimmon = main_dir +'glimmondb.sqlite3' msid = sys.argv[1].strip().lower() db = sqlite3.connect(glimmon) cursor = db.cursor() cursor.execute("SELECT * FROM limits WHERE msid='%s'" %msid) allrows = cursor.fetchall() if len(allrows) == 0: print("not in glimmon database") exit(1) for ent in allrows: print(str(ent))
import nltk import os from nltk.corpus import stopwords def f1(): stoplist = stopwords.words('english') print(stoplist) text = '' for root,dirs,files in os.walk('./script/'): for file in files: with open('./script/{0}'.format(file),'r') as fr: for line in fr: text += line.strip(' ') ''' with open('./script/{0}'.format('15-Minutes.txt'),'r') as fr: for line in fr: text += line.strip(' ') ''' print(text) with open('log.txt','w') as f: f.write(text) sents = nltk.sent_tokenize(text) words = [] for sentce in sents: for word in nltk.word_tokenize(sentce): if word not in stoplist: words.append(word) #print(words) fdist = nltk.FreqDist(words) with open('nltk_log','w') as f: for i in fdist.keys(): f.write(i + ' ') print(fdist.keys()) from sklearn.cluster import KMeans from gensim.models import Word2Vec from gensim.models.word2vec import LineSentence with open('nltk_log','r') as nltk_log: from sklearn.externals import joblib # word2vec向量化 model = Word2Vec(LineSentence(nltk_log), size=100, window=5, min_count=1, workers=4) # 获取model里面的说有关键词 keys = model.wv.vocab.keys() # 获取词对于的词向量 wordvector = [] for key in keys: wordvector.append(model[key]) # 分类 clf = KMeans(n_clusters=10) s = clf.fit(wordvector) print(s) # 获取到所有词向量所属类别 labels = clf.labels_ # 把是一类的放入到一个集合 classCollects = {} for i in range(len(keys)): if str(labels[i]) in classCollects.keys(): classCollects[str(labels[i])].append(list(keys)[i]) else: classCollects[str(labels[i])] = [list(keys)[i]] print(type(classCollects)) print(classCollects) with open('sklearn_log.txt','w') as f: f.write(str(classCollects))
import sqlite3
import re
from Travel.models.User import User
from werkzeug.security import generate_password_hash


class Repository(object):

    def __init__(self, connectionString):
        self.__conn = sqlite3.connect(connectionString, check_same_thread=False)
        # self.__conn = sqlite3.connect(r'C:\Users\junes\OneDrive\Documents\Visual Studio 2015\Projects\pythonProject\TravelPlanner\TravelPlanner\data.sqlite')

    def __del__(self):
        self.__conn.close()

    def get_user_obj(self, email):
        try:
            with self.__conn:
                cursor = self.__conn.cursor()
                exe = cursor.execute("select user_pw, user_id, user_pw, fname, lname from users where user_id = ?", (email,))
                first_line = exe.fetchone()
                if first_line:
                    user_obj = User(first_line[1], first_line[0], first_line[3], first_line[4], 1)  # email, password, first_name, last_name ### REMOVE LATER END
                    return user_obj
                else:
                    return None
        except Exception as e:
            print("Error Occurred in Repository.get_user_obj: " + str(e))
            # prompt to create an account with the email (== user id)
            raise

    ### WORKING :D **************************************************************************
    def update_user(self, user_obj, user_id):
        try:
            with self.__conn:
                cursor = self.__conn.cursor()
                cursor.execute("UPDATE users SET user_id=?, user_pw=?, fname=?, lname=? WHERE user_id = ?",
                               (user_obj.email, user_obj.pw_hash, user_obj.fname, user_obj.lname, user_id))
        except Exception as e:
            print("Error Occurred in Repository.update_user: " + str(e))
            raise

    def create_user(self, user_obj):
        try:
            with self.__conn:
                cursor = self.__conn.cursor()
                cursor.execute("insert into users (user_id, user_pw, fname, lname) values (?, ?, ?, ?)",
                               (user_obj.email, user_obj.pw_hash, user_obj.fname, user_obj.lname))
        except Exception as e:
            print("Error Occurred in Repository.create_user: " + str(e))
            raise

    def get_admin_user_id(self, trip_id):
        try:
            with self.__conn:
                cursor = self.__conn.cursor()
                exe = cursor.execute("select user_id_s from trips where trip_id = ?", (trip_id,))
                first_line = exe.fetchone()
                user_id = None
                if first_line and first_line[0]:
                    user_id = first_line[0]
                return user_id
        except Exception as e:
            print("Error Occurred in Repository.get_admin_user_id: " + str(e))
            raise

    # return True if trip_id already exists in the 'trips' table
    def check_exist_trip(self, existing_trip_id):
        try:
            with self.__conn:
                cursor = self.__conn.cursor()
                exe = cursor.execute("select * from trips where trip_id=?", (existing_trip_id,))
                first_line = exe.fetchone()
                if first_line:
                    return True
                else:
                    return False
        except Exception as e:
            print("Error Occurred in Repository.check_exist_trip: " + str(e))
            raise

    def add_trip_name(self, trip_obj, admin):  # arg 'admin' is boolean
        try:
            with self.__conn:
                cursor = self.__conn.cursor()
                if admin:
                    cursor.execute("insert into trips (trip_id, trip_name, user_id_s, start_datetime, end_datetime) values (?, ?, ?, ?, ?)",
                                   (trip_obj.trip_id, trip_obj.trip_name, trip_obj.user_id, trip_obj.start_datetime, trip_obj.end_datetime))
                    column = "trip_admin"
                else:
                    column = "trip_user"
                trip_admin = cursor.execute("select " + column + " from users where user_id = ?", (trip_obj.user_id,))
                trip_result = trip_admin.fetchone()
                trip_str = ''
                if trip_result[0] is None or trip_result[0] == '':
                    trip_str = trip_obj.trip_id
                else:
                    trip_result_list = []
                    trip_result_list.append(trip_result[0])
                    trip_result_list.append(trip_obj.trip_id)
                    trip_str = ', '.join(trip_result_list)
                if admin:
                    cursor.execute("UPDATE users SET trip_admin=? WHERE user_id=?", (trip_str, trip_obj.user_id))
                else:
                    cursor.execute("UPDATE users SET trip_user=? WHERE user_id=?", (trip_str, trip_obj.user_id))
        except Exception as e:
            print("Error Occurred in Repository.add_trip_name: " + str(e))
            raise

    def get_trip_name(self, trip_id):
        try:
            cursor = self.__conn.cursor()
            exe = cursor.execute("select trip_name from trips where trip_id= ?", (trip_id,))
            result = exe.fetchone()
            if result and result[0]:
                return result[0]
            else:
                return None
        except Exception as e:
            print("ERROR OCCURRED in Repository.get_trip_name: " + str(e))
            raise

    def get_trips_from_trips_table(self, trip_id):
        try:
            list_all = []
            cursor = self.__conn.cursor()
            exe = cursor.execute("select trip_id, trip_name, user_id_s, start_datetime, end_datetime from trips where trip_id= ?", (trip_id,))  # user_id = ?", (user_id,)
            result = exe.fetchone()
            if result:
                list_all.extend(result)
            else:
                list_all = []
            return list_all
        except Exception as e:
            print("ERROR OCCURRED in Repository.get_trips_from_trips_table: " + str(e))
            raise

    def get_trips(self):
        try:
            list_all = []
            cursor = self.__conn.cursor()
            exe = cursor.execute("select * from trips")
            result = exe.fetchall()
            list_all.extend(result)
            return list_all
        except Exception as e:
            print("ERROR OCCURRED in Repository.get_trips: " + str(e))
            raise

    def get_trip_datetime_start(self, event_obj):
        try:
            with self.__conn:
                cursor = self.__conn.cursor()
                exe = cursor.execute("select start_datetime from trips where trip_id = ?", (event_obj.travel_id,))  # event_obj.travel_id, event_obj.datetime
                first_line = exe.fetchone()
                if first_line and first_line[0]:
                    return first_line[0]
                else:
                    return None
        except Exception as e:
            print("ERROR OCCURRED in Repository.get_trip_datetime_start: " + str(e))
            raise

    def get_trip_datetime_end(self, event_obj):
        try:
            with self.__conn:
                cursor = self.__conn.cursor()
                exe = cursor.execute("select end_datetime from trips where trip_id = ?", (event_obj.travel_id,))  # event_obj.travel_id, event_obj.datetime
                first_line = exe.fetchone()
                if first_line and first_line[0]:
                    return first_line[0]
                else:
                    return None
        except Exception as e:
            print("ERROR OCCURRED in Repository.get_trip_datetime_end: " + str(e))
            raise

    def update_trip_datetime_start(self, datetime, travel_id):
        try:
            with self.__conn:
                cursor = self.__conn.cursor()
                cursor.execute("UPDATE trips SET start_datetime=? WHERE trip_id=?", (datetime, travel_id))
        except Exception as e:
            print("ERROR OCCURRED in Repository.update_trip_datetime_start: " + str(e))
            raise

    def update_trip_datetime_end(self, datetime, travel_id):
        try:
            with self.__conn:
                cursor = self.__conn.cursor()
                cursor.execute("UPDATE trips SET end_datetime=? WHERE trip_id=?", (datetime, travel_id))
        except Exception as e:
            print("ERROR OCCURRED in Repository.update_trip_datetime_end: " + str(e))
            raise

    # ADD EVENT OBJ : Flight
    def add_flight(self, flight_obj):
        try:
            with self.__conn:
                cursor = self.__conn.cursor()
                cursor.execute("insert into Flight (travel_id, title, datetime, flight_info, live_status) values(?, ?, ?, ?, ?)",
                               (flight_obj.travel_id, flight_obj.title, flight_obj.datetime, flight_obj.info, flight_obj.status))
        except Exception as e:
            print("ERROR OCCURRED in Repository.add_flight: " + str(e))
            raise

    # ADD EVENT OBJ : Hotel
    def add_hotel(self, hotel_obj):
        try:
            with self.__conn:
                cursor = self.__conn.cursor()
                cursor.execute("insert into Hotel (travel_id, title, datetime, hotel_info) values(?, ?, ?, ?)",
                               (hotel_obj.travel_id, hotel_obj.title, hotel_obj.datetime, hotel_obj.info))
        except Exception as e:
            print("ERROR OCCURRED in Repository.add_hotel: " + str(e))
            raise

    # ADD EVENT OBJ : Place
    def add_place(self, place_obj):
        try:
            with self.__conn:
                cursor = self.__conn.cursor()
                cursor.execute("insert into Place (travel_id, title, datetime, place_info) values(?, ?, ?, ?)",
                               (place_obj.travel_id, place_obj.title, place_obj.datetime, place_obj.info))
        except Exception as e:
            print("ERROR OCCURRED in Repository.add_place: " + str(e))
            raise

    ##########################################################################################
    ## OLD START ##
    def add_travel_info(self, flight_obj, hotel_obj, place_obj):
        try:
            with self.__conn:
                cursor = self.__conn.cursor()
                cursor.execute("insert into Flight (travel_id, title, datetime, flight_info, live_status) values(?, ?, ?, ?, ?)",
                               (flight_obj.travel_id, flight_obj.title, flight_obj.datetime, flight_obj.info, flight_obj.status))
                cursor.execute("insert into Hotel (travel_id, title, datetime, hotel_info) values(?, ?, ?, ?)",
                               (hotel_obj.travel_id, hotel_obj.title, hotel_obj.datetime, hotel_obj.info))
                cursor.execute("insert into Place (travel_id, title, datetime, place_info) values(?, ?, ?, ?)",
                               (place_obj.travel_id, place_obj.title, place_obj.datetime, place_obj.info))
        except Exception as e:
            print("ERROR OCCURRED in Repository.add_travel_info: " + str(e))
            # Do something here
            raise

    def get_trip_users(self, user_id):  # return non-admin trip ids
        try:
            cursor = self.__conn.cursor()
            user_list = []
            trip_user = cursor.execute("select trip_user from users where user_id = ?", (user_id,))
            user_result = trip_user.fetchone()
            if user_result is None:
                return None
            user_s = str(user_result[0])
            # comma separated string to list
            pattern = re.compile(r"^\s+|\s*,\s*|\s+$")
            user_list = [x for x in pattern.split(user_s) if x]
            return user_list  # type: list
        except Exception as e:
            # Do something here
            print("ERROR OCCURRED in Repository.get_trip_users: " + str(e))
            raise

    # get trip_admin trip ids from users and return all the admin TRIP IDs
    def get_trip_list(self, user_id, admin):
        try:
            cursor = self.__conn.cursor()
            trip_list = []
            if admin:
                column = "trip_admin"
            else:
                column = "trip_user"
            trip_admin = cursor.execute("select " + column + " from users where user_id = ?", (user_id,))
            admin_result = trip_admin.fetchone()
            if admin_result is None:
                return None
            admin_s = str(admin_result[0])
            # comma separated string to list
            pattern = re.compile(r"^\s+|\s*,\s*|\s+$")
            trip_list = [x for x in pattern.split(admin_s) if x]
            return trip_list
        except Exception as e:
            # Do something here
            print("ERROR OCCURRED in Repository.get_trip_list: " + str(e))
            raise

    def get_travel_events(self, travel_id):
        try:
            cursor = self.__conn.cursor()
            list_all = []
            exe = cursor.execute("select title, flight_info, datetime, live_status from Flight where travel_id = ? ", (travel_id,))
            result = exe.fetchall()
            if result and result[0]:
                list_all.extend(result)
            exe = cursor.execute("select title, hotel_info, datetime from Hotel where travel_id = ? ", (travel_id,))
            result = exe.fetchall()
            if result and result[0]:
                list_all.extend(result)
            exe = cursor.execute("select title, place_info, datetime from Place where travel_id = ? ", (travel_id,))
            result = exe.fetchall()
            if result and result[0]:
                list_all.extend(result)
            print(list_all)  # list of tuples
            return list_all
        except Exception as e:
            # Do something here
            print("ERROR OCCURRED in Repository.get_travel_events: " + str(e))
            raise
'''
An iterable is a sequence of data that one can iterate over using a loop.
An iterator is an object adhering to the iterator protocol. Basically this
means that it has a __next__ method which, when called, returns the next item
in the sequence, and raises the StopIteration exception when there is nothing
left to return.
'''

'''
Why are iterators useful? When an iterator is used to power a loop, the loop
becomes very simple. The code to initialise the state, to decide if the loop
is finished, and to find the next value is extracted into a separate place,
thereby highlighting the body of the loop.
'''

'''
Calling the __iter__ method on a container to create an iterator object is
the most straightforward way to get hold of an iterator. The iter function
does that for us. Similarly, the next function will call the __next__ method
of the iterator.
'''
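# A minimal sketch (not part of the original notes) illustrating the protocol
# described above; the Countdown class and its start value are hypothetical
# examples, chosen only to show __iter__/__next__, iter() and next() in action.
class Countdown:
    """Iterable that counts down from a start value to 1."""

    def __init__(self, start):
        self.start = start

    def __iter__(self):
        # Returning self makes this object its own iterator.
        self.current = self.start
        return self

    def __next__(self):
        if self.current <= 0:
            raise StopIteration  # signals the loop that the sequence is exhausted
        value = self.current
        self.current -= 1
        return value


it = iter(Countdown(3))   # iter() calls __iter__
print(next(it))           # next() calls __next__ -> 3
for n in Countdown(3):    # the for loop drives the same protocol -> 3 2 1
    print(n)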
from itertools import chain, combinations


def powerset(iterable):
    s = list(iterable)
    return chain.from_iterable(combinations(s, r) for r in range(len(s) + 1))


def transicion(estado, sigma):
    global delta
    print(estado, sigma)
    estado_siguiente = delta[(estado, sigma)]
    print("transicion(", estado, ",", sigma, ")->", estado_siguiente)
    return estado_siguiente


Q = ['q0', 'q1']
s = 'q0'
F = ['q1']
sprima = (s,)
sigma = ['a', 'b']
DELTA = {
    ('q0', 'a'): ['q0', 'q1'],
    ('q0', 'b'): ['q1'],
    ('q1', 'b'): ['q0', 'q1']
}

Qprima = list(powerset(Q))

Fprima = []
for q in Qprima:
    for x in q:
        if x in F:
            Fprima.append(q)

delta = {}
for qprima in Qprima:
    for s in sigma:
        print(qprima, s)
        P = []
        # print("Detecting the states p reachable with", s, "from", qprima)
        for q in qprima:
            if (q, s) in DELTA.keys():
                # print("DELTA[(", q, ",", s, ")]", DELTA[(q, s)])
                for p in DELTA[(q, s)]:
                    if not (p in P):
                        P.append(p)
        # print("P=", P)
        if len(P) > 0:
            P.sort()
            delta[(qprima, s)] = tuple(P)
            print("delta(", qprima, ",", s, ")=", delta[(qprima, s)])

ejemplos = ["a", "b", "aaa", "baaa", "aaaaaaba", "abbb"]
for w in ejemplos:
    estado = sprima
    for c in w:
        estado = transicion(estado, c)
    if estado in Fprima:
        print("{} is in the language".format(w))
    else:
        print("{} is not in the language".format(w))
from django.contrib import admin
from django.utils.translation import ugettext_lazy as _

from .models import Audit

# Forms

# Inlines
# djstackedinline, djtabinline

# Models


@admin.register(Audit)
class AuditAdmin(admin.ModelAdmin):
    pass
from tfcgp.problem import Problem
from tfcgp.chromosome import Chromosome
import numpy as np
import os


class LearnEvolver:

    def __init__(self, problem, config, logname='test', root_dir='.'):
        self.config = config
        self.problem = problem
        self.epochs = 1 * self.problem.epochs
        self.max_learn_fit = 0.0
        self.max_evo_fit = 0.0
        self.max_fit = 0.0
        self.evo_best = Chromosome(self.problem.nin, self.problem.nout)
        self.evo_best.random(config)
        self.learn_best = Chromosome(self.problem.nin, self.problem.nout)
        self.learn_best.from_genes(self.evo_best.genes, config)
        self.logfile = os.path.join(root_dir, 'logs', logname + '.log')
        self.logname = logname
        self.generation = 0

    def run(self, n_steps):
        for i in range(n_steps):
            self.step()

    def mutate(self, chromosome):
        child_genes = np.copy(chromosome.genes)
        change = np.random.rand(len(child_genes)) < self.config.cfg["mutation_rate"]
        child_genes[change] = np.random.rand(np.sum(change))
        child = Chromosome(self.problem.nin, self.problem.nout)
        child.from_genes(child_genes, self.config)
        return child

    def step(self):
        evo_delete = True
        learn_delete = True
        evo_child = self.mutate(self.evo_best)
        self.problem.epochs = 0
        fitness, history = self.problem.get_fitness(evo_child)
        if fitness >= self.max_evo_fit:
            self.evo_best = evo_child
            self.max_evo_fit = fitness
            evo_delete = False
        if fitness >= self.max_learn_fit:
            self.learn_best = evo_child
            self.max_learn_fit = fitness
            evo_delete = False
        learn_child = self.mutate(self.learn_best)
        self.problem.epochs = self.epochs
        fitness, history = self.problem.get_fitness(learn_child)
        if fitness >= self.max_learn_fit:
            self.learn_best = learn_child
            self.max_learn_fit = fitness
            learn_delete = False
        new_max = max(self.max_evo_fit, self.max_learn_fit)
        if new_max > self.max_fit:
            self.max_fit = new_max
        with open(self.logfile, 'a') as f:
            for i in range(len(history)):
                # type,logname,gen,eval,epoch,total_epochs,loss,acc,best
                f.write('L,%s,%d,%d,%d,%d,%0.10f,%0.10f,%0.10f\n' % (
                    self.logname, self.generation, self.problem.eval_count, i,
                    self.problem.epochs, history[i][0], history[i][1], new_max))
            f.write('E,%s,%d,%d,%d,%d,%0.10f,%0.10f,%0.10f\n' % (
                self.logname, self.generation, self.problem.eval_count, 0,
                self.problem.epochs, 0.0, 0.0, new_max))
        if evo_delete:
            del evo_child
        if learn_delete:
            del learn_child
        self.generation += 1
""" Programmer: Chris Tralie / IDS 301 Class Purpose: To load in Trump's tweets since 11/1/2016, and to do some data wrangling on them """ import pickle import numpy as np import matplotlib.pyplot as plt import seaborn as sns import pandas as pd # A dictionary for converting from the 3 letter months # that Twitter gives back to a 2-digit month string MONTHS = {'Jan':'01', 'Feb':'02', 'Mar':'03', 'Apr':'04', 'May':'05', 'Jun':'06', 'Jul':'07', 'Aug':'08', 'Sep':'09', 'Oct':'10', 'Nov':'11', 'Dec':'12'} def get_devices(tweets): """ Print out a dictionary which counts how many tweets Trump made on each different type of device Parameters ---------- tweets: list of dictionary A list of tweets, each of which is a dictionary """ devices = {} # The dictionary will hold "device name":counts for tweet in tweets: # This is one way to loop through a list device = tweet['source'] # If the device hasn't been seen yet, we need to make # A key for it in the dictionary if not (device in devices): devices[device] = 0 # Set the initial count to be zero devices[device] += 1 # Add one to the counts for this device print(devices) def get_most_favorited(tweets): """ Print out the tweet with the maximum number of retweets Parameters ---------- tweets: list of dictionary A list of tweets, each of which is a dictionary """ # First, we setup a parallel numpy array with the same # number of elements as there are tweets counts = np.zeros(len(tweets)) # Then, this loop fills the list with the retweet counts for i, tweet in enumerate(tweets): counts[i] = tweet['retweet_count'] # Finally, we can use the nifty "argmax" function in # numpy to pull out the index with the maximum counts max_index = np.argmax(counts) # We then use this index back in the original tweets list # to print out the tweet with the maximum counts print(tweets[max_index]) def get_tweet_date(tweet): """ Return a date in Year/MM/DD format, which ensures that sorting by the string will sort the tweets in alphabetical order Parameters ---------- tweet: dictionary The tweet dictionary Returns ------- day: Year/MM/DD string """ date = tweet['created_at'] # Separate out date into components in a list # Each element is a different component separated # by a space fields = date.split() # The year is the last field year = fields[-1] # Use the dictionary defined at the top of the file # to convert from a three letter month to a two digit month month = MONTHS[fields[1]] # This magic code formats a day to be 2 digits, potentially # with a leading zero day = "%02d"%int(fields[2]) return year + "/" + month + "/" + day tweets = pickle.load(open("trumpSinceElection.dat", "rb")) get_devices(tweets) get_most_favorited(tweets) print(get_tweet_date(tweets[0]))
import requests
import base64
import json
import cv2
import numpy as np

classes = ["cat", "dog"]

# image = r"12498.jpg"  # dog
# image = r"89.jpg"     # cat
image = r"12500.jpg"    # cat

URL = "http://127.0.0.1:8501/v1/models/model/versions/1:predict"
headers = {"content-type": "application/json"}

image_content = cv2.imread(image, 1).astype('uint8')
width = 150
height = 150
dim = (width, height)
resized = cv2.resize(image_content, dim, interpolation=cv2.INTER_AREA)
print('Original Dimensions : ', image_content.shape)
print('Resized Dimensions  : ', resized.shape)

body = {"instances": [resized.tolist()]}
data = json.dumps(body)
r = requests.post(URL, data=data, headers=headers)
predictions = json.loads(r.text)['predictions']
print(r.text)
print(predictions[0][0])

if predictions[0][0] < 0.5:
    prediction = 0
else:
    prediction = 1
print(classes[prediction])